mirror of https://github.com/python/cpython.git
Merged revisions 76063,76068 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/py3k
................
r76063 | benjamin.peterson | 2009-11-02 12:16:28 -0600 (Mon, 02 Nov 2009) | 77 lines
Merged revisions 76062 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
................
r76062 | benjamin.peterson | 2009-11-02 12:12:12 -0600 (Mon, 02 Nov 2009) | 70 lines
Merged revisions 74359,75081,75088,75213,75278,75303,75427-75428,75734-75736,75865,76059-76061 via svnmerge from
svn+ssh://pythondev@svn.python.org/sandbox/trunk/2to3/lib2to3
........
r74359 | benjamin.peterson | 2009-08-12 17:23:13 -0500 (Wed, 12 Aug 2009) | 1 line
don't pass the deprecated print_function option
........
r75081 | benjamin.peterson | 2009-09-26 22:02:57 -0500 (Sat, 26 Sep 2009) | 1 line
let 2to3 work with extended iterable unpacking
........
r75088 | benjamin.peterson | 2009-09-27 11:25:21 -0500 (Sun, 27 Sep 2009) | 1 line
look on the type only for __call__
........
r75213 | benjamin.peterson | 2009-10-03 10:09:46 -0500 (Sat, 03 Oct 2009) | 5 lines
revert 75212; it's not correct
People can use isinstance(x, collections.Callable) if they expect objects with
__call__ in their instance dictionaries.
........
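(Context for the entry above, a semantics note rather than part of the log: on new-style classes, calling x() looks __call__ up on the type, so a __call__ stored only in an instance dictionary is invisible to the call machinery even though an attribute check still sees it:

    class A(object):
        pass

    a = A()
    a.__call__ = lambda: 42   # __call__ only in the instance dict
    hasattr(a, "__call__")    # True -- attribute lookup consults the instance
    a()                       # TypeError -- x() looks __call__ up on type(a)
)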
r75278 | benjamin.peterson | 2009-10-07 16:25:56 -0500 (Wed, 07 Oct 2009) | 4 lines
fix whitespace problems with fix_idioms #3563
Patch by Joe Amenta.
........
r75303 | benjamin.peterson | 2009-10-09 16:59:11 -0500 (Fri, 09 Oct 2009) | 1 line
port latin-1 and utf-8 cookie improvements
........
r75427 | benjamin.peterson | 2009-10-14 20:35:57 -0500 (Wed, 14 Oct 2009) | 1 line
force floor division
........
r75428 | benjamin.peterson | 2009-10-14 20:39:21 -0500 (Wed, 14 Oct 2009) | 1 line
silence -3 warnings about __hash__
........
r75734 | benjamin.peterson | 2009-10-26 16:25:53 -0500 (Mon, 26 Oct 2009) | 2 lines
warn on map(None, ...) with more than 2 arguments #7203
........
r75735 | benjamin.peterson | 2009-10-26 16:28:25 -0500 (Mon, 26 Oct 2009) | 1 line
remove unused result
........
r75736 | benjamin.peterson | 2009-10-26 16:29:02 -0500 (Mon, 26 Oct 2009) | 1 line
using get() here is a bit pointless
........
r75865 | benjamin.peterson | 2009-10-27 15:49:00 -0500 (Tue, 27 Oct 2009) | 1 line
explain reason for warning
........
r76059 | benjamin.peterson | 2009-11-02 11:43:47 -0600 (Mon, 02 Nov 2009) | 1 line
tuples are no longer used for children
........
r76060 | benjamin.peterson | 2009-11-02 11:55:40 -0600 (Mon, 02 Nov 2009) | 1 line
revert r76059; apparently some fixers rely on Leaf using () for children
........
r76061 | benjamin.peterson | 2009-11-02 12:06:17 -0600 (Mon, 02 Nov 2009) | 1 line
make fix_tuple_params keep the tree valid #7253
........
................
................
r76068 | benjamin.peterson | 2009-11-02 12:30:48 -0600 (Mon, 02 Nov 2009) | 24 lines
Merged revisions 76064,76066-76067 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
................
r76064 | benjamin.peterson | 2009-11-02 12:16:36 -0600 (Mon, 02 Nov 2009) | 1 line
add space
................
r76066 | benjamin.peterson | 2009-11-02 12:22:53 -0600 (Mon, 02 Nov 2009) | 9 lines
Merged revisions 76065 via svnmerge from
svn+ssh://pythondev@svn.python.org/sandbox/trunk/2to3/lib2to3
........
r76065 | benjamin.peterson | 2009-11-02 12:21:25 -0600 (Mon, 02 Nov 2009) | 1 line
don't print stuff in tests
........
................
r76067 | benjamin.peterson | 2009-11-02 12:24:57 -0600 (Mon, 02 Nov 2009) | 1 line
enable test_parser in lib2to3
................
................
parent 868b578929
commit e80b51fab7

10 changed files with 118 additions and 23 deletions
@@ -53,8 +53,9 @@ stmt: simple_stmt | compound_stmt
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | exec_stmt | assert_stmt)
-expr_stmt: testlist (augassign (yield_expr|testlist) |
-           ('=' (yield_expr|testlist))*)
+expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
+           ('=' (yield_expr|testlist_star_expr))*)
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
 augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
             '<<=' | '>>=' | '**=' | '//=')
 # For normal assignments, additional restrictions enforced by the interpreter
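For reference, the star_expr/testlist_star_expr productions added above (and below) let the 2to3 parser accept Python 3's extended iterable unpacking, for example:

    a, *rest = [1, 2, 3]         # rest == [2, 3]
    first, *mid, last = "abcd"   # mid == ["b", "c"]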
@@ -112,6 +113,7 @@ and_test: not_test ('and' not_test)*
 not_test: 'not' not_test | comparison
 comparison: expr (comp_op expr)*
 comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
 expr: xor_expr ('|' xor_expr)*
 xor_expr: and_expr ('^' and_expr)*
 and_expr: shift_expr ('&' shift_expr)*
@@ -125,14 +127,14 @@ atom: ('(' [yield_expr|testlist_gexp] ')' |
        '{' [dictsetmaker] '}' |
        '`' testlist1 '`' |
        NAME | NUMBER | STRING+ | '.' '.' '.')
-listmaker: test ( comp_for | (',' test)* [','] )
-testlist_gexp: test ( comp_for | (',' test)* [','] )
+listmaker: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_gexp: test ( comp_for | (',' (test|star_expr))* [','] )
 lambdef: 'lambda' [varargslist] ':' test
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
-exprlist: expr (',' expr)* [',']
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
 dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
                 (test (comp_for | (',' test)* [','])) )
@@ -29,7 +29,7 @@
 
 # Local imports
 from .. import fixer_base
-from ..fixer_util import Call, Comma, Name, Node, syms
+from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms
 
 CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
 TYPE = "power< 'type' trailer< '(' x=any ')' > >"
@@ -130,5 +130,24 @@ def transform_sort(self, node, results):
         else:
             raise RuntimeError("should not have reached here")
         sort_stmt.remove()
-        if next_stmt:
-            next_stmt[0].prefix = sort_stmt.prefix
+
+        btwn = sort_stmt.prefix
+        # Keep any prefix lines between the sort_stmt and the list_call and
+        # shove them right after the sorted() call.
+        if "\n" in btwn:
+            if next_stmt:
+                # The new prefix should be everything from the sort_stmt's
+                # prefix up to the last newline, then the old prefix after a new
+                # line.
+                prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix)
+                next_stmt[0].prefix = "\n".join(prefix_lines)
+            else:
+                assert list_call.parent
+                assert list_call.next_sibling is None
+                # Put a blank line after list_call and set its prefix.
+                end_line = BlankLine()
+                list_call.parent.append_child(end_line)
+                assert list_call.next_sibling is end_line
+                # The new prefix should be everything up to the first new line
+                # of sort_stmt's prefix.
+                end_line.prefix = btwn.rpartition("\n")[0]
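Illustration, drawn from the new tests further down: the added branch preserves comment-carrying prefixes when the .sort() call is folded into sorted():

    m = list(s)
    # more comments
    m.sort()

    # is now rewritten to:

    m = sorted(s)
    # more comments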
@@ -49,8 +49,7 @@ class FixMap(fixer_base.ConditionalFix):
     >
     |
     power<
-        'map'
-        args=trailer< '(' [any] ')' >
+        'map' trailer< '(' [arglist=any] ')' >
     >
     """
 
@@ -66,13 +65,22 @@ def transform(self, node, results):
             new.prefix = ""
             new = Call(Name("list"), [new])
         elif "map_lambda" in results:
-            new = ListComp(results.get("xp").clone(),
-                           results.get("fp").clone(),
-                           results.get("it").clone())
+            new = ListComp(results["xp"].clone(),
+                           results["fp"].clone(),
+                           results["it"].clone())
         else:
             if "map_none" in results:
                 new = results["arg"].clone()
             else:
+                if "arglist" in results:
+                    args = results["arglist"]
+                    if args.type == syms.arglist and \
+                       args.children[0].type == token.NAME and \
+                       args.children[0].value == "None":
+                        self.warning(node, "cannot convert map(None, ...) "
+                                     "with multiple arguments because map() "
+                                     "now truncates to the shortest sequence")
+                        return
                 if in_special_context(node):
                     return None
                 new = node.clone()
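Background for the new warning (a behavioral note, not part of the diff): with multiple sequences, Python 2's map pads the shorter ones with None, while Python 3's map stops at the shortest, so map(None, a, b, c) has no mechanical translation:

    # Python 2: pads with None
    map(None, [1, 2, 3], "ab")                 # [(1, 'a'), (2, 'b'), (3, None)]
    # Python 3: truncates to the shortest sequence
    list(map(lambda *a: a, [1, 2, 3], "ab"))   # [(1, 'a'), (2, 'b')]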
@@ -96,6 +96,8 @@ def handle_tuple(tuple_arg, add_prefix=False):
         new_lines[0].prefix = indent
         after = start + 1
 
+        for line in new_lines:
+            line.parent = suite[0]
         suite[0].children[after:after] = new_lines
         for i in range(after+1, after+len(new_lines)+1):
             suite[0].children[i].prefix = indent
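The point of the added loop (bug #7253): lines spliced into suite[0].children directly never had their parent pointers set, leaving orphaned nodes that break later tree traversal. A minimal sketch of the invariant, using a hypothetical Node class rather than the real pytree API:

    class Node:
        def __init__(self):
            self.children = []
            self.parent = None

    suite, line = Node(), Node()
    suite.children.append(line)    # spliced in directly...
    assert line.parent is None     # ...so walking up from `line` dead-ends
    line.parent = suite            # the fix: keep the tree valid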
@@ -379,6 +379,8 @@ def __eq__(self, other):
                 return False
         return True
 
+    __hash__ = None # For Py3 compatibility.
+
 def generate_grammar(filename="Grammar.txt"):
     p = ParserGenerator(filename)
     return p.make_grammar()
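Why __hash__ = None silences the -3 warning: Python 3 makes a class unhashable when it defines __eq__ without __hash__, and assigning None opts into that behavior explicitly. Illustrative sketch:

    class C(object):
        def __eq__(self, other):
            return True
        __hash__ = None

    hash(C())   # TypeError on Python 2 and 3 (exact message differs)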
@@ -231,6 +231,17 @@ def compat(self, token, iterable):
 
 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
 
+def _get_normal_name(orig_enc):
+    """Imitates get_normal_name in tokenizer.c."""
+    # Only care about the first 12 characters.
+    enc = orig_enc[:12].lower().replace("_", "-")
+    if enc == "utf-8" or enc.startswith("utf-8-"):
+        return "utf-8"
+    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+        return "iso-8859-1"
+    return orig_enc
+
 def detect_encoding(readline):
     """
     The detect_encoding() function is used to detect the encoding that should
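A quick sanity check of the normalization, per the code above:

    _get_normal_name("UTF_8")        # -> "utf-8"
    _get_normal_name("Latin-1")      # -> "iso-8859-1"
    _get_normal_name("iso_8859_1")   # -> "iso-8859-1"
    _get_normal_name("euc-jp")       # -> "euc-jp" (unknown names pass through)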
@@ -265,7 +276,7 @@ def find_cookie(line):
         matches = cookie_re.findall(line_string)
         if not matches:
             return None
-        encoding = matches[0]
+        encoding = _get_normal_name(matches[0])
         try:
             codec = lookup(encoding)
         except LookupError:
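A rough usage sketch of the cookie path (lib2to3.pgen2.tokenize.detect_encoding returns an (encoding, lines-read) pair):

    import io
    from lib2to3.pgen2 import tokenize

    buf = io.BytesIO(b"# -*- coding: Latin-1 -*-\nx = 1\n")
    tokenize.detect_encoding(buf.readline)[0]   # 'iso-8859-1' after normalization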
@@ -375,7 +386,7 @@ def generate_tokens(readline):
             column = 0
             while pos < max:                   # measure leading whitespace
                 if line[pos] == ' ': column = column + 1
-                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
+                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                 elif line[pos] == '\f': column = 0
                 else: break
                 pos = pos + 1
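Why the switch to // matters (illustrative): under true division the tab-stop arithmetic would silently produce a float and the wrong value:

    tabsize, column = 8, 3
    (column / tabsize + 1) * tabsize    # 11.0 under true division -- wrong
    (column // tabsize + 1) * tabsize   # 8 -- the next tab stop, as an int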
@@ -63,6 +63,8 @@ def __eq__(self, other):
             return NotImplemented
         return self._eq(other)
 
+    __hash__ = None # For Py3 compatibility.
+
     def __ne__(self, other):
         """
         Compare two nodes for inequality.
@@ -16,10 +16,8 @@
 
 class Test_all(support.TestCase):
     def setUp(self):
-        options = {"print_function" : False}
-        self.refactor = support.get_refactorer(options=options)
+        self.refactor = support.get_refactorer()
 
     def test_all_project_files(self):
         for filepath in support.all_project_files():
-            print("Fixing %s..." % filepath)
             self.refactor.refactor_file(filepath)
@@ -339,6 +339,12 @@ def test_simple_call(self):
         a = "from functools import reduce\nreduce(a, b, c)"
         self.check(b, a)
 
+    def test_bug_7253(self):
+        # fix_tuple_params was being bad and orphaning nodes in the tree.
+        b = "def x(arg): reduce(sum, [])"
+        a = "from functools import reduce\ndef x(arg): reduce(sum, [])"
+        self.check(b, a)
+
     def test_call_with_lambda(self):
         b = "reduce(lambda x, y: x + y, seq)"
         a = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
@@ -2834,6 +2840,11 @@ def test_trailing_comment(self):
         a = """x = list(map(f, 'abc')) # foo"""
         self.check(b, a)
 
+    def test_None_with_multiple_arguments(self):
+        s = """x = map(None, a, b, c)"""
+        self.warns_unchanged(s, "cannot convert map(None, ...) with "
+                             "multiple arguments")
+
     def test_map_basic(self):
         b = """x = map(f, 'abc')"""
         a = """x = list(map(f, 'abc'))"""
@@ -2847,10 +2858,6 @@ def test_map_basic(self):
         a = """x = list('abc')"""
         self.check(b, a)
 
-        b = """x = map(None, 'abc', 'def')"""
-        a = """x = list(map(None, 'abc', 'def'))"""
-        self.check(b, a)
-
         b = """x = map(lambda x: x+1, range(4))"""
         a = """x = [x+1 for x in range(4)]"""
         self.check(b, a)
@@ -3238,6 +3245,46 @@ def test_sort_list_call(self):
             """
         self.check(b, a)
 
+        b = r"""
+            try:
+                m = list(s)
+                m.sort()
+            except: pass
+            """
+
+        a = r"""
+            try:
+                m = sorted(s)
+            except: pass
+            """
+        self.check(b, a)
+
+        b = r"""
+            try:
+                m = list(s)
+                # foo
+                m.sort()
+            except: pass
+            """
+
+        a = r"""
+            try:
+                m = sorted(s)
+                # foo
+            except: pass
+            """
+        self.check(b, a)
+
+        b = r"""
+            m = list(s)
+            # more comments
+            m.sort()"""
+
+        a = r"""
+            m = sorted(s)
+            # more comments"""
+        self.check(b, a)
+
     def test_sort_simple_expr(self):
         b = """
             v = t
@@ -147,7 +147,6 @@ class TestParserIdempotency(support.TestCase):
 
     def test_all_project_files(self):
         for filepath in support.all_project_files():
-            print("Parsing %s..." % filepath)
            with open(filepath, "rb") as fp:
                encoding = tokenize.detect_encoding(fp.readline)[0]
            fp.seek(0)
@@ -161,6 +160,11 @@ def test_all_project_files(self):
             if diff(filepath, new):
                 self.fail("Idempotency failed: %s" % filepath)
 
+    def test_extended_unpacking(self):
+        driver.parse_string("a, *b, c = x\n")
+        driver.parse_string("[*a, b] = x\n")
+        driver.parse_string("(z, *y, w) = m\n")
+        driver.parse_string("for *z, m in d: pass\n")
 
 class TestLiterals(GrammarTest):
 