[3.14] gh-137078: Fix keyword typo recognition when executed over files (GH-137079) (#137826)
parent 4cb99bf9e3
commit 7b56d08219

6 changed files with 56 additions and 7 deletions

Grammar/python.gram
@@ -94,10 +94,12 @@ func_type[mod_ty]: '(' a=[type_expressions] ')' '->' b=expression NEWLINE* ENDMA
 # GENERAL STATEMENTS
 # ==================
 
-statements[asdl_stmt_seq*]: a=statement+ { _PyPegen_register_stmts(p, (asdl_stmt_seq*)_PyPegen_seq_flatten(p, a)) }
+statements[asdl_stmt_seq*]: a=statement+ { (asdl_stmt_seq*)_PyPegen_seq_flatten(p, a) }
 
 statement[asdl_stmt_seq*]:
-    | a=compound_stmt { (asdl_stmt_seq*)_PyPegen_singleton_seq(p, a) }
+    | a=compound_stmt { _PyPegen_register_stmts(p ,
+                            (asdl_stmt_seq*)_PyPegen_singleton_seq(p, a)
+                        ) }
     | a[asdl_stmt_seq*]=simple_stmts { a }
 
 single_compound_stmt[asdl_stmt_seq*]:
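
Reading the two grammar actions together: statement wraps each compound statement in a one-element sequence, and statements flattens those per-statement sequences into a single sequence. The change moves the _PyPegen_register_stmts side effect from the already-flattened result onto each compound statement as it is parsed. A toy sketch of the pure sequence operations, with plain Python lists standing in for asdl_stmt_seq (an illustration only, not the CPython data structures):

# Toy model: Python lists stand in for asdl_stmt_seq, strings for stmt nodes.
def singleton_seq(stmt):
    return [stmt]

def seq_flatten(seqs):
    return [stmt for seq in seqs for stmt in seq]

# statement+ yields one sequence per statement; statements flattens them.
parsed = [singleton_seq("if_stmt"), ["x = 1", "y = 2"], singleton_seq("while_stmt")]
print(seq_flatten(parsed))   # ['if_stmt', 'x = 1', 'y = 2', 'while_stmt']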

Lib/test/test_traceback.py
@@ -19,7 +19,7 @@
                          requires_debug_ranges, has_no_debug_ranges,
                          requires_subprocess)
 from test.support.os_helper import TESTFN, unlink
-from test.support.script_helper import assert_python_ok, assert_python_failure
+from test.support.script_helper import assert_python_ok, assert_python_failure, make_script
 from test.support.import_helper import forget
 from test.support import force_not_colorized, force_not_colorized_test_class
@@ -1740,6 +1740,49 @@ def f():
            ]
        self.assertEqual(result_lines, expected)
 
+class TestKeywordTypoSuggestions(unittest.TestCase):
+    TYPO_CASES = [
+        ("with block ad something:\n pass", "and"),
+        ("fur a in b:\n pass", "for"),
+        ("for a in b:\n pass\nelso:\n pass", "else"),
+        ("whille True:\n pass", "while"),
+        ("iff x > 5:\n pass", "if"),
+        ("if x:\n pass\nelseif y:\n pass", "elif"),
+        ("tyo:\n pass\nexcept y:\n pass", "try"),
+        ("classe MyClass:\n pass", "class"),
+        ("impor math", "import"),
+        ("form x import y", "from"),
+        ("defn calculate_sum(a, b):\n return a + b", "def"),
+        ("def foo():\n returm result", "return"),
+        ("lamda x: x ** 2", "lambda"),
+        ("def foo():\n yeld i", "yield"),
+        ("def foo():\n globel counter", "global"),
+        ("frum math import sqrt", "from"),
+        ("asynch def fetch_data():\n pass", "async"),
+        ("async def foo():\n awaid fetch_data()", "await"),
+        ('raisee ValueError("Error")', "raise"),
+        ("[x for x\nin range(3)\nof x]", "if"),
+        ("[123 fur x\nin range(3)\nif x]", "for"),
+        ("for x im n:\n pass", "in"),
+    ]
+
+    def test_keyword_suggestions_from_file(self):
+        with tempfile.TemporaryDirectory() as script_dir:
+            for i, (code, expected_kw) in enumerate(self.TYPO_CASES):
+                with self.subTest(typo=expected_kw):
+                    source = textwrap.dedent(code).strip()
+                    script_name = make_script(script_dir, f"script_{i}", source)
+                    rc, stdout, stderr = assert_python_failure(script_name)
+                    stderr_text = stderr.decode('utf-8')
+                    self.assertIn(f"Did you mean '{expected_kw}'", stderr_text)
+
+    def test_keyword_suggestions_from_command_string(self):
+        for code, expected_kw in self.TYPO_CASES:
+            with self.subTest(typo=expected_kw):
+                source = textwrap.dedent(code).strip()
+                rc, stdout, stderr = assert_python_failure('-c', source)
+                stderr_text = stderr.decode('utf-8')
+                self.assertIn(f"Did you mean '{expected_kw}'", stderr_text)
+
 @requires_debug_ranges()
 @force_not_colorized_test_class
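
Outside the test suite, the behaviour exercised by test_keyword_suggestions_from_file can be reproduced with a short script runner. This is a sketch: only the "Did you mean '<keyword>'" fragment is what the tests above assert; the rest of the interpreter's error text is not reproduced here.

# Sketch: run a file containing a misspelled keyword and inspect stderr.
import os
import subprocess
import sys
import tempfile

code = "whille True:\n    pass\n"     # 'whille' should suggest 'while'
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "script.py")
    with open(path, "w") as f:
        f.write(code)
    proc = subprocess.run([sys.executable, path], capture_output=True, text=True)

print(proc.returncode != 0)                   # True: the script fails to compile
print("Did you mean 'while'" in proc.stderr)  # True once this fix is applied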

Lib/traceback.py
@@ -1310,7 +1310,6 @@ def _find_keyword_typos(self):
        lines = source.splitlines()
 
        error_code = lines[line -1 if line > 0 else 0:end_line]
-        error_code[0] = error_code[0][offset:]
        error_code = textwrap.dedent('\n'.join(error_code))
 
        # Do not continue if the source is too large
@@ -1326,7 +1325,8 @@ def _find_keyword_typos(self):
            if token.type != tokenize.NAME:
                continue
            # Only consider NAME tokens on the same line as the error
-            if from_filename and token.start[0]+line != end_line+1:
+            the_end = end_line if line == 0 else end_line + 1
+            if from_filename and token.start[0]+line != the_end:
                continue
            wrong_name = token.string
            if wrong_name in keyword.kwlist:
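
The second hunk is the heart of the fix. The old code compared token.start[0] + line against end_line + 1 unconditionally, which skips every token on the error line whenever the recorded metadata line is 0, as can happen on the from_filename path this check guards; the new code only adds 1 when line is non-zero. A sketch of the comparison with made-up values (the variable names follow the diff above, the numbers are illustrative):

# Sketch of the "is this token on the error line?" check, before and after.
def on_error_line_old(token_start_line, line, end_line):
    return token_start_line + line == end_line + 1

def on_error_line_new(token_start_line, line, end_line):
    the_end = end_line if line == 0 else end_line + 1
    return token_start_line + line == the_end

# line == 0: the old check is off by one and rejects the token on the error line.
print(on_error_line_old(1, 0, 1), on_error_line_new(1, 0, 1))   # False True
# line > 0: both versions agree.
print(on_error_line_old(1, 3, 3), on_error_line_new(1, 3, 3))   # True True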

Misc/NEWS.d (new entry)
@@ -0,0 +1 @@
+Fix keyword typo recognition when parsing files. Patch by Pablo Galindo.

Parser/pegen.c
@@ -1939,6 +1939,9 @@ _PyPegen_register_stmts(Parser *p, asdl_stmt_seq* stmts) {
        return stmts;
    }
    stmt_ty last_stmt = asdl_seq_GET(stmts, len - 1);
+    if (p->last_stmt_location.lineno > last_stmt->lineno) {
+        return stmts;
+    }
    p->last_stmt_location.lineno = last_stmt->lineno;
    p->last_stmt_location.col_offset = last_stmt->col_offset;
    p->last_stmt_location.end_lineno = last_stmt->end_lineno;
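
The added guard keeps last_stmt_location from moving backwards now that _PyPegen_register_stmts fires once per compound statement rather than once for the whole flattened sequence. A rough Python analogue of the guard (hypothetical types for illustration, not the real Parser struct):

# Rough analogue of the C guard above; Location is a stand-in for the
# parser's last_stmt_location field, not a CPython type.
from dataclasses import dataclass

@dataclass
class Location:
    lineno: int = 0
    col_offset: int = 0
    end_lineno: int = 0

def register_stmt(last: Location, lineno: int, col_offset: int, end_lineno: int) -> None:
    if last.lineno > lineno:   # mirrors: if (p->last_stmt_location.lineno > last_stmt->lineno)
        return                 #              return stmts;
    last.lineno = lineno
    last.col_offset = col_offset
    last.end_lineno = end_lineno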

Parser/parser.c (generated)
@@ -1201,7 +1201,7 @@ statements_rule(Parser *p)
    )
    {
        D(fprintf(stderr, "%*c+ statements[%d-%d]: %s succeeded!\n", p->level, ' ', _mark, p->mark, "statement+"));
-        _res = _PyPegen_register_stmts ( p , ( asdl_stmt_seq* ) _PyPegen_seq_flatten ( p , a ) );
+        _res = ( asdl_stmt_seq* ) _PyPegen_seq_flatten ( p , a );
        if (_res == NULL && PyErr_Occurred()) {
            p->error_indicator = 1;
            p->level--;
@@ -1244,7 +1244,7 @@ statement_rule(Parser *p)
    )
    {
        D(fprintf(stderr, "%*c+ statement[%d-%d]: %s succeeded!\n", p->level, ' ', _mark, p->mark, "compound_stmt"));
-        _res = ( asdl_stmt_seq* ) _PyPegen_singleton_seq ( p , a );
+        _res = _PyPegen_register_stmts ( p , ( asdl_stmt_seq* ) _PyPegen_singleton_seq ( p , a ) );
        if (_res == NULL && PyErr_Occurred()) {
            p->error_indicator = 1;
            p->level--;