Closes #1521950: Made shlex parsing more shell-like.

Vinay Sajip 2016-07-29 22:35:03 +01:00
parent d2f87472fe
commit c1f974c944
3 changed files with 264 additions and 31 deletions
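A quick illustration of the behaviour this commit adds, as a hedged sketch: it assumes an interpreter that already includes this change, and the expected token lists are taken from the tests added below (testSyntaxSplitAmpersandAndPipe and friends).

import shlex

# Default parsing: '&' is neither whitespace nor a word character, so each
# occurrence comes back as its own single-character token.
print(list(shlex.shlex('echo hi && echo bye')))
# -> ['echo', 'hi', '&', '&', 'echo', 'bye']

# With punctuation_chars=True, runs of the characters ();<>|& are grouped
# into one token, with or without surrounding whitespace.
print(list(shlex.shlex('echo hi&&echo bye', punctuation_chars=True)))
# -> ['echo', 'hi', '&&', 'echo', 'bye']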

Lib/shlex.py (View file)

@@ -5,6 +5,7 @@
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# changes to tokenize more like Posix shells by Vinay Sajip, July 2016.
import os
import re
@@ -17,7 +18,8 @@
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
def __init__(self, instream=None, infile=None, posix=False,
punctuation_chars=False):
if isinstance(instream, str):
instream = StringIO(instream)
if instream is not None:
@@ -49,6 +51,19 @@ def __init__(self, instream=None, infile=None, posix=False):
self.token = ''
self.filestack = deque()
self.source = None
if not punctuation_chars:
punctuation_chars = ''
elif punctuation_chars is True:
punctuation_chars = '();<>|&'
self.punctuation_chars = punctuation_chars
if punctuation_chars:
# _pushback_chars is a push back queue used by lookahead logic
self._pushback_chars = deque()
# these chars added because allowed in file names, args, wildcards
self.wordchars += '~-./*?='
# remove any punctuation chars from wordchars
t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars))
self.wordchars = self.wordchars.translate(t)
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
@@ -115,12 +130,15 @@ def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if self.punctuation_chars and self._pushback_chars:
nextchar = self._pushback_chars.pop()
else:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
self.lineno += 1
if self.debug >= 3:
print("shlex: in state", repr(self.state), \
"I see character:", repr(nextchar))
print("shlex: in state %r I see character: %r" % (self.state,
nextchar))
if self.state is None:
self.token = '' # past end of file
break
@@ -137,13 +155,16 @@ def read_token(self):
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
self.lineno += 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.punctuation_chars:
self.token = nextchar
self.state = 'c'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
@@ -166,17 +187,17 @@ def read_token(self):
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.token += nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
elif (self.posix and nextchar in self.escape and self.state
in self.escapedquotes):
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
self.token += nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
@@ -185,12 +206,12 @@ def read_token(self):
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
if (escapedstate in self.quotes and
nextchar != self.state and nextchar != escapedstate):
self.token += self.state
self.token += nextchar
self.state = escapedstate
elif self.state == 'a':
elif self.state in ('a', 'c'):
if not nextchar:
self.state = None # end of file
break
@@ -204,7 +225,7 @@ def read_token(self):
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
self.lineno += 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
@@ -216,15 +237,26 @@ def read_token(self):
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
elif self.state == 'c':
if nextchar in self.punctuation_chars:
self.token += nextchar
else:
if nextchar not in self.whitespace:
self._pushback_chars.append(nextchar)
self.state = ' '
break
elif (nextchar in self.wordchars or nextchar in self.quotes
or self.whitespace_split):
self.token += nextchar
else:
self.pushback.appendleft(nextchar)
if self.punctuation_chars:
self._pushback_chars.append(nextchar)
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print("shlex: I see punctuation in word state")
self.state = ' '
if self.token:
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
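
The branches above are the heart of the new tokenizer state 'c': punctuation seen in word state ends the word and goes onto _pushback_chars, and state 'c' then keeps consuming punctuation until something else appears, pushing that character back in turn. A rough sketch of the resulting behaviour, again assuming an interpreter with this change; the outputs match testSyntaxSplitSemicolon and testSyntaxSplitParen below.

import shlex

# State 'c' accumulates punctuation, so multi-character operators such as
# ';;&' come back as a single token.
print(list(shlex.shlex('echo hi ;;& echo bye', punctuation_chars=True)))
# -> ['echo', 'hi', ';;&', 'echo', 'bye']

# The first non-punctuation character ends the run and is pushed back via
# _pushback_chars, so it starts the next token cleanly.
print(list(shlex.shlex('(echo hi)', punctuation_chars=True)))
# -> ['(', 'echo', 'hi', ')']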

Lib/test/test_shlex.py (View file)

@@ -173,6 +173,118 @@ def testCompat(self):
"%s: %s != %s" %
(self.data[i][0], l, self.data[i][1:]))
def testSyntaxSplitAmpersandAndPipe(self):
"""Test handling of syntax splitting of &, |"""
# Could take these forms: &&, &, |&, ;&, ;;&
# of course, the same applies to | and ||
# these should all parse to the same output
for delimiter in ('&&', '&', '|&', ';&', ';;&',
'||', '|', '&|', ';|', ';;|'):
src = ['echo hi %s echo bye' % delimiter,
'echo hi%secho bye' % delimiter]
ref = ['echo', 'hi', delimiter, 'echo', 'bye']
for ss in src:
s = shlex.shlex(ss, punctuation_chars=True)
result = list(s)
self.assertEqual(ref, result, "While splitting '%s'" % ss)
def testSyntaxSplitSemicolon(self):
"""Test handling of syntax splitting of ;"""
# Could take these forms: ;, ;;, ;&, ;;&
# these should all parse to the same output
for delimiter in (';', ';;', ';&', ';;&'):
src = ['echo hi %s echo bye' % delimiter,
'echo hi%s echo bye' % delimiter,
'echo hi%secho bye' % delimiter]
ref = ['echo', 'hi', delimiter, 'echo', 'bye']
for ss in src:
s = shlex.shlex(ss, punctuation_chars=True)
result = list(s)
self.assertEqual(ref, result, "While splitting '%s'" % ss)
def testSyntaxSplitRedirect(self):
"""Test handling of syntax splitting of >"""
# of course, the same applies to <, |
# these should all parse to the same output
for delimiter in ('<', '|'):
src = ['echo hi %s out' % delimiter,
'echo hi%s out' % delimiter,
'echo hi%sout' % delimiter]
ref = ['echo', 'hi', delimiter, 'out']
for ss in src:
s = shlex.shlex(ss, punctuation_chars=True)
result = list(s)
self.assertEqual(ref, result, "While splitting '%s'" % ss)
def testSyntaxSplitParen(self):
"""Test handling of syntax splitting of ()"""
# these should all parse to the same output
src = ['( echo hi )',
'(echo hi)']
ref = ['(', 'echo', 'hi', ')']
for ss in src:
s = shlex.shlex(ss, punctuation_chars=True)
result = list(s)
self.assertEqual(ref, result, "While splitting '%s'" % ss)
def testSyntaxSplitCustom(self):
"""Test handling of syntax splitting with custom chars"""
ref = ['~/a', '&', '&', 'b-c', '--color=auto', '||', 'd', '*.py?']
ss = "~/a && b-c --color=auto || d *.py?"
s = shlex.shlex(ss, punctuation_chars="|")
result = list(s)
self.assertEqual(ref, result, "While splitting '%s'" % ss)
def testTokenTypes(self):
"""Test that tokens are split with types as expected."""
for source, expected in (
('a && b || c',
[('a', 'a'), ('&&', 'c'), ('b', 'a'),
('||', 'c'), ('c', 'a')]),
):
s = shlex.shlex(source, punctuation_chars=True)
observed = []
while True:
t = s.get_token()
if t == s.eof:
break
if t[0] in s.punctuation_chars:
tt = 'c'
else:
tt = 'a'
observed.append((t, tt))
self.assertEqual(observed, expected)
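
The classification used in testTokenTypes (first character of a token in punctuation_chars means an operator token) can be reused by callers. A hypothetical helper, not part of this commit, that groups word tokens between operators:

import shlex

def split_commands(line):
    # Hypothetical example built on the new API: collect word tokens into
    # groups, starting a new group at every punctuation (operator) token.
    s = shlex.shlex(line, punctuation_chars=True)
    commands, current = [], []
    for tok in s:
        if tok[0] in s.punctuation_chars:
            if current:
                commands.append(current)
            current = []
        else:
            current.append(tok)
    if current:
        commands.append(current)
    return commands

print(split_commands('a && b || c'))
# -> [['a'], ['b'], ['c']]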
def testPunctuationInWordChars(self):
"""Test that any punctuation chars are removed from wordchars"""
s = shlex.shlex('a_b__c', punctuation_chars='_')
self.assertNotIn('_', s.wordchars)
self.assertEqual(list(s), ['a', '_', 'b', '__', 'c'])
def testPunctuationWithWhitespaceSplit(self):
"""Test that with whitespace_split, behaviour is as expected"""
s = shlex.shlex('a && b || c', punctuation_chars='&')
# whitespace_split is False, so splitting will be based on
# punctuation_chars
self.assertEqual(list(s), ['a', '&&', 'b', '|', '|', 'c'])
s = shlex.shlex('a && b || c', punctuation_chars='&')
s.whitespace_split = True
# whitespace_split is True, so splitting will be based on
# white space
self.assertEqual(list(s), ['a', '&&', 'b', '||', 'c'])
def testEmptyStringHandling(self):
"""Test that parsing of empty strings is correctly handled."""
# see Issue #21999
expected = ['', ')', 'abc']
for punct in (False, True):
s = shlex.shlex("'')abc", posix=True, punctuation_chars=punct)
slist = list(s)
self.assertEqual(slist, expected)
expected = ["''", ')', 'abc']
s = shlex.shlex("'')abc", punctuation_chars=True)
self.assertEqual(list(s), expected)
def testQuote(self):
safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
unicode_sample = '\xe9\xe0\xdf' # e + acute accent, a + grave, sharp s