diff -r cf12fe9ce2b0 -r 079ab75d29a4 Doc/library/shlex.rst
--- a/Doc/library/shlex.rst	Tue Feb 21 13:59:34 2012 +0200
+++ b/Doc/library/shlex.rst	Tue Feb 21 17:13:46 2012 +0000
@@ -70,7 +70,7 @@
 
 The :mod:`shlex` module defines the following class:
 
-.. class:: shlex(instream=None, infile=None, posix=False)
+.. class:: shlex(instream=None, infile=None, posix=False, punctuation_chars=False)
 
    A :class:`shlex` instance or subclass instance is a lexical analyzer object.
    The initialization argument, if present, specifies where to read characters
@@ -84,6 +84,9 @@
    operate in compatibility mode. When operating in POSIX mode, :class:`shlex`
    will try to be as close as possible to the POSIX shell parsing rules.
 
+   .. versionchanged:: 3.3
+      The ``punctuation_chars`` parameter was added. See
+      :ref:`improved-shell-compatibility`.
 
 .. seealso::
 
@@ -267,6 +270,23 @@
    (``''``), in non-POSIX mode, and to ``None`` in POSIX mode.
 
+.. attribute:: shlex.punctuation_chars
+
+   Characters that will be considered punctuation. Runs of punctuation
+   characters will be returned as a single token. However, note that no
+   semantic validity checking will be performed: for example, '>>>' could be
+   returned as a token, even though shells may not recognise it as such.
+
+   .. versionadded:: 3.3
+
+.. attribute:: shlex.token_type
+
+   This will reflect the type of the token most recently returned: 'a' for an
+   alphanumeric (word) token, or 'c' for a punctuation token.
+
+   .. versionadded:: 3.3
+
+
 .. _shlex-parsing-rules:
 
 Parsing Rules
 -------------
 
@@ -316,3 +336,59 @@
 * EOF is signaled with a :const:`None` value;
 
 * Quoted empty strings (``''``) are allowed.
+
+.. _improved-shell-compatibility:
+
+Improved Compatibility with Shells
+----------------------------------
+
+.. versionadded:: 3.3
+
+The :class:`shlex` class now provides added compatibility with the parsing
+performed by common Unix shells like ``bash``, ``dash``, and ``sh``. To allow
+you to take advantage of this improved compatibility, an additional keyword
+parameter, ``punctuation_chars``, has been added to the constructor. This
+defaults to ``False``, which preserves existing behaviour. However, if it is
+set to ``True``, then parsing of the characters ``();<>|&`` is changed: any
+run of these characters is returned as a single token. While this is short of
+a full parser for shells (which would be out of scope for the standard
+library, given the multiplicity of shells out there), it does allow you to
+perform processing of command lines more easily than you could before. To
+illustrate, you can see the difference in the following snippet::
+
+    import shlex
+
+    for punct, message in ((False, 'Old'), (True, 'New')):
+        text = "a && b; c && d || e; f >'abc'; (def \"ghi\")"
+        s = shlex.shlex(text, punctuation_chars=punct)
+        print('%s: %s' % (message, list(s)))
+
+which prints out::
+
+    Old: ['a', '&', '&', 'b', ';', 'c', '&', '&', 'd', '|', '|', 'e', ';', 'f', '>', "'abc'", ';', '(', 'def', '"ghi"', ')']
+    New: ['a', '&&', 'b', ';', 'c', '&&', 'd', '||', 'e', ';', 'f', '>', "'abc'", ';', '(', 'def', '"ghi"', ')']
+
+Of course, tokens will be returned which are not valid for shells, and you'll
+need to implement your own error checks on the returned tokens.
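
The new ``token_type`` attribute documented above pairs naturally with
``get_token()``. The snippet below is only an illustrative sketch and is not
part of the changeset: the helper name is made up, and it relies solely on
behaviour added or documented here (``punctuation_chars``, ``get_token()``,
``eof`` and ``token_type``)::

    import shlex

    def tokens_with_types(text):
        # Collect (token, token_type) pairs.  Per this changeset, token_type
        # is 'a' for a word token and 'c' for a run of punctuation characters.
        lexer = shlex.shlex(text, punctuation_chars=True)
        pairs = []
        while True:
            token = lexer.get_token()
            if token == lexer.eof:   # eof is '' in the default non-POSIX mode
                break
            pairs.append((token, lexer.token_type))
        return pairs

    print(tokens_with_types("a && b || c"))
    # [('a', 'a'), ('&&', 'c'), ('b', 'a'), ('||', 'c'), ('c', 'a')]

The pairs shown in the comment match the expectations of the
``testTokenTypes`` test added further down in this changeset.

+Instead of passing ``True`` as the value for the ``punctuation_chars``
+parameter, you can pass a string with specific characters, which will be used
+to determine which characters constitute punctuation.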
For example:: + + >>> import shlex + >>> s = shlex.shlex("a && b || c", punctuation_chars="|") + >>> list(s) + ['a', '&', '&', 'b', '||', 'c'] + +.. note:: When ``punctuation_chars`` is specified, the :attr:`~shlex.wordchars` + attribute is augmented with the characters ``~-./*?=``. That is because these + characters can appear in file names (including wildcards) and command-line + arguments (e.g. ``--color=auto``). Hence:: + + >>> import shlex + >>> s = shlex.shlex('~/a && b-c --color=auto || d *.py?', + ... punctuation_chars=True) + >>> list(s) + ['~/a', '&&', 'b-c', '--color=auto', '||', 'd', '*.py?'] + + diff -r cf12fe9ce2b0 -r 079ab75d29a4 Lib/shlex.py --- a/Lib/shlex.py Tue Feb 21 13:59:34 2012 +0200 +++ b/Lib/shlex.py Tue Feb 21 17:13:46 2012 +0000 @@ -5,6 +5,7 @@ # push_source() and pop_source() made explicit by ESR, January 2001. # Posix compliance, split(), string arguments, and # iterator interface by Gustavo Niemeyer, April 2003. +# changes to tokenize more like Posix shells by Vinay Sajip, January 2012. import os import re @@ -17,7 +18,7 @@ class shlex: "A lexical analyzer class for simple shell-like syntaxes." - def __init__(self, instream=None, infile=None, posix=False): + def __init__(self, instream=None, infile=None, posix=False, punctuation_chars=False): if isinstance(instream, str): instream = StringIO(instream) if instream is not None: @@ -49,9 +50,19 @@ self.token = '' self.filestack = deque() self.source = None + if not punctuation_chars: + punctuation_chars = '' + elif punctuation_chars is True: + punctuation_chars = '();<>|&' + self.punctuation_chars = punctuation_chars + if punctuation_chars: + # _pushback_chars is a push back queue used by lookahead logic + self._pushback_chars = deque() + # these chars added because allowed in file names, args, wildcards + self.wordchars += '~-./*?=' if self.debug: - print('shlex: reading from %s, line %d' \ - % (self.instream, self.lineno)) + print('shlex: reading from %s, line %d' % (self.instream, + self.lineno)) def push_token(self, tok): "Push a token onto the stack popped by the get_token method" @@ -118,17 +129,21 @@ quoted = False escapedstate = ' ' while True: - nextchar = self.instream.read(1) + if self.punctuation_chars and self._pushback_chars: + nextchar = self._pushback_chars.pop() + else: + nextchar = self.instream.read(1) if nextchar == '\n': - self.lineno = self.lineno + 1 + self.lineno += 1 if self.debug >= 3: - print("shlex: in state", repr(self.state), \ - "I see character:", repr(nextchar)) + print("shlex: in state %r I see character: %r" % (self.state, + nextchar)) if self.state is None: self.token = '' # past end of file break elif self.state == ' ': if not nextchar: + self.token_type = self.state self.state = None # end of file break elif nextchar in self.whitespace: @@ -140,19 +155,27 @@ continue elif nextchar in self.commenters: self.instream.readline() - self.lineno = self.lineno + 1 + self.lineno += 1 elif self.posix and nextchar in self.escape: escapedstate = 'a' + self.token_type = self.state self.state = nextchar elif nextchar in self.wordchars: self.token = nextchar + self.token_type = self.state self.state = 'a' + elif nextchar in self.punctuation_chars: + self.token = nextchar + self.token_type = self.state + self.state = 'c' elif nextchar in self.quotes: if not self.posix: self.token = nextchar + self.token_type = self.state self.state = nextchar elif self.whitespace_split: self.token = nextchar + self.token_type = self.state self.state = 'a' else: self.token = nextchar @@ -168,18 +191,20 @@ # 
XXX what error should be raised here? raise ValueError("No closing quotation") if nextchar == self.state: + self.token_type = self.state if not self.posix: - self.token = self.token + nextchar + self.token += nextchar self.state = ' ' break else: self.state = 'a' - elif self.posix and nextchar in self.escape and \ - self.state in self.escapedquotes: + elif (self.posix and nextchar in self.escape and self.state + in self.escapedquotes): escapedstate = self.state + self.token_type = self.state self.state = nextchar else: - self.token = self.token + nextchar + self.token += nextchar elif self.state in self.escape: if not nextchar: # end of file if self.debug >= 2: @@ -188,18 +213,21 @@ raise ValueError("No escaped character") # In posix shells, only the quote itself or the escape # character may be escaped within quotes. - if escapedstate in self.quotes and \ - nextchar != self.state and nextchar != escapedstate: - self.token = self.token + self.state - self.token = self.token + nextchar + if (escapedstate in self.quotes and nextchar != self.state + and nextchar != escapedstate): + self.token += self.state + self.token += nextchar + self.token_type = self.state self.state = escapedstate - elif self.state == 'a': + elif self.state in ('a', 'c'): if not nextchar: + self.token_type = self.state self.state = None # end of file break elif nextchar in self.whitespace: if self.debug >= 2: print("shlex: I see whitespace in word state") + self.token_type = self.state self.state = ' ' if self.token or (self.posix and quoted): break # emit current token @@ -207,25 +235,41 @@ continue elif nextchar in self.commenters: self.instream.readline() - self.lineno = self.lineno + 1 + self.lineno += 1 if self.posix: + self.token_type = self.state self.state = ' ' if self.token or (self.posix and quoted): break # emit current token else: continue elif self.posix and nextchar in self.quotes: + self.token_type = self.state self.state = nextchar elif self.posix and nextchar in self.escape: escapedstate = 'a' + self.token_type = self.state self.state = nextchar - elif nextchar in self.wordchars or nextchar in self.quotes \ - or self.whitespace_split: - self.token = self.token + nextchar + elif self.state == 'c': + if nextchar in self.punctuation_chars: + self.token += nextchar + else: + if nextchar not in self.whitespace: + self._pushback_chars.append(nextchar) + self.token_type = self.state + self.state = ' ' + break + elif (nextchar in self.wordchars or nextchar in self.quotes + or self.whitespace_split): + self.token += nextchar else: - self.pushback.appendleft(nextchar) + if self.punctuation_chars: + self._pushback_chars.append(nextchar) + else: + self.pushback.appendleft(nextchar) if self.debug >= 2: print("shlex: I see punctuation in word state") + self.token_type = self.state self.state = ' ' if self.token: break # emit current token diff -r cf12fe9ce2b0 -r 079ab75d29a4 Lib/test/test_shlex.py --- a/Lib/test/test_shlex.py Tue Feb 21 13:59:34 2012 +0200 +++ b/Lib/test/test_shlex.py Tue Feb 21 17:13:46 2012 +0000 @@ -174,6 +174,84 @@ "%s: %s != %s" % (self.data[i][0], l, self.data[i][1:])) + def testSyntaxSplitAmpersandAndPipe(self): + """Test handling of syntax splitting of &, |""" + # Could take these forms: &&, &, |&, ;&, ;;& + # of course, the same applies to | and || + # these should all parse to the same output + for delimiter in ('&&', '&', '|&', ';&', ';;&', + '||', '|', '&|', ';|', ';;|'): + src = ['echo hi %s echo bye' % delimiter, + 'echo hi%secho bye' % delimiter] + ref = ['echo', 'hi', delimiter, 
'echo', 'bye'] + for ss in src: + s = shlex.shlex(ss, punctuation_chars=True) + result = list(s) + self.assertEqual(ref, result, "While splitting '%s'" % ss) + + def testSyntaxSplitSemicolon(self): + """Test handling of syntax splitting of ;""" + # Could take these forms: ;, ;;, ;&, ;;& + # these should all parse to the same output + for delimiter in (';', ';;', ';&', ';;&'): + src = ['echo hi %s echo bye' % delimiter, + 'echo hi%s echo bye' % delimiter, + 'echo hi%secho bye' % delimiter] + ref = ['echo', 'hi', delimiter, 'echo', 'bye'] + for ss in src: + s = shlex.shlex(ss, punctuation_chars=True) + result = list(s) + self.assertEqual(ref, result, "While splitting '%s'" % ss) + + def testSyntaxSplitRedirect(self): + """Test handling of syntax splitting of >""" + # of course, the same applies to <, | + # these should all parse to the same output + for delimiter in ('<', '|'): + src = ['echo hi %s out' % delimiter, + 'echo hi%s out' % delimiter, + 'echo hi%sout' % delimiter] + ref = ['echo', 'hi', delimiter, 'out'] + for ss in src: + s = shlex.shlex(ss, punctuation_chars=True) + result = list(s) + self.assertEqual(ref, result, "While splitting '%s'" % ss) + + def testSyntaxSplitParen(self): + """Test handling of syntax splitting of ()""" + # these should all parse to the same output + src = ['( echo hi )', + '(echo hi)'] + ref = ['(', 'echo', 'hi', ')'] + for ss in src: + s = shlex.shlex(ss, punctuation_chars=True) + result = list(s) + self.assertEqual(ref, result, "While splitting '%s'" % ss) + + def testSyntaxSplitCustom(self): + """Test handling of syntax splitting with custom chars""" + ref = ['~/a', '&', '&', 'b-c', '--color=auto', '||', 'd', '*.py?'] + ss = "~/a && b-c --color=auto || d *.py?" + s = shlex.shlex(ss, punctuation_chars="|") + result = list(s) + self.assertEqual(ref, result, "While splitting '%s'" % ss) + + def testTokenTypes(self): + for source, expected in ( + ('a && b || c', + [('a', 'a'), ('&&', 'c'), ('b', 'a'), + ('||', 'c'), ('c', 'a')]), + ): + s = shlex.shlex(source, punctuation_chars=True) + observed = [] + while True: + t = s.get_token() + if t == s.eof: + break + tt = s.token_type + observed.append((t, tt)) + self.assertEqual(observed, expected) + def testQuote(self): safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./' unicode_sample = '\xe9\xe0\xdf' # e + acute accent, a + grave, sharp s
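
To round off the picture, here is a minimal sketch of how a caller might build
on the new tokenization. It is not part of the changeset: the helper name and
the set of operators are illustrative choices, and the code relies only on the
documented behaviour that runs of the characters ``();<>|&`` come back as
single tokens::

    import shlex

    # An illustrative (not exhaustive) set of shell control operators.
    CONTROL_OPERATORS = {'&&', '||', '|', ';', '&'}

    def split_commands(line):
        """Group tokens into (argv, operator) pairs, splitting at control
        operators.  No validity checking is attempted, matching the caveat
        in the documentation above."""
        lexer = shlex.shlex(line, punctuation_chars=True)
        commands, current = [], []
        for token in lexer:            # shlex instances iterate over tokens
            if token in CONTROL_OPERATORS:
                commands.append((current, token))
                current = []
            else:
                current.append(token)
        if current:
            commands.append((current, None))
        return commands

    print(split_commands("make test && echo ok || echo failed"))
    # [(['make', 'test'], '&&'), (['echo', 'ok'], '||'), (['echo', 'failed'], None)]

Any further checking (for example, rejecting an empty command before ``&&``)
is left to the caller, as the documentation changes above point out.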