diff options
author | Patrick Ohly <patrick.ohly@intel.com> | 2016-11-18 16:23:22 +0100 |
---|---|---|
committer | Armin Kuster <akuster808@gmail.com> | 2017-04-28 06:38:37 -0700 |
commit | 9feeff5805c70e345972188d6f290a2c9c32e0bb (patch) | |
tree | b238616567a374fe62554e9b4be56ede0102773f /lib/bb/codeparser.py | |
parent | bdf7a362ecdafd47f309b7a21feac4b94624f287 (diff) | |
download | bitbake-contrib-akuster/1.32-next.tar.gz |
codeparser.py: support deeply nested tokens  (branch: akuster/1.32-next)
For shell constructs like
echo hello & wait $!
the process_tokens() method ended up with a situation where "token"
in the "name, value = token" assignment was a list of tuples
and not the expected tuple, causing the assignment to fail.
There were already two for loops (one in _parse_shell(), one in
process_tokens()) which iterated over token lists. Apparently the
actual nesting can also be deeper.
Now there is just one such loop in process_token_list() which calls
itself recursively when it detects that a list entry is another list.
As a side effect (improvement?!) of the loop removal in
_parse_shell(), the local function definitions in process_tokens() get
executed less often.
Fixes: [YOCTO #10668]
Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Signed-off-by: Armin Kuster <akuster808@gmail.com>
Diffstat (limited to 'lib/bb/codeparser.py')
-rw-r--r-- | lib/bb/codeparser.py | 29 |
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/lib/bb/codeparser.py b/lib/bb/codeparser.py
index 25938d658..5d2d44065 100644
--- a/lib/bb/codeparser.py
+++ b/lib/bb/codeparser.py
@@ -342,8 +342,7 @@ class ShellParser():
         except pyshlex.NeedMore:
             raise sherrors.ShellSyntaxError("Unexpected EOF")

-        for token in tokens:
-            self.process_tokens(token)
+        self.process_tokens(tokens)

     def process_tokens(self, tokens):
         """Process a supplied portion of the syntax tree as returned by
@@ -389,18 +388,24 @@ class ShellParser():
             "case_clause": case_clause,
         }

-        for token in tokens:
-            name, value = token
-            try:
-                more_tokens, words = token_handlers[name](value)
-            except KeyError:
-                raise NotImplementedError("Unsupported token type " + name)
+        def process_token_list(tokens):
+            for token in tokens:
+                if isinstance(token, list):
+                    process_token_list(token)
+                    continue
+                name, value = token
+                try:
+                    more_tokens, words = token_handlers[name](value)
+                except KeyError:
+                    raise NotImplementedError("Unsupported token type " + name)
+
+                if more_tokens:
+                    self.process_tokens(more_tokens)

-            if more_tokens:
-                self.process_tokens(more_tokens)
+                if words:
+                    self.process_words(words)

-            if words:
-                self.process_words(words)
+        process_token_list(tokens)

     def process_words(self, words):
         """Process a set of 'words' in pyshyacc parlance, which includes