python-uncompyle6 — mirror of https://github.com/rocky/python-uncompyle6.git

Compare commits
19 commits (release-2. ... release-2.)

97999c5e67
4563a547bc
9cfd7d669e
413f5aa5a5
b4426931ef
92f5981661
54fe07e989
adc9b99106
1392b18bd7
9ae84092cb
85d68a7926
b3359439f9
9be9abc682
c17ac696d6
9e2119f1a9
86305097d2
c8d15e7654
1d7a3c6444
e7778f83f2
.gitignore (vendored) — 3 changes

@@ -17,4 +17,5 @@
 __pycache__
 build
 /.venv*
-/.idea
+/.idea
+/.hypothesis
ChangeLog — 87 changes

@@ -1,3 +1,90 @@
+2017-10-10 rocky <rb@dustyfeet.com>
+
+	* HOW-TO-REPORT-A-BUG.md, test/Makefile, uncompyle6/parser.py,
+	uncompyle6/parsers/parse3.py, uncompyle6/scanners/scanner3.py,
+	uncompyle6/semantics/consts.py, uncompyle6/semantics/pysource.py:
+	Improve parse trace. lambda fixes yet again
+
+2017-10-10 rocky <rb@dustyfeet.com>
+
+	* test/simple_source/branching/02_ifelse_lambda.py,
+	uncompyle6/semantics/consts.py: Address dead code in lambda ifelse
+
+2017-10-10 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse24.py, uncompyle6/scanners/scanner3.py:
+	Misc bugs
+
+2017-10-10 R. Bernstein <rocky@users.noreply.github.com>
+
+	* : Merge pull request #131 from rocky/type2kind-rework Adjust for
+	spark-parser 2.7.0 incompatibilities
+
+2017-10-10 rocky <rb@dustyfeet.com>
+
+	* __pkginfo__.py, pytest/test_grammar.py, pytest/test_pysource.py,
+	uncompyle6/parser.py, uncompyle6/parsers/astnode.py,
+	uncompyle6/parsers/parse2.py, uncompyle6/parsers/parse24.py,
+	uncompyle6/parsers/parse26.py, uncompyle6/parsers/parse27.py,
+	uncompyle6/parsers/parse3.py, uncompyle6/parsers/parse32.py,
+	uncompyle6/parsers/parse34.py, uncompyle6/parsers/parse35.py,
+	uncompyle6/parsers/parse36.py, uncompyle6/parsers/parse37.py,
+	uncompyle6/scanners/scanner22.py, uncompyle6/scanners/scanner26.py,
+	uncompyle6/scanners/scanner27.py, uncompyle6/scanners/scanner3.py,
+	uncompyle6/scanners/tok.py, uncompyle6/semantics/check_ast.py,
+	uncompyle6/semantics/fragments.py,
+	uncompyle6/semantics/make_function.py,
+	uncompyle6/semantics/pysource.py, uncompyle6/verify.py,
+	uncompyle6/version.py: Adjust for spark-parser 2.7.0
+	incompatabilities
+
+2017-10-05 rocky <rb@dustyfeet.com>
+
+	* : One more test
+
+2017-10-05 rocky <rb@dustyfeet.com>
+
+	* : commit b3359439f94c136619b198beaecbfce1b827d2db Author: rocky
+	<rb@dustyfeet.com> Date: Thu Oct 5 11:00:55 2017 -0400
+
+2017-10-03 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse2.py, uncompyle6/parsers/parse24.py,
+	uncompyle6/parsers/parse26.py: handle newer parser reduction
+	behavior
+
+2017-10-03 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/semantics/pysource.py: Remove schumutz
+
+2017-10-03 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/semantics/pysource.py: More table doc tweaks
+
+2017-10-03 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/semantics/fragments.py,
+	uncompyle6/semantics/pysource.py: Go over table-semantics
+	description yet again
+
+2017-10-02 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse2.py, uncompyle6/parsers/parse3.py:
+	spark-parser induced changes... reduce rules can be called without
+	token streams.
+
+2017-09-30 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/parser.py, uncompyle6/scanners/scanner2.py,
+	uncompyle6/scanners/scanner3.py: Document hacky customize arg count
+	better.
+
+2017-09-26 rocky <rb@dustyfeet.com>
+
+	* README.rst: Word hacking
+
+2017-09-26 rocky <rb@dustyfeet.com>
+
+	* ChangeLog, NEWS: Get ready for release 2.12.0
+
+2017-09-26 rocky <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse3.py: No unicode in Python3. but we need
+	it in Python2. The bug was probably introduced as a
HOW-TO-REPORT-A-BUG.md

@@ -3,8 +3,9 @@
 ## The difficulty of the problem
 
 There is no Python decompiler yet, that I know about that will
-decompyle everything. This one probably does the
-best job of *any* Python decompiler. But it is a constant work in progress: Python keeps changing, and so does its code generation.
+decompyle everything. This one probably does the best job of *any*
+Python decompiler. But it is a constant work in progress: Python keeps
+changing, and so does its code generation.
 
 I have found bugs in *every* Python decompiler I have tried. Even
 those where authors/maintainers claim that they have used it on
@@ -14,6 +15,55 @@ but that the program is *semantically* not equivalent.
 
 So it is likely you'll find a mistranslation in decompiling.
 
+## Is it really a bug?
+
+If the code emitted is semantically equivalent, then this isn't a bug.
+
+For example the code might be
+
+```
+if a:
+    if b:
+        x = 1
+```
+
+and we might produce:
+
+```
+if a and b:
+    x = 1
+```
+
+These are equivalent. Sometimes
+
+```
+else:
+    if ...
+```
+
+may come out as `elif`.
+
+As mentioned in the README, it is possible that Python changes what
+you write to be more efficient. For example, for:
+
+```
+if True:
+    x = 5
+```
+
+Python will generate code like:
+
+```
+x = 5
+```
+
+So just because the text isn't the same does not necessarily mean
+there's a bug.
+
 ## What to send (minimum requirements)
 
 The basic requirement is pretty simple:
@@ -21,6 +71,12 @@ The basic requirement is pretty simple:
 * Python bytecode
 * Python source text
 
+Please don't put files on download services that one has to register
+for. If you can't attach it to the issue, or create a github gist,
+then the code you are sending is too large.
+
+Please also try to narrow the bug. See below.
+
 ## What to send (additional helpful information)
 
 Some kind folks also give the invocation they used and the output
Makefile — 2 changes

@@ -89,7 +89,7 @@ bdist_egg:
 
 #: Create binary wheel distribution
-bdist_wheel:
+wheel:
	$(PYTHON) ./setup.py bdist_wheel
 
 
NEWS — 12 changes

@@ -1,9 +1,21 @@
+uncompyle6 2.13.1 2017-10-11
+
+- Re-release because Python 2.4 source uploaded rather than 2.6-3.6
+
+uncompyle6 2.13.0 2017-10-10
+
+- Fixes in deparsing lambda expressions
+- Improve table-semantics descriptions
+- Document hacky customize arg count better (until we can remove it)
+- Update to use xdis 3.7.0 or greater
+
 uncompyle6 2.12.0 2017-09-26
 
 - Use xdis 3.6.0 or greater now
 - Small semantic table cleanups
 - Python 3.4's terms a little names better
 - Slightly more Python 3.7, but still failing a lot
 - Cross Python 2/3 compatibility with annotation arguments
 
 uncompyle6 2.11.5 2017-08-31
README.rst

@@ -4,7 +4,7 @@ uncompyle6
 ==========
 
 A native Python cross-version Decompiler and Fragment Decompiler.
-Follows in the tradition of decompyle, uncompyle, and uncompyle2.
+The successor to decompyle, uncompyle, and uncompyle2.
 
 
 Introduction
__pkginfo__.py

@@ -39,7 +39,7 @@ entry_points = {
     'pydisassemble=uncompyle6.bin.pydisassemble:main',
 ]}
 ftp_url = None
-install_requires = ['spark-parser >= 1.6.1, < 1.7.0',
+install_requires = ['spark-parser >= 1.7.0, < 1.8.0',
                     'xdis >= 3.6.0, < 3.7.0', 'six']
 license = 'MIT'
 mailing_list = 'python-debugger@googlegroups.com'
pytest/test_grammar.py

@@ -11,15 +11,16 @@ def test_grammar():
     remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     assert remain_tokens == set([]), \
-        "Remaining tokens %s\n====\n%s" % (remain_tokens, p.dumpGrammar())
+        "Remaining tokens %s\n====\n%s" % (remain_tokens, p.dump_grammar())
 
     p = get_python_parser(PYTHON_VERSION, is_pypy=IS_PYPY)
-    lhs, rhs, tokens, right_recursive = p.checkSets()
+    lhs, rhs, tokens, right_recursive = p.check_sets()
     expect_lhs = set(['expr1024', 'pos_arg'])
     unused_rhs = set(['build_list', 'call_function', 'mkfunc',
                       'mklambda',
                       'unpack', 'unpack_list'])
-    expect_right_recursive = [['designList', ('designator', 'DUP_TOP', 'designList')]]
+    expect_right_recursive = frozenset([('designList',
+                                         ('designator', 'DUP_TOP', 'designList'))])
     if PYTHON3:
         expect_lhs.add('load_genexpr')
 
@@ -39,13 +40,14 @@ def test_grammar():
     s = get_scanner(PYTHON_VERSION, IS_PYPY)
     ignore_set = set(
             """
-            JUMP_BACK CONTINUE RETURN_END_IF
+            JUMP_BACK CONTINUE
             COME_FROM COME_FROM_EXCEPT
             COME_FROM_EXCEPT_CLAUSE
             COME_FROM_LOOP COME_FROM_WITH
             COME_FROM_FINALLY ELSE
             LOAD_GENEXPR LOAD_ASSERT LOAD_SETCOMP LOAD_DICTCOMP
-            LAMBDA_MARKER RETURN_LAST
+            LAMBDA_MARKER
+            RETURN_END_IF RETURN_END_IF_LAMBDA RETURN_VALUE_LAMBDA RETURN_LAST
             """.split())
     if 2.6 <= PYTHON_VERSION <= 2.7:
         opcode_set = set(s.opc.opname).union(ignore_set)
pytest/test_pysource.py

@@ -57,7 +57,7 @@ def test_tables():
             # One arg - should be int or tuple of int
             if typ == 'c':
                 assert isinstance(entry[arg], int), (
-                    "%s[%s][%d] type %s is '%s' should be an int but is %s. "
+                    "%s[%s][%d] kind %s is '%s' should be an int but is %s. "
                     "Full entry: %s" %
                     (name, k, arg, typ, entry[arg], type(entry[arg]), entry)
                     )
setup.py — 3 changes

@@ -24,6 +24,7 @@ setup(
       py_modules = py_modules,
       test_suite = 'nose.collector',
       url = web,
       tests_require = ['nose>=1.0'],
       version = VERSION,
+      python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4',
       zip_safe = zip_safe)
test/Makefile

@@ -47,7 +47,7 @@ check-3.5: check-bytecode
 
 #: Run working tests from Python 3.6
 check-3.6: check-bytecode
-	$(PYTHON) test_pythonlib.py --bytecode-3.6 --verify $(COMPILE)
+	$(PYTHON) test_pythonlib.py --bytecode-3.6 --weak-verify $(COMPILE)
 
 #: Check deparsing only, but from a different Python version
 check-disasm:
BIN test/bytecode_2.7/02_ifelse_lambda.pyc (new file) — binary file not shown
BIN test/bytecode_3.6/02_ifelse_lambda.pyc (new file) — binary file not shown
test/simple_source/branching/02_ifelse_lambda.py (new file) — 16 changes

@@ -0,0 +1,16 @@
+# We have to do contortions here because
+# lambda's have to be more or less on a line
+
+f = lambda x: 1 if x<2 else 3
+f(5)
+
+# If that wasn't enough ...
+# Python will create dead code
+# in the below. So we must make sure
+# not to include the else expression
+
+g = lambda: 1 if True else 3
+g()
+
+h = lambda: 1 if False else 3
+h()
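The dead-code point in the comments above can be seen directly with the standard library's disassembler; a minimal sketch (output varies by CPython version):

```
import dis

# With a constant-true test, the conditional jump may be optimized away
# while the unreachable "else" value can remain in the bytecode as dead code.
dis.dis(lambda: 1 if True else 3)
```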
uncompyle6/parser.py

@@ -44,21 +44,25 @@ class PythonParser(GenericASTBuilder):
         else:
             return self.ast_first_offset(ast[0])
 
-    def add_unique_rule(self, rule, opname, count, customize):
+    def add_unique_rule(self, rule, opname, arg_count, customize):
         """Add rule to grammar, but only if it hasn't been added previously
-           opname and count are used in the customize() semantic the actions
-           to add the semantic action rule. Often, count is not used.
+           opname and stack_count are used in the customize() semantic
+           the actions to add the semantic action rule. Stack_count is
+           used in custom opcodes like MAKE_FUNCTION to indicate how
+           many arguments it has. Often it is not used.
         """
         if rule not in self.new_rules:
             # print("XXX ", rule) # debug
             self.new_rules.add(rule)
             self.addRule(rule, nop_func)
-            customize[opname] = count
+            customize[opname] = arg_count
             pass
         return
 
     def add_unique_rules(self, rules, customize):
-        """Add rules (a list of string) to grammar
+        """Add rules (a list of string) to grammar. Note that
+           the rules must not be those that set arg_count in the
+           custom dictionary.
         """
         for rule in rules:
             if len(rule) == 0:
@@ -68,7 +72,9 @@ class PythonParser(GenericASTBuilder):
         return
 
     def add_unique_doc_rules(self, rules_str, customize):
-        """Add rules (a docstring-like list of rules) to grammar
+        """Add rules (a docstring-like list of rules) to grammar.
+           Note that the rules must not be those that set arg_count in the
+           custom dictionary.
         """
         rules = [r.strip() for r in rules_str.split("\n")]
         self.add_unique_rules(rules, customize)
@@ -85,14 +91,14 @@ class PythonParser(GenericASTBuilder):
         for i in dir(self):
             setattr(self, i, None)
 
-    def debug_reduce(self, rule, tokens, parent, i):
+    def debug_reduce(self, rule, tokens, parent, last_token_pos):
         """Customized format and print for our kind of tokens
         which gets called in debugging grammar reduce rules
         """
         def fix(c):
             s = str(c)
-            i = s.find('_')
-            return s if i == -1 else s[:i]
+            last_token_pos = s.find('_')
+            return s if last_token_pos == -1 else s[:last_token_pos]
 
         prefix = ''
         if parent and tokens:
@@ -104,13 +110,13 @@ class PythonParser(GenericASTBuilder):
             if hasattr(p_token, 'offset'):
                 prefix += "%3s" % fix(p_token.offset)
                 if len(rule[1]) > 1:
-                    prefix += '-%-3s ' % fix(tokens[i-1].offset)
+                    prefix += '-%-3s ' % fix(tokens[last_token_pos-1].offset)
                 else:
                     prefix += ' '
         else:
             prefix = ' '
 
-        print("%s%s ::= %s" % (prefix, rule[0], ' '.join(rule[1])))
+        print("%s%s ::= %s (%d)" % (prefix, rule[0], ' '.join(rule[1]), last_token_pos))
 
     def error(self, instructions, index):
         # Find the last line boundary
@@ -128,7 +134,7 @@ class PythonParser(GenericASTBuilder):
         raise ParserError(err_token, err_token.offset)
 
     def typestring(self, token):
-        return token.type
+        return token.kind
 
     def nonterminal(self, nt, args):
         if nt in self.collect and len(args) > 1:
@@ -250,8 +256,11 @@ class PythonParser(GenericASTBuilder):
 
         stmt ::= return_stmt
         return_stmt ::= ret_expr RETURN_VALUE
+        return_stmt_lambda ::= ret_expr RETURN_VALUE_LAMBDA
 
         return_stmts ::= return_stmt
         return_stmts ::= _stmts return_stmt
 
         """
         pass
 
@@ -526,7 +535,9 @@ class PythonParser(GenericASTBuilder):
         stmt ::= return_lambda
         stmt ::= conditional_lambda
 
-        return_lambda ::= ret_expr RETURN_VALUE LAMBDA_MARKER
+        return_lambda ::= ret_expr RETURN_VALUE_LAMBDA LAMBDA_MARKER
+        return_lambda ::= ret_expr RETURN_VALUE_LAMBDA
 
         conditional_lambda ::= expr jmp_false return_if_stmt return_stmt LAMBDA_MARKER
 
         cmp ::= cmp_list
@@ -722,7 +733,7 @@ def get_python_parser(
     else:
         p = parse3.Python3ParserSingle(debug_parser)
     p.version = version
-    # p.dumpGrammar() # debug
+    # p.dump_grammar() # debug
     return p
 
 class PythonParserSingle(PythonParser):
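To make the renamed arg_count parameter above concrete, here is a hedged sketch of a call the customization machinery might make; the grammar rule string and the parser instance `p` are illustrative, not taken from this diff:

```
# p is assumed to be an instance of a PythonParser subclass;
# the mkfunc rule below is made up for illustration.
customize = {}
rule = 'mkfunc ::= pos_arg pos_arg LOAD_CONST MAKE_FUNCTION_2'

# add_unique_rule() registers the rule once and records the opname's
# argument stack count in the customize dict, as the docstring explains.
p.add_unique_rule(rule, 'MAKE_FUNCTION_2', 2, customize)
assert customize['MAKE_FUNCTION_2'] == 2
```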
uncompyle6/parsers/astnode.py

@@ -16,7 +16,7 @@ class AST(spark_AST):
         return self.__repr1__('', None)
 
     def __repr1__(self, indent, sibNum=None):
-        rv = str(self.type)
+        rv = str(self.kind)
         if sibNum is not None:
             rv = "%2d. %s" % (sibNum, rv)
         enumerate_children = False
uncompyle6/parsers/parse15.py

@@ -29,8 +29,8 @@ class Python15ParserSingle(Python21Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python15Parser()
-    p.checkGrammar()
-    p.dumpGrammar()
+    p.check_grammar()
+    p.dump_grammar()
 
 # local variables:
 # tab-width: 4
uncompyle6/parsers/parse2.py

@@ -417,4 +417,4 @@ class Python2ParserSingle(Python2Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python2Parser()
-    p.checkGrammar()
+    p.check_grammar()
uncompyle6/parsers/parse21.py

@@ -33,8 +33,8 @@ class Python21ParserSingle(Python22Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python21Parser()
-    p.checkGrammar()
-    p.dumpGrammar()
+    p.check_grammar()
+    p.dump_grammar()
 
 # local variables:
 # tab-width: 4
uncompyle6/parsers/parse22.py

@@ -26,8 +26,8 @@ class Python22ParserSingle(Python23Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python22Parser()
-    p.checkGrammar()
-    p.dumpGrammar()
+    p.check_grammar()
+    p.dump_grammar()
 
 # local variables:
 # tab-width: 4
uncompyle6/parsers/parse23.py

@@ -67,8 +67,8 @@ class Python23ParserSingle(Python23Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python23Parser()
-    p.checkGrammar()
-    p.dumpGrammar()
+    p.check_grammar()
+    p.dump_grammar()
 
 # local variables:
 # tab-width: 4
uncompyle6/parsers/parse24.py

@@ -58,10 +58,11 @@ class Python24Parser(Python25Parser):
         if invalid:
             return invalid
 
+        # FiXME: this code never gets called...
         lhs = rule[0]
         if lhs == 'nop_stmt':
-            return not int(tokens[first].pattr) == tokens[last].offset
+            l = len(tokens)
+            if 0 <= l < len(tokens):
+                return not int(tokens[first].pattr) == tokens[last].offset
 
         return False
 
@@ -71,4 +72,4 @@ class Python24ParserSingle(Python24Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python24Parser()
-    p.checkGrammar()
+    p.check_grammar()
uncompyle6/parsers/parse25.py

@@ -60,4 +60,4 @@ class Python25ParserSingle(Python26Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python25Parser()
-    p.checkGrammar()
+    p.check_grammar()
uncompyle6/parsers/parse26.py

@@ -247,7 +247,9 @@ class Python26Parser(Python2Parser):
     and  ::= expr JUMP_IF_FALSE POP_TOP expr JUMP_IF_FALSE POP_TOP
     cmp_list ::= expr cmp_list1 ROT_TWO COME_FROM POP_TOP _come_from
 
-    conditional_lambda ::= expr jmp_false_then return_if_stmt return_stmt LAMBDA_MARKER
+    return_if_lambda   ::= RETURN_END_IF_LAMBDA POP_TOP
+    conditional_lambda ::= expr jmp_false_then expr return_if_lambda
+                           return_stmt_lambda LAMBDA_MARKER
     """
 
     def add_custom_rules(self, tokens, customize):
@@ -274,10 +276,10 @@ class Python26ParserSingle(Python2Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python26Parser()
-    p.checkGrammar()
+    p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY
     if PYTHON_VERSION == 2.6:
-        lhs, rhs, tokens, right_recursive = p.checkSets()
+        lhs, rhs, tokens, right_recursive = p.check_sets()
         from uncompyle6.scanner import get_scanner
         s = get_scanner(PYTHON_VERSION, IS_PYPY)
         opcode_set = set(s.opc.opname).union(set(
uncompyle6/parsers/parse27.py

@@ -94,6 +94,10 @@ class Python27Parser(Python2Parser):
                WITH_CLEANUP END_FINALLY
 
+    # Common with 2.6
+    return_if_lambda   ::= RETURN_END_IF_LAMBDA COME_FROM
+    conditional_lambda ::= expr jmp_false expr return_if_lambda
+                           return_stmt_lambda LAMBDA_MARKER
 
     while1stmt ::= SETUP_LOOP return_stmts bp_come_from
     while1stmt ::= SETUP_LOOP return_stmts COME_FROM
     """
@@ -125,10 +129,10 @@ class Python27ParserSingle(Python27Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python27Parser()
-    p.checkGrammar()
+    p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY
     if PYTHON_VERSION == 2.7:
-        lhs, rhs, tokens, right_recursive = p.checkSets()
+        lhs, rhs, tokens, right_recursive = p.check_sets()
         from uncompyle6.scanner import get_scanner
         s = get_scanner(PYTHON_VERSION, IS_PYPY)
         opcode_set = set(s.opc.opname).union(set(
@@ -144,4 +148,4 @@ if __name__ == '__main__':
                               for t in remain_tokens])
         remain_tokens = set(remain_tokens) - opcode_set
         print(remain_tokens)
-        # p.dumpGrammar()
+        # p.dump_grammar()
uncompyle6/parsers/parse3.py

@@ -157,8 +157,13 @@ class Python3Parser(PythonParser):
     # of missing "else" clauses. Therefore we include grammar
     # rules with and without ELSE.
 
-    ifelsestmt ::= testexpr c_stmts_opt JUMP_FORWARD else_suite opt_come_from_except
-    ifelsestmt ::= testexpr c_stmts_opt jump_forward_else else_suite _come_from
+    ifelsestmt ::= testexpr c_stmts_opt JUMP_FORWARD
+                   else_suite opt_come_from_except
+    ifelsestmt ::= testexpr c_stmts_opt jump_forward_else
+                   else_suite _come_from
+
+    # ifelsestmt ::= testexpr c_stmts_opt jump_forward_else
+    #                passstmt _come_from
 
     ifelsestmtc ::= testexpr c_stmts_opt JUMP_ABSOLUTE else_suitec
     ifelsestmtc ::= testexpr c_stmts_opt jump_absolute_else else_suitec
@@ -254,8 +259,14 @@ class Python3Parser(PythonParser):
                POP_BLOCK LOAD_CONST COME_FROM_WITH
                WITH_CLEANUP END_FINALLY
 
+    ## FIXME: Right now we have erroneous jump targets
+    ## This below is probably not correct when the COME_FROM is put in the right place
+    and  ::= expr jmp_false expr COME_FROM
+    or   ::= expr jmp_true expr COME_FROM
+
+    # # something like the below is needed when the jump targets are fixed
+    ## or   ::= expr JUMP_IF_TRUE_OR_POP COME_FROM expr
+    ## and  ::= expr JUMP_IF_FALSE_OR_POP COME_FROM expr
     '''
 
     def p_misc3(self, args):
@@ -418,6 +429,13 @@ class Python3Parser(PythonParser):
         # a JUMP_ABSOLUTE with no COME_FROM
         conditional ::= expr jmp_false expr jump_absolute_else expr
 
+        return_if_lambda   ::= RETURN_END_IF_LAMBDA
+        conditional_lambda ::= expr jmp_false return_stmt_lambda
+                               return_stmt_lambda LAMBDA_MARKER
+        conditional_lambda ::= expr jmp_false expr return_if_lambda
+                               return_stmt_lambda LAMBDA_MARKER
+
 
         expr ::= LOAD_CLASSNAME
 
         # Python 3.4+
@@ -428,7 +446,7 @@ class Python3Parser(PythonParser):
     @staticmethod
     def call_fn_name(token):
         """Customize CALL_FUNCTION to add the number of positional arguments"""
-        return '%s_%i' % (token.type, token.attr)
+        return '%s_%i' % (token.kind, token.attr)
 
     def custom_build_class_rule(self, opname, i, token, tokens, customize):
         '''
@@ -444,16 +462,16 @@ class Python3Parser(PythonParser):
         # FIXME: I bet this can be simplified
         # look for next MAKE_FUNCTION
         for i in range(i+1, len(tokens)):
-            if tokens[i].type.startswith('MAKE_FUNCTION'):
+            if tokens[i].kind.startswith('MAKE_FUNCTION'):
                 break
-            elif tokens[i].type.startswith('MAKE_CLOSURE'):
+            elif tokens[i].kind.startswith('MAKE_CLOSURE'):
                 break
             pass
         assert i < len(tokens), "build_class needs to find MAKE_FUNCTION or MAKE_CLOSURE"
-        assert tokens[i+1].type == 'LOAD_CONST', \
+        assert tokens[i+1].kind == 'LOAD_CONST', \
             "build_class expecting CONST after MAKE_FUNCTION/MAKE_CLOSURE"
         for i in range(i, len(tokens)):
-            if tokens[i].type == 'CALL_FUNCTION':
+            if tokens[i].kind == 'CALL_FUNCTION':
                 call_fn_tok = tokens[i]
                 break
         assert call_fn_tok, "build_class custom rule needs to find CALL_FUNCTION"
@@ -494,7 +512,7 @@ class Python3Parser(PythonParser):
         # Yes, this computation based on instruction name is a little bit hoaky.
         nak = ( len(opname)-len('CALL_FUNCTION') ) // 3
 
-        token.type = self.call_fn_name(token)
+        token.kind = self.call_fn_name(token)
         uniq_param = args_kw + args_pos
         if self.version == 3.5 and opname.startswith('CALL_FUNCTION_VAR'):
             # Python 3.5 changes the stack position of *args. KW args come
@@ -506,33 +524,33 @@ class Python3Parser(PythonParser):
                 kw = ''
             rule = ('call_function ::= expr expr ' +
                     ('pos_arg ' * args_pos) +
-                    ('kwarg ' * args_kw) + kw + token.type)
-            self.add_unique_rule(rule, token.type, uniq_param, customize)
+                    ('kwarg ' * args_kw) + kw + token.kind)
+            self.add_unique_rule(rule, token.kind, uniq_param, customize)
         if self.version >= 3.6 and opname == 'CALL_FUNCTION_EX_KW':
             rule = ('call_function36 ::= '
                     'expr build_tuple_unpack_with_call build_map_unpack_with_call '
                     'CALL_FUNCTION_EX_KW_1')
-            self.add_unique_rule(rule, token.type, uniq_param, customize)
+            self.add_unique_rule(rule, token.kind, uniq_param, customize)
             rule = 'call_function ::= call_function36'
         else:
             rule = ('call_function ::= expr ' +
                     ('pos_arg ' * args_pos) +
                     ('kwarg ' * args_kw) +
-                    'expr ' * nak + token.type)
+                    'expr ' * nak + token.kind)
 
-        self.add_unique_rule(rule, token.type, uniq_param, customize)
+        self.add_unique_rule(rule, token.kind, uniq_param, customize)
         if self.version >= 3.5:
             rule = ('async_call_function ::= expr ' +
                     ('pos_arg ' * args_pos) +
                     ('kwarg ' * args_kw) +
-                    'expr ' * nak + token.type +
+                    'expr ' * nak + token.kind +
                     ' GET_AWAITABLE LOAD_CONST YIELD_FROM')
-            self.add_unique_rule(rule, token.type, uniq_param, customize)
-            self.add_unique_rule('expr ::= async_call_function', token.type, uniq_param, customize)
+            self.add_unique_rule(rule, token.kind, uniq_param, customize)
+            self.add_unique_rule('expr ::= async_call_function', token.kind, uniq_param, customize)
 
         rule = ('classdefdeco2 ::= LOAD_BUILD_CLASS mkfunc %s%s_%d'
                 % (('expr ' * (args_pos-1)), opname, args_pos))
-        self.add_unique_rule(rule, token.type, uniq_param, customize)
+        self.add_unique_rule(rule, token.kind, uniq_param, customize)
 
     def add_make_function_rule(self, rule, opname, attr, customize):
         """Python 3.3 added a an addtional LOAD_CONST before MAKE_FUNCTION and
@@ -606,7 +624,7 @@ class Python3Parser(PythonParser):
         call_function ::= expr CALL_METHOD
         """
         for i, token in enumerate(tokens):
-            opname = token.type
+            opname = token.kind
             opname_base = opname[:opname.rfind('_')]
 
             if opname == 'PyPy':
@@ -903,7 +921,8 @@ class Python3Parser(PythonParser):
                 last += 1
             return tokens[first].attr == tokens[last].offset
         elif lhs == 'while1stmt':
-            if tokens[last] in ('COME_FROM_LOOP', 'JUMP_BACK'):
+            if (0 <= last < len(tokens)
+                and tokens[last] in ('COME_FROM_LOOP', 'JUMP_BACK')):
                 # jump_back should be right afer SETUP_LOOP. Test?
                 last += 1
                 while last < len(tokens) and isinstance(tokens[last].offset, str):
@@ -947,10 +966,10 @@ def info(args):
         p = Python32Parser()
     elif arg == '3.0':
         p = Python30Parser()
-    p.checkGrammar()
+    p.check_grammar()
     if len(sys.argv) > 1 and sys.argv[1] == 'dump':
         print('-' * 50)
-        p.dumpGrammar()
+        p.dump_grammar()
 
 if __name__ == '__main__':
     import sys
uncompyle6/parsers/parse32.py

@@ -44,7 +44,7 @@ class Python32Parser(Python3Parser):
     def add_custom_rules(self, tokens, customize):
         super(Python32Parser, self).add_custom_rules(tokens, customize)
         for i, token in enumerate(tokens):
-            opname = token.type
+            opname = token.kind
             if opname.startswith('MAKE_FUNCTION_A'):
                 args_pos, args_kw, annotate_args = token.attr
                 # Check that there are 2 annotated params?
uncompyle6/parsers/parse34.py

@@ -29,10 +29,10 @@ class Python34ParserSingle(Python34Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python34Parser()
-    p.checkGrammar()
+    p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY
     if PYTHON_VERSION == 3.4:
-        lhs, rhs, tokens, right_recursive = p.checkSets()
+        lhs, rhs, tokens, right_recursive = p.check_sets()
         from uncompyle6.scanner import get_scanner
         s = get_scanner(PYTHON_VERSION, IS_PYPY)
         opcode_set = set(s.opc.opname).union(set(
uncompyle6/parsers/parse35.py

@@ -143,7 +143,7 @@ class Python35Parser(Python34Parser):
     def add_custom_rules(self, tokens, customize):
         super(Python35Parser, self).add_custom_rules(tokens, customize)
         for i, token in enumerate(tokens):
-            opname = token.type
+            opname = token.kind
             if opname == 'BUILD_MAP_UNPACK_WITH_CALL':
                 nargs = token.attr % 256
                 map_unpack_n = "map_unpack_%s" % nargs
@@ -153,7 +153,7 @@ class Python35Parser(Python34Parser):
                 self.add_unique_rule(rule, opname, token.attr, customize)
                 call_token = tokens[i+1]
                 if self.version == 3.5:
-                    rule = 'call_function ::= expr unmapexpr ' + call_token.type
+                    rule = 'call_function ::= expr unmapexpr ' + call_token.kind
                     self.add_unique_rule(rule, opname, token.attr, customize)
                     pass
                 pass
@@ -165,10 +165,10 @@ class Python35ParserSingle(Python35Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python35Parser()
-    p.checkGrammar()
+    p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY
     if PYTHON_VERSION == 3.5:
-        lhs, rhs, tokens, right_recursive = p.checkSets()
+        lhs, rhs, tokens, right_recursive = p.check_sets()
         from uncompyle6.scanner import get_scanner
         s = get_scanner(PYTHON_VERSION, IS_PYPY)
         opcode_set = set(s.opc.opname).union(set(
uncompyle6/parsers/parse36.py

@@ -37,7 +37,7 @@ class Python36Parser(Python35Parser):
     def add_custom_rules(self, tokens, customize):
         super(Python36Parser, self).add_custom_rules(tokens, customize)
         for i, token in enumerate(tokens):
-            opname = token.type
+            opname = token.kind
 
             if opname == 'FORMAT_VALUE':
                 rules_str = """
@@ -65,10 +65,10 @@ class Python36Parser(Python35Parser):
 
         if opname.startswith('CALL_FUNCTION_KW'):
             values = 'expr ' * token.attr
-            rule = 'call_function ::= expr kwargs_only_36 {token.type}'.format(**locals())
-            self.add_unique_rule(rule, token.type, token.attr, customize)
+            rule = 'call_function ::= expr kwargs_only_36 {token.kind}'.format(**locals())
+            self.add_unique_rule(rule, token.kind, token.attr, customize)
             rule = 'kwargs_only_36 ::= {values} LOAD_CONST'.format(**locals())
-            self.add_unique_rule(rule, token.type, token.attr, customize)
+            self.add_unique_rule(rule, token.kind, token.attr, customize)
         else:
             super(Python36Parser, self).custom_classfunc_rule(opname, token, customize)
 
@@ -79,10 +79,10 @@ class Python36ParserSingle(Python36Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python36Parser()
-    p.checkGrammar()
+    p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY
     if PYTHON_VERSION == 3.6:
-        lhs, rhs, tokens, right_recursive = p.checkSets()
+        lhs, rhs, tokens, right_recursive = p.check_sets()
         from uncompyle6.scanner import get_scanner
         s = get_scanner(PYTHON_VERSION, IS_PYPY)
         opcode_set = set(s.opc.opname).union(set(
uncompyle6/parsers/parse37.py

@@ -21,10 +21,10 @@ class Python37ParserSingle(Python37Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
     p = Python37Parser()
-    p.checkGrammar()
+    p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY
     if PYTHON_VERSION == 3.7:
-        lhs, rhs, tokens, right_recursive = p.checkSets()
+        lhs, rhs, tokens, right_recursive = p.check_sets()
         from uncompyle6.scanner import get_scanner
         s = get_scanner(PYTHON_VERSION, IS_PYPY)
         opcode_set = set(s.opc.opname).union(set(
uncompyle6/scanners/scanner2.py

@@ -93,12 +93,18 @@ class Scanner2(Scanner):
             for instr in bytecode.get_instructions(co):
                 print(instr._disassemble())
 
-        # Container for tokens
+        # list of tokens/instructions
         tokens = []
 
         # "customize" is a dict whose keys are nonterminals
+        # and the value is the argument stack entries for that
+        # nonterminal. The count is a little hoaky. It is mostly
+        # not used, but sometimes it is.
         customize = {}
 
         if self.is_pypy:
-            customize['PyPy'] = 1
+            customize['PyPy'] = 0
 
         Token = self.Token # shortcut
uncompyle6/scanners/scanner22.py

@@ -30,5 +30,5 @@ class Scanner22(scan.Scanner23):
 
     def ingest22(self, co, classname=None, code_objects={}, show_asm=None):
         tokens, customize = self.parent_ingest(co, classname, code_objects, show_asm)
-        tokens = [t for t in tokens if t.type != 'SET_LINENO']
+        tokens = [t for t in tokens if t.kind != 'SET_LINENO']
         return tokens, customize
uncompyle6/scanners/scanner26.py

@@ -215,8 +215,8 @@ class Scanner26(scan.Scanner2):
                     # FIXME: this is a hack to catch stuff like:
                     #   if x: continue
                     # the "continue" is not on a new line.
-                    if len(tokens) and tokens[-1].type == 'JUMP_BACK':
-                        tokens[-1].type = intern('CONTINUE')
+                    if len(tokens) and tokens[-1].kind == 'JUMP_BACK':
+                        tokens[-1].kind = intern('CONTINUE')
 
                 elif op in self.opc.JABS_OPS:
                     pattr = repr(oparg)
@@ -256,18 +256,18 @@ class Scanner26(scan.Scanner2):
                         and self.code[offset+3] not in (self.opc.END_FINALLY,
                                                         self.opc.POP_BLOCK)):
                         if ((offset in self.linestartoffsets and
-                             tokens[-1].type == 'JUMP_BACK')
+                             tokens[-1].kind == 'JUMP_BACK')
                             or offset not in self.not_continue):
                             op_name = 'CONTINUE'
                         else:
                             # FIXME: this is a hack to catch stuff like:
                             #   if x: continue
                             # the "continue" is not on a new line.
-                            if tokens[-1].type == 'JUMP_BACK':
+                            if tokens[-1].kind == 'JUMP_BACK':
                                 # We need 'intern' since we have
                                 # already have processed the previous
                                 # token.
-                                tokens[-1].type = intern('CONTINUE')
+                                tokens[-1].kind = intern('CONTINUE')
 
                 elif op == self.opc.LOAD_GLOBAL:
                     if offset in self.load_asserts:
uncompyle6/scanners/scanner27.py

@@ -94,9 +94,9 @@ class Scanner27(Scanner2):
                 # the "continue" is not on a new line.
                 n = len(tokens)
                 if (n > 2 and
-                    tokens[-1].type == 'JUMP_BACK' and
+                    tokens[-1].kind == 'JUMP_BACK' and
                     self.code[offset+3] == self.opc.END_FINALLY):
-                    tokens[-1].type = intern('CONTINUE')
+                    tokens[-1].kind = intern('CONTINUE')
 
             pass
uncompyle6/scanners/scanner3.py

@@ -169,12 +169,16 @@ class Scanner3(Scanner):
         for instr in bytecode.get_instructions(co):
             print(instr._disassemble())
 
-        # Container for tokens
+        # list of tokens/instructions
         tokens = []
 
+        # "customize" is a dict whose keys are nonterminals
+        # and the value is the argument stack entries for that
+        # nonterminal. The count is a little hoaky. It is mostly
+        # not used, but sometimes it is.
         customize = {}
         if self.is_pypy:
-            customize['PyPy'] = 1
+            customize['PyPy'] = 0
 
         self.code = array('B', co.co_code)
         self.build_lines_data(co)
@@ -390,12 +394,12 @@ class Scanner3(Scanner):
                     #  the "continue" is not on a new line.
                     #  There are other situations where we don't catch
                     #  CONTINUE as well.
-                    if tokens[-1].type == 'JUMP_BACK' and tokens[-1].attr <= argval:
-                        if tokens[-2].type == 'BREAK_LOOP':
+                    if tokens[-1].kind == 'JUMP_BACK' and tokens[-1].attr <= argval:
+                        if tokens[-2].kind == 'BREAK_LOOP':
                             del tokens[-1]
                         else:
                             # intern is used because we are changing the *previous* token
-                            tokens[-1].type = intern('CONTINUE')
+                            tokens[-1].kind = intern('CONTINUE')
                 if last_op_was_break and opname == 'CONTINUE':
                     last_op_was_break = False
                     continue
@@ -775,6 +779,10 @@ class Scanner3(Scanner):
 
         if ((code[prev_op[target]] in self.pop_jump_if_pop) and
             (target > offset) and prev_op[target] != offset):
+            # FIXME: this is not accurate The commented out below
+            # is what it should be. However grammar rules right now
+            # assume the incorrect offsets.
+            # self.fixed_jumps[offset] = target
             self.fixed_jumps[offset] = prev_op[target]
             self.structs.append({'type': 'and/or',
                                  'start': start,
@@ -943,7 +951,7 @@ class Scanner3(Scanner):
                     return
                 pass
             pass
-            if code[pre_rtarget] == self.opc.RETURN_VALUE and self.version < 3.5:
+            if code[pre_rtarget] == self.opc.RETURN_VALUE:
                 self.return_end_ifs.add(pre_rtarget)
             else:
                 self.fixed_jumps[offset] = rtarget
@@ -963,7 +971,7 @@ class Scanner3(Scanner):
             if target > next_offset:
                 next_op = code[next_offset]
                 if (self.opc.JUMP_ABSOLUTE == next_op and
-                    END_FINALLY != code[xdis.next_offset(next_op, self.opc, next_offset)]):
+                    self.opc.END_FINALLY != code[xdis.next_offset(next_op, self.opc, next_offset)]):
                     self.fixed_jumps[next_offset] = target
                     self.except_targets[target] = next_offset
 
@@ -986,7 +994,8 @@ class Scanner3(Scanner):
         # misclassified as RETURN_END_IF. Handle that here.
         # In RETURN_VALUE, JUMP_ABSOLUTE, RETURN_VALUE is never RETURN_END_IF
         if op == self.opc.RETURN_VALUE:
-            if (offset+1 < len(code) and code[offset+1] == self.opc.JUMP_ABSOLUTE and
+            next_offset = xdis.next_offset(op, self.opc, offset)
+            if (next_offset < len(code) and code[next_offset] == self.opc.JUMP_ABSOLUTE and
                 offset in self.return_end_ifs):
                 self.return_end_ifs.remove(offset)
                 pass
uncompyle6/scanners/tok.py

@@ -21,7 +21,7 @@ class Token():
     # pattr = argrepr
     def __init__(self, opname, attr=None, pattr=None, offset=-1,
                  linestart=None, op=None, has_arg=None, opc=None):
-        self.type = intern(opname)
+        self.kind = intern(opname)
         self.op = op
         self.has_arg = has_arg
         self.attr = attr
@@ -38,18 +38,18 @@ class Token():
         if isinstance(o, Token):
             # Both are tokens: compare type and attr
             # It's okay if offsets are different
-            return (self.type == o.type) and (self.pattr == o.pattr)
+            return (self.kind == o.kind) and (self.pattr == o.pattr)
         else:
-            return self.type == o
+            return self.kind == o
 
     def __repr__(self):
-        return str(self.type)
+        return str(self.kind)
 
     # def __str__(self):
     #     pattr = self.pattr if self.pattr is not None else ''
     #     prefix = '\n%3d ' % self.linestart if self.linestart else (' ' * 6)
     #     return (prefix +
-    #             ('%9s %-18s %r' % (self.offset, self.type, pattr)))
+    #             ('%9s %-18s %r' % (self.offset, self.kind, pattr)))
 
     def __str__(self):
         return self.format(line_prefix='')
@@ -57,7 +57,7 @@ class Token():
     def format(self, line_prefix=''):
         prefix = ('\n%s%4d ' % (line_prefix, self.linestart)
                   if self.linestart else (' ' * (6 + len(line_prefix))))
-        offset_opname = '%6s %-17s' % (self.offset, self.type)
+        offset_opname = '%6s %-17s' % (self.offset, self.kind)
         if not self.has_arg:
             return "%s%s" % (prefix, offset_opname)
         argstr = "%6d " % self.attr if isinstance(self.attr, int) else (' '*7)
@@ -77,14 +77,14 @@ class Token():
             pattr = self.opc.cmp_op[self.attr]
             # And so on. See xdis/bytecode.py get_instructions_bytes
             pass
-        elif re.search('_\d+$', self.type):
+        elif re.search('_\d+$', self.kind):
             return "%s%s%s" % (prefix, offset_opname, argstr)
         else:
             pattr = ''
         return "%s%s%s %r" % (prefix, offset_opname, argstr, pattr)
 
     def __hash__(self):
-        return hash(self.type)
+        return hash(self.kind)
 
     def __getitem__(self, i):
         raise IndexError
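The equality semantics shown in __eq__ above are worth spelling out: two tokens match on kind and pattr (offsets are ignored), and comparing a token against a plain string checks its kind. A small sketch, with the import path assumed from this repo's layout:

```
from uncompyle6.scanners.tok import Token

t1 = Token('LOAD_CONST', attr=1, pattr='1', offset=0)
t2 = Token('LOAD_CONST', attr=1, pattr='1', offset=12)

assert t1 == t2            # same kind and pattr; offsets may differ
assert t1 == 'LOAD_CONST'  # string comparison checks kind only
```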
uncompyle6/semantics/check_ast.py

@@ -9,16 +9,16 @@ before reduction and don't reduce when there is a problem.
 """
 
 def checker(ast, in_loop, errors):
-    in_loop = in_loop or ast.type in ('while1stmt', 'whileTruestmt',
+    in_loop = in_loop or ast.kind in ('while1stmt', 'whileTruestmt',
                                       'whilestmt', 'whileelsestmt', 'while1elsestmt',
                                       'for_block')
-    if ast.type in ('augassign1', 'augassign2') and ast[0][0] == 'and':
+    if ast.kind in ('augassign1', 'augassign2') and ast[0][0] == 'and':
         text = str(ast)
         error_text = '\n# improper augmented assigment (e.g. +=, *=, ...):\n#\t' + '\n# '.join(text.split("\n")) + '\n'
         errors.append(error_text)
 
     for node in ast:
-        if not in_loop and node.type in ('continue_stmt', 'break_stmt'):
+        if not in_loop and node.kind in ('continue_stmt', 'break_stmt'):
            text = str(node)
            error_text = '\n# not in loop:\n#\t' + '\n# '.join(text.split("\n"))
            errors.append(error_text)
uncompyle6/semantics/consts.py

@@ -173,8 +173,8 @@ TABLE_DIRECT = {
     'ret_cond':           ( '%p if %p else %p', (2, 27), (0, 27), (-1, 27) ),
     'conditionalnot':     ( '%p if not %p else %p', (2, 27), (0, 22), (4, 27) ),
     'ret_cond_not':       ( '%p if not %p else %p', (2, 27), (0, 22), (-1, 27) ),
-    'conditional_lambda': ( '(%c if %c else %c)', 2, 0, 3),
+    'return_lambda':      ('%c', 0),
+    'conditional_lambda': ( '%c if %c else %c', 2, 0, 4),
 
     'compare':            ( '%p %[-1]{pattr.replace("-", " ")} %p', (0, 19), (1, 19) ),
     'cmp_list':           ( '%p %p', (0, 29), (1, 30)),
     'cmp_list1':          ( '%[3]{pattr} %p %p', (0, 19), (-2, 19)),
@@ -209,6 +209,7 @@ TABLE_DIRECT = {
     'raise_stmt3':        ( '%|raise %c, %c, %c\n', 0, 1, 2),
     #    'yield':          ( 'yield %c', 0),
     #    'return_stmt':    ( '%|return %c\n', 0),
+    'return_if_stmt':     ( 'return %c\n', 0),
 
     'ifstmt':             ( '%|if %c:\n%+%c%-', 0, 1 ),
     'iflaststmt':         ( '%|if %c:\n%+%c%-', 0, 1 ),
@@ -331,6 +332,7 @@ PRECEDENCE = {
     'ret_or':               26,
 
     'conditional':          28,
+    'conditional_lamdba':   28,
     'conditionalnot':       28,
     'ret_cond':             28,
     'ret_cond_not':         28,
uncompyle6/semantics/fragments.py

@@ -1,6 +1,4 @@
 # Copyright (c) 2015-2017 by Rocky Bernstein
-# Copyright (c) 2005 by Dan Pascu <dan@windowmaker.org>
-# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
 # Copyright (c) 1999 John Aycock
 
 """
@@ -94,7 +92,6 @@ TABLE_DIRECT_FRAGMENT = {
     'importfrom':       ( '%|from %[2]{pattr}%x import %c\n', (2, (0, 1)), 3),
     'importmultiple':   ( '%|import%b %c%c\n', 0, 2, 3 ),
     'list_for':         (' for %c%x in %c%c', 2, (2, (1, )), 0, 3 ),
     'forstmt':          ( '%|for%b %c%x in %c:\n%+%c%-\n\n', 0, 3, (3, (2, )), 1, 4 ),
     'forelsestmt':      (
         '%|for %c%x in %c:\n%+%c%-%|else:\n%+%c%-\n\n', 3, (3, (2,)), 1, 4, -2),
     'forelselaststmt':  (
@@ -308,11 +305,11 @@ class FragmentsWalker(pysource.SourceWalker, object):
     def n_expr(self, node):
         start = len(self.f.getvalue())
         p = self.prec
-        if node[0].type.startswith('binary_expr'):
+        if node[0].kind.startswith('binary_expr'):
             n = node[0][-1][0]
         else:
             n = node[0]
-        self.prec = PRECEDENCE.get(n.type, -2)
+        self.prec = PRECEDENCE.get(n.kind, -2)
         if n == 'LOAD_CONST' and repr(n.pattr)[0] == '-':
             n.parent = node
             self.set_pos_info(n, start, len(self.f.getvalue()))
@@ -405,7 +402,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
     def n_ifelsestmtr(self, node):
         if node[2] == 'COME_FROM':
             return_stmts_node = node[3]
-            node.type = 'ifelsestmtr2'
+            node.kind = 'ifelsestmtr2'
         else:
             return_stmts_node = node[2]
         if len(return_stmts_node) != 2:
@@ -438,7 +435,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         for n in return_stmts_node[0]:
             if (n[0] == 'ifstmt' and n[0][1][0] == 'return_if_stmts'):
                 if prev_stmt_is_if_ret:
-                    n[0].type = 'elifstmt'
+                    n[0].kind = 'elifstmt'
                 prev_stmt_is_if_ret = True
             else:
                 prev_stmt_is_if_ret = False
@@ -477,7 +474,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         self.indent_less()
 
         for n in node[2][0]:
-            n[0].type = 'elifstmt'
+            n[0].kind = 'elifstmt'
             n.parent = node
             self.preorder(n)
         self.println(self.indent, 'else:')
@@ -493,7 +490,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         iname = node[0].pattr
 
         store_import_node = node[-1][-1]
-        assert store_import_node.type.startswith('STORE_')
+        assert store_import_node.kind.startswith('STORE_')
 
         sname = store_import_node.pattr
         self.write(iname)
@@ -554,7 +551,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         elif n == 'list_if': n = n[2]
         elif n == 'list_if_not': n= n[2]
         assert n == 'lc_body'
-        if node[0].type.startswith('BUILD_LIST'):
+        if node[0].kind.startswith('BUILD_LIST'):
             start = len(self.f.getvalue())
             self.set_pos_info(node[0], start, start+1)
             self.write( '[ ')
@@ -687,7 +684,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
 
         # Python 2.7+ starts including set_comp_body
         # Python 3.5+ starts including setcomp_func
-        assert n.type in ('lc_body', 'comp_body', 'setcomp_func', 'set_comp_body'), ast
+        assert n.kind in ('lc_body', 'comp_body', 'setcomp_func', 'set_comp_body'), ast
         assert designator, "Couldn't find designator in list/set comprehension"
 
         old_name = self.name
@@ -714,7 +711,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         self.preorder(if_node)
         self.prec = p
         self.name = old_name
-        if node[-1].type.startswith('CALL_FUNCTION'):
+        if node[-1].kind.startswith('CALL_FUNCTION'):
             self.set_pos_info(node[-1], gen_start, len(self.f.getvalue()))
 
     def listcomprehension_walk2(self, node):
@@ -743,7 +740,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
             n = n[3]
         elif n in ('list_if', 'list_if_not'):
             # FIXME: just a guess
-            if n[0].type == 'expr':
+            if n[0].kind == 'expr':
                 list_if = n
             else:
                 list_if = n[1]
@@ -786,7 +783,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
             start = len(self.f.getvalue())
             self.set_pos_info(node[0], start-1, start)
             self.comprehension_walk3(node, 1, 0)
-        elif node[0].type == 'load_closure':
+        elif node[0].kind == 'load_closure':
             self.setcomprehension_walk3(node, collection_index=4)
         else:
             self.comprehension_walk(node, iter_index=4)
@@ -805,7 +802,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         self.set_pos_info(node[0], start, len(self.f.getvalue()))
         self.write(': {')
         start = len(self.f.getvalue())
-        assert node[0].type.startswith('BUILD_SET')
+        assert node[0].kind.startswith('BUILD_SET')
         self.set_pos_info(node[0], start-1, start)
         designator = node[3]
         assert designator == 'designator'
@@ -814,7 +811,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         fin = len(self.f.getvalue())
         self.set_pos_info(designator, start, fin)
         for_iter_node = node[2]
-        assert for_iter_node.type == 'FOR_ITER'
+        assert for_iter_node.kind == 'FOR_ITER'
         self.set_pos_info(for_iter_node, start, fin)
         self.write(" for ")
         self.preorder(designator)
@@ -833,7 +830,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
 
     def n_listcomp(self, node):
         self.write('[')
-        if node[0].type == 'load_closure':
+        if node[0].kind == 'load_closure':
             self.listcomprehension_walk2(node)
         else:
             if node[0] == 'LOAD_LISTCOMP':
@@ -847,7 +844,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         if len(node) > 1:
             if (node[0] == 'c_stmts_opt' and
                 node[0][0] == 'passstmt' and
-                node[1].type.startswith('JUMP_FORWARD')):
+                node[1].kind.startswith('JUMP_FORWARD')):
                 self.set_pos_info(node[1], node[0][0].start, node[0][0].finish)
 
     def setcomprehension_walk3(self, node, collection_index):
@@ -878,7 +875,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
             n = n[3]
         elif n in ('list_if', 'list_if_not', 'comp_if', 'comp_if_not'):
             # FIXME: just a guess
-            if n[0].type == 'expr':
+            if n[0].kind == 'expr':
                 list_if = n
             else:
                 list_if = n[1]
@@ -1033,8 +1030,8 @@ class FragmentsWalker(pysource.SourceWalker, object):
         # NOTE: this differs from behavior in pysource.py
 
         if len(tokens) >= 2 and not noneInNames:
-            if tokens[-1].type == 'RETURN_VALUE':
-                if tokens[-2].type != 'LOAD_CONST':
+            if tokens[-1].kind == 'RETURN_VALUE':
+                if tokens[-2].kind != 'LOAD_CONST':
                     tokens.append(Token('RETURN_LAST'))
         if len(tokens) == 0:
             return
@@ -1286,10 +1283,10 @@ class FragmentsWalker(pysource.SourceWalker, object):
         # as a custom rule
         start = len(self.f.getvalue())
         n = len(node)-1
-        assert node[n].type.startswith('CALL_FUNCTION')
+        assert node[n].kind.startswith('CALL_FUNCTION')
 
         for i in range(n-2, 0, -1):
-            if not node[i].type in ['expr', 'LOAD_CLASSNAME']:
+            if not node[i].kind in ['expr', 'LOAD_CLASSNAME']:
                 break
             pass
 
@@ -1324,7 +1321,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         self.write('{')
 
         if self.version > 3.0:
-            if node[0].type.startswith('kvlist'):
+            if node[0].kind.startswith('kvlist'):
                 # Python 3.5+ style key/value list in mapexpr
                 kv_node = node[0]
                 l = list(kv_node)
@@ -1339,11 +1336,11 @@ class FragmentsWalker(pysource.SourceWalker, object):
                     i += 2
                     pass
                 pass
-            elif node[1].type.startswith('kvlist'):
+            elif node[1].kind.startswith('kvlist'):
                 # Python 3.0..3.4 style key/value list in mapexpr
                 kv_node = node[1]
                 l = list(kv_node)
-                if len(l) > 0 and l[0].type == 'kv3':
+                if len(l) > 0 and l[0].kind == 'kv3':
                     # Python 3.2 does this
                     kv_node = node[1][0]
                     l = list(kv_node)
@@ -1366,7 +1363,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
             pass
         else:
             # Python 2 style kvlist
-            assert node[-1].type.startswith('kvlist')
+            assert node[-1].kind.startswith('kvlist')
             kv_node = node[-1] # goto kvlist
 
             for kv in kv_node:
@@ -1405,7 +1402,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         p = self.prec
         self.prec = 100
         n = node.pop()
-        lastnode = n.type
+        lastnode = n.kind
         start = len(self.f.getvalue())
         if lastnode.startswith('BUILD_LIST'):
             self.write('['); endchar = ']'
@@ -1520,7 +1517,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         # for loops have two positions that correspond to a single text
         # location. In "for i in ..." there is the initialization "i" code as well
         # as the iteration code with "i"
-        match = re.search(r'^for', startnode.type)
+        match = re.search(r'^for', startnode.kind)
         if match and entry[arg] == 3:
             self.set_pos_info(node[0], start, finish)
             for n in node[2]:
@@ -1614,7 +1611,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
         # 2. subroutine calls. It the last op is the call and for purposes of printing
         # we don't need to print anything special there. However it encompases the
         # entire string of the node fn(...)
-        match = re.search(r'^call_function', startnode.type)
+        match = re.search(r'^call_function', startnode.kind)
         if match:
             last_node = startnode[-1]
             # import traceback; traceback.print_stack()
@@ -1755,7 +1752,7 @@ if __name__ == '__main__':
         nodeInfo = walk.offsets[name, offset]
         node = nodeInfo.node
         extractInfo = walk.extract_node_info(node)
-        print("code: %s" % node.type)
+        print("code: %s" % node.kind)
         # print extractInfo
         print(extractInfo.selectedText)
         print(extractInfo.selectedLine)
@@ -1765,7 +1762,7 @@ if __name__ == '__main__':
             print("Contained in...")
             print(extractInfo.selectedLine)
             print(extractInfo.markerLine)
-            print("code: %s" % p.type)
+            print("code: %s" % p.kind)
             print('=' * 40)
             pass
         pass
@@ -1784,7 +1781,7 @@ if __name__ == '__main__':
         nodeInfo = walk.offsets[name, offset]
         node = nodeInfo.node
         extractInfo = walk.extract_node_info(node)
-        print("code: %s" % node.type)
+        print("code: %s" % node.kind)
         # print extractInfo
         print(extractInfo.selectedText)
         print(extractInfo.selectedLine)
@@ -1794,7 +1791,7 @@ if __name__ == '__main__':
             print("Contained in...")
             print(extractInfo.selectedLine)
             print(extractInfo.markerLine)
-            print("code: %s" % p.type)
+            print("code: %s" % p.kind)
             print('=' * 40)
             pass
         pass
@@ -22,7 +22,7 @@ def find_all_globals(node, globs):
     for n in node:
         if isinstance(n, AST):
             globs = find_all_globals(n, globs)
-        elif n.type in ('STORE_GLOBAL', 'DELETE_GLOBAL', 'LOAD_GLOBAL'):
+        elif n.kind in ('STORE_GLOBAL', 'DELETE_GLOBAL', 'LOAD_GLOBAL'):
             globs.add(n.pattr)
     return globs

@@ -31,7 +31,7 @@ def find_globals(node, globs):
     for n in node:
         if isinstance(n, AST):
             globs = find_globals(n, globs)
-        elif n.type in ('STORE_GLOBAL', 'DELETE_GLOBAL'):
+        elif n.kind in ('STORE_GLOBAL', 'DELETE_GLOBAL'):
             globs.add(n.pattr)
     return globs

@@ -41,7 +41,7 @@ def find_none(node):
         if n not in ('return_stmt', 'return_if_stmt'):
             if find_none(n):
                 return True
-        elif n.type == 'LOAD_CONST' and n.pattr is None:
+        elif n.kind == 'LOAD_CONST' and n.pattr is None:
             return True
     return False

@@ -69,7 +69,7 @@ def make_function3_annotate(self, node, isLambda, nested=1,
         return name

     # MAKE_FUNCTION_... or MAKE_CLOSURE_...
-    assert node[-1].type.startswith('MAKE_')
+    assert node[-1].kind.startswith('MAKE_')

     annotate_tuple = None
     for annotate_last in range(len(node)-1, -1, -1):
@@ -85,7 +85,7 @@ def make_function3_annotate(self, node, isLambda, nested=1,
         i = -1
         j = annotate_last-1
         l = -len(node)
-        while j >= l and node[j].type in ('annotate_arg' 'annotate_tuple'):
+        while j >= l and node[j].kind in ('annotate_arg' 'annotate_tuple'):
             annotate_args[annotate_tup[i]] = node[j][0]
             i -= 1
             j -= 1
@@ -111,7 +111,7 @@ def make_function3_annotate(self, node, isLambda, nested=1,
         lambda_index = None

     if lambda_index and isLambda and iscode(node[lambda_index].attr):
-        assert node[lambda_index].type == 'LOAD_LAMBDA'
+        assert node[lambda_index].kind == 'LOAD_LAMBDA'
         code = node[lambda_index].attr
     else:
         code = codeNode.attr
@@ -320,7 +320,7 @@ def make_function2(self, node, isLambda, nested=1, codeNode=None):
         return name

     # MAKE_FUNCTION_... or MAKE_CLOSURE_...
-    assert node[-1].type.startswith('MAKE_')
+    assert node[-1].kind.startswith('MAKE_')

     args_node = node[-1]
     if isinstance(args_node.attr, tuple):
@@ -336,7 +336,7 @@ def make_function2(self, node, isLambda, nested=1, codeNode=None):
         lambda_index = None

     if lambda_index and isLambda and iscode(node[lambda_index].attr):
-        assert node[lambda_index].type == 'LOAD_LAMBDA'
+        assert node[lambda_index].kind == 'LOAD_LAMBDA'
         code = node[lambda_index].attr
     else:
         code = codeNode.attr
@@ -472,7 +472,7 @@ def make_function3(self, node, isLambda, nested=1, codeNode=None):
         return name

     # MAKE_FUNCTION_... or MAKE_CLOSURE_...
-    assert node[-1].type.startswith('MAKE_')
+    assert node[-1].kind.startswith('MAKE_')


     # Python 3.3+ adds a qualified name at TOS (-1)
@@ -510,7 +510,7 @@ def make_function3(self, node, isLambda, nested=1, codeNode=None):


     if lambda_index and isLambda and iscode(node[lambda_index].attr):
-        assert node[lambda_index].type == 'LOAD_LAMBDA'
+        assert node[lambda_index].kind == 'LOAD_LAMBDA'
         code = node[lambda_index].attr
     else:
         code = codeNode.attr
@@ -573,7 +573,7 @@ def make_function3(self, node, isLambda, nested=1, codeNode=None):
     for n in node:
         if n == 'pos_arg':
             continue
-        elif self.version >= 3.4 and not (n.type in ('kwargs', 'kwarg')):
+        elif self.version >= 3.4 and not (n.kind in ('kwargs', 'kwarg')):
             continue
         else:
             self.preorder(n)

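Almost every hunk in this compare is the same mechanical rename: parse-tree nodes and tokens are now queried through a kind attribute rather than type, apparently tracking an attribute rename in the underlying spark parser. A minimal sketch of the pattern the helpers above rely on; the Node class here is a stand-in, not uncompyle6's actual AST or Token classes:

# Illustrative stand-in for a parse-tree node: the grammar symbol is
# exposed as .kind (older code read .type) and .pattr carries the
# human-readable attribute, such as a variable name.
class Node:
    def __init__(self, kind, children=(), pattr=None):
        self.kind = kind
        self.children = list(children)
        self.pattr = pattr

    def __iter__(self):
        return iter(self.children)

def find_globals_sketch(node, globs):
    # same shape as find_globals() in the hunk above, written against .kind
    for n in node:
        if n.children:
            globs = find_globals_sketch(n, globs)
        elif n.kind in ('STORE_GLOBAL', 'DELETE_GLOBAL'):
            globs.add(n.pattr)
    return globs

tree = Node('stmts', [Node('STORE_GLOBAL', pattr='x'),
                      Node('LOAD_GLOBAL', pattr='y')])
print(find_globals_sketch(tree, set()))  # {'x'}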
@@ -25,21 +25,30 @@ Python.
 #   of the nonterminal is suffixed with "_exit" it will be called after
 #   all of its children are called.
 #
+#   However if this were done for all of the rules, this file would be even longer
+#   than it is already.
+#   After a while writing methods this way, you'll find many routines which do similar
+#   sorts of things, and soon you'll find you want a short notation to
+#   describe rules and not have to create methods at all.
 #
-#   Another more compact way to specify a semantic rule for a nonterminal is via
-#   rule given in one of the tables MAP_R0, MAP_R, or MAP_DIRECT.
+#   So another other way to specify a semantic rule for a nonterminal is via
+#   one of the tables MAP_R0, MAP_R, or MAP_DIRECT where the key is the
+#   nonterminal name.
 #
-#   These uses a printf-like syntax to direct substitution from attributes
-#   of the nonterminal and its children..
+#   These dictionaries use a printf-like syntax to direct substitution
+#   from attributes of the nonterminal and its children..
 #
+#   The rest of the below describes how table-driven semantic actions work
+#   and gives a list of the format specifiers. The default() and
+#   template_engine() methods implement most of the below.
 #
-#   Step 1 determines a table (T) and a path to a
-#   table key (K) from the node type (N) (other nodes are shown as O):
+#   We allow for a couple of ways to interact with a node in a tree. So
+#   step 1 after not seeing a custom method for a nonterminal is to
+#   determine from what point of view tree-wise the rule is applied.

+#   In the diagram below, N is a nonterminal name, and K also a nonterminal
+#   name but the one used as a key in the table.
+#   we show where those are with respect to each other in the
+#   AST tree for N.
 #
 #
 #            N&K                 N                  N
 #           / | ... \          / | ... \          / | ... \
@@ -48,13 +57,19 @@ Python.
 #                                                  K
 #   TABLE_DIRECT          TABLE_R             TABLE_R0
 #
-#   The default is a "TABLE_DIRECT" mapping. The key K is then extracted from the
-#   subtree and used to find a table entry T[K], if any. The result is a
-#   format string and arguments (a la printf()) for the formatting engine.
+#   The default table is TABLE_DIRECT mapping By far, most rules used work this way.
+#   TABLE_R0 is rarely used.
 #
+#   The key K is then extracted from the subtree and used to find one
+#   of the tables, T listed above. The result after applying T[K] is
+#   a format string and arguments (a la printf()) for the formatting
+#   engine.
 #
 #   Escapes in the format string are:
 #
 #   %c  evaluate the node recursively. Its argument is a single
 #       integer representing a node index.
 #
+#   %p  like %c but sets the operator precedence.
+#       Its argument then is a tuple indicating the node
+#       index and the precidence value, an integer.
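To make the table-driven description above concrete, here is a self-contained toy version of the idea. The real tables and template_engine() live in the module being patched; the two-specifier grammar and every name below are illustrative only:

# Toy table-driven formatter: a table entry is a printf-like format
# string plus arguments, and '%c' recurses into the child whose index
# the next argument names.
class Node:
    def __init__(self, kind, children=(), value=None):
        self.kind, self.children, self.value = kind, list(children), value

TABLE_DIRECT = {
    'binary_expr': ('%c + %c', 0, 1),
    'unary_expr':  ('-%c', 0),
}

def engine(node):
    if node.kind == 'LOAD_CONST':         # leaf: emit the constant itself
        return str(node.value)
    fmt, *args = TABLE_DIRECT[node.kind]  # the key K is the node's kind
    out, arg, i = '', iter(args), 0
    while i < len(fmt):
        if fmt.startswith('%c', i):       # evaluate a child recursively
            out += engine(node.children[next(arg)])
            i += 2
        else:
            out += fmt[i]
            i += 1
    return out

tree = Node('binary_expr', [Node('LOAD_CONST', value=1),
                            Node('unary_expr', [Node('LOAD_CONST', value=2)])])
print(engine(tree))  # prints: 1 + -2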
@@ -130,7 +145,7 @@ else:

 def is_docstring(node):
     try:
-        return (node[0][0].type == 'assign' and
+        return (node[0][0].kind == 'assign' and
                 node[0][0][1][0].pattr == '__doc__')
     except:
         return False
@@ -406,13 +421,13 @@ class SourceWalker(GenericASTTraversal, object):
                 })
             def n_async_call_function(node):
                 self.f.write('async ')
-                node.type == 'call_function'
+                node.kind == 'call_function'
                 p = self.prec
                 self.prec = 80
                 self.template_engine(('%c(%P)', 0,
                                       (1, -4, ', ', 100)), node)
                 self.prec = p
-                node.type == 'async_call_function'
+                node.kind == 'async_call_function'
                 self.prune()
             self.n_async_call_function = n_async_call_function
             self.n_build_list_unpack = self.n_build_list
@@ -425,13 +440,13 @@ class SourceWalker(GenericASTTraversal, object):
                 for i in mapping[1:]:
                     key = key[i]
                     pass
-                if key.type.startswith('CALL_FUNCTION_VAR_KW'):
+                if key.kind.startswith('CALL_FUNCTION_VAR_KW'):
                     # Python 3.5 changes the stack position of *args. kwargs come
                     # after *args whereas in earlier Pythons, *args is at the end
                     # which simpilfiies things from our perspective.
                     # Python 3.6+ replaces CALL_FUNCTION_VAR_KW with CALL_FUNCTION_EX
                     # We will just swap the order to make it look like earlier Python 3.
-                    entry = table[key.type]
+                    entry = table[key.kind]
                     kwarg_pos = entry[2][1]
                     args_pos = kwarg_pos - 1
                     # Put last node[args_pos] after subsequent kwargs
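The index swap above exists because the same call source compiles to different argument orders across interpreter versions, so the formatter fixes up the table entry rather than special-casing the tree walk. A small way to see the moving target (the output differs by version, which is the point):

# CPython 3.5 moved **kwargs after *args on the stack, and 3.6+
# compiles this call with CALL_FUNCTION_EX instead of
# CALL_FUNCTION_VAR_KW; the decompiled source should nevertheless be
# identical everywhere.
import dis

def call_site(f):
    return f(*[1, 2], key=3)

dis.dis(call_site)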
@@ -632,6 +647,20 @@ class SourceWalker(GenericASTTraversal, object):
                 node == AST('return_stmt',
                             [AST('ret_expr', [NONE]), Token('RETURN_VALUE')]))

+    # Python 3.x can have be dead code as a result of its optimization?
+    # So we'll add a # at the end of the return lambda so the rest is ignored
+    def n_return_lambda(self, node):
+        if 1 <= len(node) <= 2:
+            self.preorder(node[0])
+            self.write(' # Avoid dead code: ')
+            self.prune()
+        else:
+            # We can't comment out like above because there may be a trailing ')'
+            # that needs to be written
+            assert len(node) == 3 and node[2] == 'LAMBDA_MARKER'
+            self.preorder(node[0])
+            self.prune()
+
     def n_return_stmt(self, node):
         if self.params['isLambda']:
             self.preorder(node[0])
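The new n_return_lambda handler papers over unreachable bytecode that can follow a lambda's return. When nothing else needs writing, the dead remainder is hidden behind a trailing ' # Avoid dead code: ' comment; when a LAMBDA_MARKER closes the node, a ')' may still have to be emitted, so no comment is written. An illustrative case (not captured uncompyle6 output) of the kind of lambda that can leave dead code behind:

# Both branches of the conditional produce the lambda's value, so
# after optimization the compiled body can contain an unreachable
# trailing return, the "dead code" referred to above.
f = lambda x: 1 if x else 2
print(f(True), f(False))  # 1 2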
@@ -649,6 +678,7 @@ class SourceWalker(GenericASTTraversal, object):

     def n_return_if_stmt(self, node):
         if self.params['isLambda']:
+            self.write(' return ')
             self.preorder(node[0])
             self.prune()
         else:
@@ -705,12 +735,12 @@ class SourceWalker(GenericASTTraversal, object):

     def n_expr(self, node):
         p = self.prec
-        if node[0].type.startswith('binary_expr'):
+        if node[0].kind.startswith('binary_expr'):
             n = node[0][-1][0]
         else:
             n = node[0]

-        self.prec = PRECEDENCE.get(n.type, -2)
+        self.prec = PRECEDENCE.get(n.kind, -2)
         if n == 'LOAD_CONST' and repr(n.pattr)[0] == '-':
             self.prec = 6

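The special case for negative constants matters because unary minus binds more loosely than **: giving the node the precedence of a unary expression makes the engine parenthesize the literal where that changes meaning. A quick check of the two readings:

# Why a re-emitted negative literal may need parentheses:
print(-1 ** 2)    # -1, parsed as -(1 ** 2)
print((-1) ** 2)  #  1, what bytecode holding the constant -1 denotes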
@@ -793,9 +823,9 @@ class SourceWalker(GenericASTTraversal, object):
         self.prune()

     def n_delete_subscr(self, node):
-        if node[-2][0] == 'build_list' and node[-2][0][-1].type.startswith('BUILD_TUPLE'):
+        if node[-2][0] == 'build_list' and node[-2][0][-1].kind.startswith('BUILD_TUPLE'):
             if node[-2][0][-1] != 'BUILD_TUPLE_0':
-                node[-2][0].type = 'build_tuple2'
+                node[-2][0].kind = 'build_tuple2'
         self.default(node)

     n_store_subscr = n_binary_subscr = n_delete_subscr
@@ -804,9 +834,9 @@ class SourceWalker(GenericASTTraversal, object):
     def n_tryfinallystmt(self, node):
         if len(node[1][0]) == 1 and node[1][0][0] == 'stmt':
             if node[1][0][0][0] == 'trystmt':
-                node[1][0][0][0].type = 'tf_trystmt'
+                node[1][0][0][0].kind = 'tf_trystmt'
             if node[1][0][0][0] == 'tryelsestmt':
-                node[1][0][0][0].type = 'tf_tryelsestmt'
+                node[1][0][0][0].kind = 'tf_tryelsestmt'
         self.default(node)

     def n_exec_stmt(self, node):
@@ -831,26 +861,26 @@ class SourceWalker(GenericASTTraversal, object):

         if len(n) == 1 == len(n[0]) and n[0] == '_stmts':
             n = n[0][0][0]
-        elif n[0].type in ('lastc_stmt', 'lastl_stmt'):
+        elif n[0].kind in ('lastc_stmt', 'lastl_stmt'):
             n = n[0][0]
         else:
             if not preprocess:
                 self.default(node)
             return

-        if n.type in ('ifstmt', 'iflaststmt', 'iflaststmtl'):
-            node.type = 'ifelifstmt'
-            n.type = 'elifstmt'
-        elif n.type in ('ifelsestmtr',):
-            node.type = 'ifelifstmt'
-            n.type = 'elifelsestmtr'
-        elif n.type in ('ifelsestmt', 'ifelsestmtc', 'ifelsestmtl'):
-            node.type = 'ifelifstmt'
+        if n.kind in ('ifstmt', 'iflaststmt', 'iflaststmtl'):
+            node.kind = 'ifelifstmt'
+            n.kind = 'elifstmt'
+        elif n.kind in ('ifelsestmtr',):
+            node.kind = 'ifelifstmt'
+            n.kind = 'elifelsestmtr'
+        elif n.kind in ('ifelsestmt', 'ifelsestmtc', 'ifelsestmtl'):
+            node.kind = 'ifelifstmt'
             self.n_ifelsestmt(n, preprocess=True)
             if n == 'ifelifstmt':
-                n.type = 'elifelifstmt'
-            elif n.type in ('ifelsestmt', 'ifelsestmtc', 'ifelsestmtl'):
-                n.type = 'elifelsestmt'
+                n.kind = 'elifelifstmt'
+            elif n.kind in ('ifelsestmt', 'ifelsestmtc', 'ifelsestmtl'):
+                n.kind = 'elifelsestmt'
         if not preprocess:
             self.default(node)

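All of this kind-rewriting is how the walker recovers elif chains: bytecode has no elif, only an if whose else branch holds exactly one further if. Relabeling that nested node lets the printer flatten the chain rather than indenting ever deeper. The two equivalent shapes, as ordinary source:

# Without the rewrite, the second form would come out like the first,
# one extra indent level per test; both compile to the same jumps, so
# only the relabeled parse tree tells them apart.
def nested(a, b):
    if a:
        return 1
    else:
        if b:
            return 2

def flat(a, b):
    if a:
        return 1
    elif b:
        return 2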
@@ -859,7 +889,7 @@ class SourceWalker(GenericASTTraversal, object):
     def n_ifelsestmtr(self, node):
         if node[2] == 'COME_FROM':
             return_stmts_node = node[3]
-            node.type = 'ifelsestmtr2'
+            node.kind = 'ifelsestmtr2'
         else:
             return_stmts_node = node[2]
         if len(return_stmts_node) != 2:
@@ -890,7 +920,7 @@ class SourceWalker(GenericASTTraversal, object):
         for n in return_stmts_node[0]:
             if (n[0] == 'ifstmt' and n[0][1][0] == 'return_if_stmts'):
                 if prev_stmt_is_if_ret:
-                    n[0].type = 'elifstmt'
+                    n[0].kind = 'elifstmt'
                 prev_stmt_is_if_ret = True
             else:
                 prev_stmt_is_if_ret = False
@@ -910,7 +940,7 @@ class SourceWalker(GenericASTTraversal, object):
     def n_elifelsestmtr(self, node):
         if node[2] == 'COME_FROM':
             return_stmts_node = node[3]
-            node.type = 'elifelsestmtr2'
+            node.kind = 'elifelsestmtr2'
         else:
             return_stmts_node = node[2]

@@ -930,7 +960,7 @@ class SourceWalker(GenericASTTraversal, object):
         self.indent_less()

         for n in return_stmts_node[0]:
-            n[0].type = 'elifstmt'
+            n[0].kind = 'elifstmt'
             self.preorder(n)
         self.println(self.indent, 'else:')
         self.indent_more()
@@ -940,7 +970,7 @@ class SourceWalker(GenericASTTraversal, object):

     def n_import_as(self, node):
         store_node = node[-1][-1]
-        assert store_node.type.startswith('STORE_')
+        assert store_node.kind.startswith('STORE_')
         iname = node[0].pattr  # import name
         sname = store_node.pattr  # store_name
         if iname and iname == sname or iname.startswith(sname + '.'):
@@ -1054,7 +1084,7 @@ class SourceWalker(GenericASTTraversal, object):
         """
         p = self.prec
         self.prec = 27
-        if node[-1].type == 'list_iter':
+        if node[-1].kind == 'list_iter':
             n = node[-1]
         elif self.is_pypy and node[-1] == 'JUMP_BACK':
             n = node[-2]
@@ -1169,7 +1199,7 @@ class SourceWalker(GenericASTTraversal, object):
         self.write('{')
         if node[0] in ['LOAD_SETCOMP', 'LOAD_DICTCOMP']:
             self.comprehension_walk3(node, 1, 0)
-        elif node[0].type == 'load_closure' and self.version >= 3.0:
+        elif node[0].kind == 'load_closure' and self.version >= 3.0:
             self.setcomprehension_walk3(node, collection_index=4)
         else:
             self.comprehension_walk(node, iter_index=4)
@@ -1236,7 +1266,7 @@ class SourceWalker(GenericASTTraversal, object):

         # Python 2.7+ starts including set_comp_body
         # Python 3.5+ starts including setcomp_func
-        assert n.type in ('lc_body', 'comp_body', 'setcomp_func', 'set_comp_body'), ast
+        assert n.kind in ('lc_body', 'comp_body', 'setcomp_func', 'set_comp_body'), ast
         assert designator, "Couldn't find designator in list/set comprehension"

         self.preorder(n[0])
@@ -1286,7 +1316,7 @@ class SourceWalker(GenericASTTraversal, object):
                 n = n[3]
             elif n in ('list_if', 'list_if_not'):
                 # FIXME: just a guess
-                if n[0].type == 'expr':
+                if n[0].kind == 'expr':
                     list_if = n
                 else:
                     list_if = n[1]
@@ -1307,7 +1337,7 @@ class SourceWalker(GenericASTTraversal, object):

     def n_listcomp(self, node):
         self.write('[')
-        if node[0].type == 'load_closure':
+        if node[0].kind == 'load_closure':
             self.listcomprehension_walk2(node)
         else:
             self.comprehension_walk3(node, 1, 0)
@@ -1344,7 +1374,7 @@ class SourceWalker(GenericASTTraversal, object):
                 n = n[3]
             elif n in ('list_if', 'list_if_not', 'comp_if', 'comp_if_not'):
                 # FIXME: just a guess
-                if n[0].type == 'expr':
+                if n[0].kind == 'expr':
                     list_if = n
                 else:
                     list_if = n[1]
@@ -1490,10 +1520,10 @@ class SourceWalker(GenericASTTraversal, object):

     def print_super_classes3(self, node):
         n = len(node)-1
-        if node.type != 'expr':
-            assert node[n].type.startswith('CALL_FUNCTION')
+        if node.kind != 'expr':
+            assert node[n].kind.startswith('CALL_FUNCTION')
             for i in range(n-2, 0, -1):
-                if not node[i].type in ['expr', 'LOAD_CLASSNAME']:
+                if not node[i].kind in ['expr', 'LOAD_CLASSNAME']:
                     break
                 pass

@@ -1533,7 +1563,7 @@ class SourceWalker(GenericASTTraversal, object):
         line_number = self.line_number

         if self.version >= 3.0 and not self.is_pypy:
-            if node[0].type.startswith('kvlist'):
+            if node[0].kind.startswith('kvlist'):
                 # Python 3.5+ style key/value list in mapexpr
                 kv_node = node[0]
                 l = list(kv_node)
@@ -1556,11 +1586,11 @@ class SourceWalker(GenericASTTraversal, object):
                     i += 2
                     pass
                 pass
-            elif len(node) > 1 and node[1].type.startswith('kvlist'):
+            elif len(node) > 1 and node[1].kind.startswith('kvlist'):
                 # Python 3.0..3.4 style key/value list in mapexpr
                 kv_node = node[1]
                 l = list(kv_node)
-                if len(l) > 0 and l[0].type == 'kv3':
+                if len(l) > 0 and l[0].kind == 'kv3':
                     # Python 3.2 does this
                     kv_node = node[1][0]
                     l = list(kv_node)
@@ -1585,7 +1615,7 @@ class SourceWalker(GenericASTTraversal, object):
                     i += 3
                     pass
                 pass
-            elif node[-1].type.startswith('BUILD_CONST_KEY_MAP'):
+            elif node[-1].kind.startswith('BUILD_CONST_KEY_MAP'):
                 # Python 3.6+ style const map
                 keys = node[-2].pattr
                 values = node[:-2]
@@ -1610,7 +1640,7 @@ class SourceWalker(GenericASTTraversal, object):
                 pass
             else:
                 # Python 2 style kvlist
-                assert node[-1].type.startswith('kvlist')
+                assert node[-1].kind.startswith('kvlist')
                 kv_node = node[-1]  # goto kvlist

                 first_time = True
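The branches above track how CPython's dict-literal bytecode kept changing; 3.6 introduced BUILD_CONST_KEY_MAP, which pushes all the values and then a single tuple of keys. The layout a given interpreter uses is easy to inspect, and the per-version differences are exactly why the map-printing code needs the branches:

# On CPython 3.6+ this disassembles to LOAD_CONST 1, LOAD_CONST 2,
# LOAD_CONST ('a', 'b'), BUILD_CONST_KEY_MAP 2; earlier 3.x versions
# interleave keys and values instead.
import dis
dis.dis(compile("{'a': 1, 'b': 2}", '<dict>', 'eval'))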
@@ -1676,7 +1706,7 @@ class SourceWalker(GenericASTTraversal, object):
         p = self.prec
         self.prec = 100
         lastnode = node.pop()
-        lastnodetype = lastnode.type
+        lastnodetype = lastnode.kind

         # If this build list is inside a CALL_FUNCTION_VAR,
         # then the first * has already been printed.
@@ -1746,7 +1776,7 @@ class SourceWalker(GenericASTTraversal, object):
         self.prune()

     def n_unpack(self, node):
-        if node[0].type.startswith('UNPACK_EX'):
+        if node[0].kind.startswith('UNPACK_EX'):
             # Python 3+
             before_count, after_count = node[0].attr
             for i in range(before_count+1):
@@ -1761,8 +1791,8 @@ class SourceWalker(GenericASTTraversal, object):
             self.prune()
             return
         for n in node[1:]:
-            if n[0].type == 'unpack':
-                n[0].type = 'unpack_w_parens'
+            if n[0].kind == 'unpack':
+                n[0].kind = 'unpack_w_parens'
         self.default(node)

     n_unpack_w_parens = n_unpack
@@ -1771,25 +1801,25 @@ class SourceWalker(GenericASTTraversal, object):
         # A horrible hack for Python 3.0 .. 3.2
         if 3.0 <= self.version <= 3.2 and len(node) == 2:
             if (node[0][0] == 'LOAD_FAST' and node[0][0].pattr == '__locals__' and
-                node[1][0].type == 'STORE_LOCALS'):
+                node[1][0].kind == 'STORE_LOCALS'):
                 self.prune()
         self.default(node)

     def n_assign2(self, node):
         for n in node[-2:]:
             if n[0] == 'unpack':
-                n[0].type = 'unpack_w_parens'
+                n[0].kind = 'unpack_w_parens'
         self.default(node)

     def n_assign3(self, node):
         for n in node[-3:]:
             if n[0] == 'unpack':
-                n[0].type = 'unpack_w_parens'
+                n[0].kind = 'unpack_w_parens'
         self.default(node)

     def n_except_cond2(self, node):
         if node[-2][0] == 'unpack':
-            node[-2][0].type = 'unpack_w_parens'
+            node[-2][0].kind = 'unpack_w_parens'
         self.default(node)

     def template_engine(self, entry, startnode):
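The unpack to unpack_w_parens relabeling selects a template that wraps the assignment target in parentheses. That is required in spots where a bare tuple target is not valid source, for instance a Python 2 except clause that unpacks the exception argument (shown as text, since the syntax no longer parses on Python 3):

# Python 2 only: the unpacking target in an except clause must be
# parenthesized, which is what 'unpack_w_parens' ensures on output.
py2_src = """
try:
    f()
except IOError, (errno, strerror):
    print errno, strerror
"""
print(py2_src)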
@@ -1798,7 +1828,7 @@ class SourceWalker(GenericASTTraversal, object):
         specifications such as %c, %C, and so on.
         """

-        # self.println("----> ", startnode.type, ', ', entry[0])
+        # self.println("----> ", startnode.kind, ', ', entry[0])
         fmt = entry[0]
         arg = 1
         i = 0
@@ -1826,7 +1856,7 @@ class SourceWalker(GenericASTTraversal, object):
             # Used mostly on the LHS of an assignment
             # BUILD_TUPLE_n is pretty printed and may take care of other uses.
             elif typ == ',':
-                if (node.type in ('unpack', 'unpack_w_parens') and
+                if (node.kind in ('unpack', 'unpack_w_parens') and
                     node[0].attr == 1):
                     self.write(',')
             elif typ == 'c':
@@ -1898,8 +1928,8 @@ class SourceWalker(GenericASTTraversal, object):
                 key = key[i]
                 pass

-        if key.type in table:
-            self.template_engine(table[key.type], node)
+        if key.kind in table:
+            self.template_engine(table[key.kind], node)
             self.prune()

     def customize(self, customize):
@@ -2118,6 +2148,11 @@ class SourceWalker(GenericASTTraversal, object):
         # assert isinstance(tokens[0], Token)

         if isLambda:
+            for t in tokens:
+                if t.kind == 'RETURN_END_IF':
+                    t.kind = 'RETURN_END_IF_LAMBDA'
+                elif t.kind == 'RETURN_VALUE':
+                    t.kind = 'RETURN_VALUE_LAMBDA'
             tokens.append(Token('LAMBDA_MARKER'))
             try:
                 ast = python_parser.parse(self.p, tokens, customize)
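Renaming the return tokens before parsing gives lambda bodies their own grammar vocabulary, so a return inside a lambda cannot be matched by statement-level return rules; the companion change just below (accepting RETURN_VALUE_LAMBDA when trimming a trailing "return None") keeps that cleanup working. A minimal sketch of the renaming pass with a stand-in Token class (the real one lives in uncompyle6's scanner code):

class Token:
    def __init__(self, kind):
        self.kind = kind

tokens = [Token('LOAD_FAST'), Token('RETURN_END_IF'), Token('RETURN_VALUE')]
for t in tokens:                       # the same rewrite as the hunk above
    if t.kind == 'RETURN_END_IF':
        t.kind = 'RETURN_END_IF_LAMBDA'
    elif t.kind == 'RETURN_VALUE':
        t.kind = 'RETURN_VALUE_LAMBDA'
tokens.append(Token('LAMBDA_MARKER'))  # sentinel the lambda grammar expects
print([t.kind for t in tokens])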
@@ -2132,10 +2167,10 @@ class SourceWalker(GenericASTTraversal, object):
         # than fight (with the grammar to not emit "return None").
         if self.hide_internal:
             if len(tokens) >= 2 and not noneInNames:
-                if tokens[-1].type == 'RETURN_VALUE':
+                if tokens[-1].kind in ('RETURN_VALUE', 'RETURN_VALUE_LAMBDA'):
                     # Python 3.4's classes can add a "return None" which is
                     # invalid syntax.
-                    if tokens[-2].type == 'LOAD_CONST':
+                    if tokens[-2].kind == 'LOAD_CONST':
                         if isTopLevel or tokens[-2].pattr is None:
                             del tokens[-2:]
                         else:
@@ -2177,7 +2212,7 @@ def deparse_code(version, co, out=sys.stdout, showasm=None, showast=False,
     debug_parser = dict(PARSER_DEFAULT_DEBUG)
     if showgrammar:
         debug_parser['reduce'] = showgrammar
-        debug_parser['errorstack'] = True
+        debug_parser['errorstack'] = 'full'

     # Build AST from disassembly.
     linestarts = dict(scanner.opc.findlinestarts(co))

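Changing errorstack from True to 'full' asks for the richest error-stack report whenever grammar tracing is requested, while 'reduce' traces each grammar reduction as it fires. A sketch of the resulting debug options; the import below assumes spark_parser's DEFAULT_DEBUG, which appears to be what the surrounding module aliases as PARSER_DEFAULT_DEBUG:

# Assumed import; deparse_code() above builds an equivalent dict when
# called with showgrammar=True.
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG

debug_parser = dict(PARSER_DEFAULT_DEBUG)
debug_parser['reduce'] = True        # print each reduction the parser makes
debug_parser['errorstack'] = 'full'  # on a parse error, dump the full stack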
@@ -1,6 +1,6 @@
 #
 # (C) Copyright 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
-# (C) Copyright 2015-2016 by Rocky Bernstein
+# (C) Copyright 2015-2017 by Rocky Bernstein
 #
 """
 byte-code verification
@@ -244,18 +244,18 @@ def cmp_code_objects(version, is_pypy, code_obj1, code_obj2,
         scanner.resetTokenClass()  # restore Token class

         targets1 = dis.findlabels(code_obj1.co_code)
-        tokens1 = [t for t in tokens1 if t.type != 'COME_FROM']
-        tokens2 = [t for t in tokens2 if t.type != 'COME_FROM']
+        tokens1 = [t for t in tokens1 if t.kind != 'COME_FROM']
+        tokens2 = [t for t in tokens2 if t.kind != 'COME_FROM']

         i1 = 0; i2 = 0
         offset_map = {}; check_jumps = {}
         while i1 < len(tokens1):
             if i2 >= len(tokens2):
                 if len(tokens1) == len(tokens2) + 2 \
-                      and tokens1[-1].type == 'RETURN_VALUE' \
-                      and tokens1[-2].type == 'LOAD_CONST' \
+                      and tokens1[-1].kind == 'RETURN_VALUE' \
+                      and tokens1[-2].kind == 'LOAD_CONST' \
                       and tokens1[-2].pattr is None \
-                      and tokens1[-3].type == 'RETURN_VALUE':
+                      and tokens1[-3].kind == 'RETURN_VALUE':
                     break
                 else:
                     raise CmpErrorCodeLen(name, tokens1, tokens2)
@@ -267,13 +267,13 @@ def cmp_code_objects(version, is_pypy, code_obj1, code_obj2,
                 raise CmpErrorCode(name, tokens1[idx1].offset, tokens1[idx1],
                                    tokens2[idx2], tokens1, tokens2)

-            if tokens1[i1].type != tokens2[i2].type:
-                if tokens1[i1].type == 'LOAD_CONST' == tokens2[i2].type:
+            if tokens1[i1].kind != tokens2[i2].kind:
+                if tokens1[i1].kind == 'LOAD_CONST' == tokens2[i2].kind:
                     i = 1
-                    while tokens1[i1+i].type == 'LOAD_CONST':
+                    while tokens1[i1+i].kind == 'LOAD_CONST':
                         i += 1
-                    if tokens1[i1+i].type.startswith(('BUILD_TUPLE', 'BUILD_LIST')) \
-                          and i == int(tokens1[i1+i].type.split('_')[-1]):
+                    if tokens1[i1+i].kind.startswith(('BUILD_TUPLE', 'BUILD_LIST')) \
+                          and i == int(tokens1[i1+i].kind.split('_')[-1]):
                         t = tuple([ elem.pattr for elem in tokens1[i1:i1+i] ])
                         if t != tokens2[i2].pattr:
                             raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1],
@@ -281,60 +281,60 @@ def cmp_code_objects(version, is_pypy, code_obj1, code_obj2,
                         i1 += i + 1
                         i2 += 1
                         continue
-                    elif i == 2 and tokens1[i1+i].type == 'ROT_TWO' and tokens2[i2+1].type == 'UNPACK_SEQUENCE_2':
+                    elif i == 2 and tokens1[i1+i].kind == 'ROT_TWO' and tokens2[i2+1].kind == 'UNPACK_SEQUENCE_2':
                         i1 += 3
                         i2 += 2
                         continue
-                    elif i == 2 and tokens1[i1+i].type in BIN_OP_FUNCS:
-                        f = BIN_OP_FUNCS[tokens1[i1+i].type]
+                    elif i == 2 and tokens1[i1+i].kind in BIN_OP_FUNCS:
+                        f = BIN_OP_FUNCS[tokens1[i1+i].kind]
                         if f(tokens1[i1].pattr, tokens1[i1+1].pattr) == tokens2[i2].pattr:
                             i1 += 3
                             i2 += 1
                             continue
-                elif tokens1[i1].type == 'UNARY_NOT':
-                    if tokens2[i2].type == 'POP_JUMP_IF_TRUE':
-                        if tokens1[i1+1].type == 'POP_JUMP_IF_FALSE':
+                elif tokens1[i1].kind == 'UNARY_NOT':
+                    if tokens2[i2].kind == 'POP_JUMP_IF_TRUE':
+                        if tokens1[i1+1].kind == 'POP_JUMP_IF_FALSE':
                             i1 += 2
                             i2 += 1
                             continue
-                    elif tokens2[i2].type == 'POP_JUMP_IF_FALSE':
-                        if tokens1[i1+1].type == 'POP_JUMP_IF_TRUE':
+                    elif tokens2[i2].kind == 'POP_JUMP_IF_FALSE':
+                        if tokens1[i1+1].kind == 'POP_JUMP_IF_TRUE':
                             i1 += 2
                             i2 += 1
                             continue
-                elif tokens1[i1].type in ('JUMP_FORWARD', 'JUMP_BACK') \
-                      and tokens1[i1-1].type == 'RETURN_VALUE' \
-                      and tokens2[i2-1].type in ('RETURN_VALUE', 'RETURN_END_IF') \
+                elif tokens1[i1].kind in ('JUMP_FORWARD', 'JUMP_BACK') \
+                      and tokens1[i1-1].kind == 'RETURN_VALUE' \
+                      and tokens2[i2-1].kind in ('RETURN_VALUE', 'RETURN_END_IF') \
                       and int(tokens1[i1].offset) not in targets1:
                     i1 += 1
                     continue
-                elif tokens1[i1].type == 'JUMP_FORWARD' and tokens2[i2].type == 'JUMP_BACK' \
-                      and tokens1[i1+1].type == 'JUMP_BACK' and tokens2[i2+1].type == 'JUMP_BACK' \
+                elif tokens1[i1].kind == 'JUMP_FORWARD' and tokens2[i2].kind == 'JUMP_BACK' \
+                      and tokens1[i1+1].kind == 'JUMP_BACK' and tokens2[i2+1].kind == 'JUMP_BACK' \
                       and int(tokens1[i1].pattr) == int(tokens1[i1].offset) + 3:
                     if int(tokens1[i1].pattr) == int(tokens1[i1+1].offset):
                         i1 += 2
                         i2 += 2
                         continue
-                elif tokens1[i1].type == 'LOAD_NAME' and tokens2[i2].type == 'LOAD_CONST' \
+                elif tokens1[i1].kind == 'LOAD_NAME' and tokens2[i2].kind == 'LOAD_CONST' \
                       and tokens1[i1].pattr == 'None' and tokens2[i2].pattr is None:
                     pass
-                elif tokens1[i1].type == 'LOAD_GLOBAL' and tokens2[i2].type == 'LOAD_NAME' \
+                elif tokens1[i1].kind == 'LOAD_GLOBAL' and tokens2[i2].kind == 'LOAD_NAME' \
                       and tokens1[i1].pattr == tokens2[i2].pattr:
                     pass
-                elif tokens1[i1].type == 'LOAD_ASSERT' and tokens2[i2].type == 'LOAD_NAME' \
+                elif tokens1[i1].kind == 'LOAD_ASSERT' and tokens2[i2].kind == 'LOAD_NAME' \
                       and tokens1[i1].pattr == tokens2[i2].pattr:
                     pass
-                elif (tokens1[i1].type == 'RETURN_VALUE' and
-                      tokens2[i2].type == 'RETURN_END_IF'):
+                elif (tokens1[i1].kind == 'RETURN_VALUE' and
+                      tokens2[i2].kind == 'RETURN_END_IF'):
                     pass
-                elif (tokens1[i1].type == 'BUILD_TUPLE_0' and
+                elif (tokens1[i1].kind == 'BUILD_TUPLE_0' and
                       tokens2[i2].pattr == ()):
                     pass
                 else:
                     raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1],
                                        tokens2[i2], tokens1, tokens2)
-            elif tokens1[i1].type in JUMP_OPS and tokens1[i1].pattr != tokens2[i2].pattr:
-                if tokens1[i1].type == 'JUMP_BACK':
+            elif tokens1[i1].kind in JUMP_OPS and tokens1[i1].pattr != tokens2[i2].pattr:
+                if tokens1[i1].kind == 'JUMP_BACK':
                     dest1 = int(tokens1[i1].pattr)
                     dest2 = int(tokens2[i2].pattr)
                     if offset_map[dest1] != dest2:
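Most of cmp_code_objects() is a catalogue of pardonable differences between the original and the recompiled bytecode. The LOAD_CONST branch above, for instance, accepts a tuple assembled at runtime on one side against the same tuple folded into a single constant on the other. The shape of that check, recreated standalone (illustrative, not verify.py itself):

# i LOAD_CONST tokens followed by BUILD_TUPLE_i on one side are
# equivalent to one folded LOAD_CONST whose pattr is the whole tuple.
consts = [1, 2]                 # pattrs of the unfolded LOAD_CONSTs
op = 'BUILD_TUPLE_2'
folded = (1, 2)                 # pattr of the single folded LOAD_CONST
if (op.startswith(('BUILD_TUPLE', 'BUILD_LIST'))
        and len(consts) == int(op.split('_')[-1])):
    print(tuple(consts) == folded)  # True, so no CmpErrorCode is raised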
@@ -385,28 +385,28 @@ def cmp_code_objects(version, is_pypy, code_obj1, code_obj2,
 class Token(scanner.Token):
     """Token class with changed semantics for 'cmp()'."""
     def __cmp__(self, o):
-        t = self.type  # shortcut
-        if t == 'BUILD_TUPLE_0' and o.type == 'LOAD_CONST' and o.pattr == ():
+        t = self.kind  # shortcut
+        if t == 'BUILD_TUPLE_0' and o.kind == 'LOAD_CONST' and o.pattr == ():
             return 0
-        if t == 'COME_FROM' == o.type:
+        if t == 'COME_FROM' == o.kind:
             return 0
-        if t == 'PRINT_ITEM_CONT' and o.type == 'PRINT_ITEM':
+        if t == 'PRINT_ITEM_CONT' and o.kind == 'PRINT_ITEM':
             return 0
-        if t == 'RETURN_VALUE' and o.type == 'RETURN_END_IF':
+        if t == 'RETURN_VALUE' and o.kind == 'RETURN_END_IF':
             return 0
-        if t == 'JUMP_IF_FALSE_OR_POP' and o.type == 'POP_JUMP_IF_FALSE':
+        if t == 'JUMP_IF_FALSE_OR_POP' and o.kind == 'POP_JUMP_IF_FALSE':
             return 0
         if JUMP_OPS and t in JUMP_OPS:
             # ignore offset
-            return t == o.type
-        return (t == o.type) or self.pattr == o.pattr
+            return t == o.kind
+        return (t == o.kind) or self.pattr == o.pattr

     def __repr__(self):
-        return '%s %s (%s)' % (str(self.type), str(self.attr),
+        return '%s %s (%s)' % (str(self.kind), str(self.attr),
                                repr(self.pattr))

     def __str__(self):
-        return '%s\t%-17s %r' % (self.offset, self.type, self.pattr)
+        return '%s\t%-17s %r' % (self.offset, self.kind, self.pattr)

 def compare_code_with_srcfile(pyc_filename, src_filename, weak_verify=False):
     """Compare a .pyc with a source code file."""
@@ -438,4 +438,4 @@ if __name__ == '__main__':
     t2 = Token('LOAD_CONST', -421, 'code_object _expandLang', 55)
     print(repr(t1))
     print(repr(t2))
-    print(t1.type == t2.type, t1.attr == t2.attr)
+    print(t1.kind == t2.kind, t1.attr == t2.attr)

@@ -1,3 +1,3 @@
 # This file is suitable for sourcing inside bash as
 # well as importing into Python
-VERSION='2.12.0'
+VERSION='2.13.1'