Mirror of https://github.com/rocky/python-uncompyle6.git

Merge branch 'master' into python-2.4
ChangeLog (101 changed lines)
@@ -1,6 +1,105 @@
+2016-12-04  rocky  <rb@dustyfeet.com>
+
+	* uncompyle6/version.py: Get ready for release 2.9.7
+
+2016-11-28  rocky  <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse3.py, uncompyle6/parsers/parse36.py:
+	Shorten Python3 grammars with + and *
+
+2016-11-28  rocky  <rb@dustyfeet.com>
+
+	* __pkginfo__.py, uncompyle6/parser.py,
+	uncompyle6/parsers/parse2.py: Try new spark 1.5.1 grammar syntax
+	shortcuts. This package I now declare stable
+
+2016-11-28  R. Bernstein  <rocky@users.noreply.github.com>
+
+	* README.rst: Update README.rst
+
+2016-11-27  rocky  <rb@dustyfeet.com>
+
+	* README.rst: Limitations of decompiling control structures.
+
+2016-11-27  R. Bernstein  <rocky@users.noreply.github.com>
+
+	* : Merge pull request #69 from rocky/ast-reduce-checks AST reduce checks
+
+2016-11-26  rocky  <rb@dustyfeet.com>
+
+	* test/simple_source/bug26/03_elif_vs_continue.py,
+	uncompyle6/main.py, uncompyle6/parser.py,
+	uncompyle6/parsers/parse2.py, uncompyle6/scanners/scanner2.py,
+	uncompyle6/scanners/scanner26.py: Misc changes
+	scanner26.py: make scanner2.py and scanner26.py more alike
+	scanner2.py: check that return stmt is last in list. (May change)
+	main.py: show filename on verify error
+	test/*: add more
+
+2016-11-25  rocky  <rb@dustyfeet.com>
+
+	* __pkginfo__.py, test/Makefile, uncompyle6/parser.py,
+	uncompyle6/parsers/parse2.py, uncompyle6/parsers/parse3.py: Start
+	grammar reduction checks
+
+2016-11-24  rocky  <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse27.py, uncompyle6/scanners/scanner2.py,
+	uncompyle6/semantics/helper.py, uncompyle6/semantics/pysource.py:
+	2.7 grammar bug workaround. Fix docstring bug
+
+2016-11-24  rocky  <rb@dustyfeet.com>
+
+	* uncompyle6/semantics/pysource.py: Better line number tracking.
+	Indent Python 2 list comprehensions, albeit badly. DRY code a
+	little via indent_if_source_nl
+
+2016-11-24  rocky  <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse3.py, uncompyle6/scanners/scanner2.py:
+	<2.7 "if" detection and dup Python 3 grammar rule
+
+2016-11-23  rocky  <rb@dustyfeet.com>
+
+	* __pkginfo__.py, pytest/test_grammar.py, uncompyle6/parser.py,
+	uncompyle6/parsers/parse26.py: Python 2.6 grammar bug and..
+	__pkginfo__.py: Bump spark_parser version for parse_flags 'dups'
+
+2016-11-23  rocky  <rb@dustyfeet.com>
+
+	* __pkginfo__.py: Note that we now work on 2.4 and 2.5
+
+2016-11-23  rocky  <rb@dustyfeet.com>
+
+	* : commit 6aa1531972de83ecab15b4c96b89c873ea5a7458 Author: rocky
+	<rb@dustyfeet.com> Date: Wed Nov 23 00:48:38 2016 -0500
+
+2016-11-22  rocky  <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse3.py, uncompyle6/parsers/parse32.py,
+	uncompyle6/parsers/parse33.py, uncompyle6/parsers/parse34.py,
+	uncompyle6/parsers/parse35.py: DRY Python3 grammar
+
+2016-11-22  rocky  <rb@dustyfeet.com>
+
+	* uncompyle6/parsers/parse2.py, uncompyle6/parsers/parse27.py,
+	uncompyle6/scanners/scanner2.py: More detailed COME_FROMs.
+	For now we only add COME_FROM_FINALLY and COME_FROM_WITH, and even
+	here only on 2.7
+
+2016-11-22  rocky  <rb@dustyfeet.com>
+
+	* circle.yml, pytest/test_grammar.py, tox.ini,
+	uncompyle6/parser.py, uncompyle6/parsers/parse2.py,
+	uncompyle6/parsers/parse27.py: Remove redundant 2.7 (and 2.x)
+	grammar rules
+
+2016-11-22  rocky  <rb@dustyfeet.com>
+
+	* pytest/test_docstring.py, uncompyle6/linenumbers.py,
+	uncompyle6/semantics/fragments.py, uncompyle6/semantics/helper.py,
+	uncompyle6/semantics/make_function.py,
+	uncompyle6/semantics/pysource.py: Split out print_docstring;
+	move from pysource.py to new helper.py
+
 2016-11-20  rocky  <rb@dustyfeet.com>
 
+	* uncompyle6/version.py: Get ready for release 2.9.6
+
 	* ChangeLog, NEWS, uncompyle6/version.py: Get ready for release
 	2.9.6
 
 2016-11-20  R. Bernstein  <rocky@users.noreply.github.com>
NEWS (8 changed lines)
@@ -1,3 +1,11 @@
+uncompyle6 2.9.7 2016-12-04
+
+- Shorten Python3 grammars with + and *
+  this requires spark parser 1.5.1
+- Add some AST reduction checks to improve
+  decompile accuracy. This too requires
+  spark parser 1.5.1
+
 uncompyle6 2.9.6 2016-11-20
 
 - Correct MANIFEST.in
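The "+" and "*" (and "?") mentioned above are right-hand-side shortcuts that the newer spark-parser accepts in grammar rules. A minimal sketch of what such shortcuts expand to; expand_plus and expand_opt are hypothetical helpers for illustration, not spark-parser's API:

def expand_plus(lhs, sym):
    # "lhs ::= sym+" stands for the classic left-recursive pair.
    return ["%s ::= %s %s" % (lhs, lhs, sym),
            "%s ::= %s" % (lhs, sym)]

def expand_opt(lhs, sym):
    # "lhs ::= sym?" stands for a present-or-empty pair.
    return ["%s ::= %s" % (lhs, sym),
            "%s ::=" % lhs]

print(expand_plus('_stmts', 'stmt'))
print(expand_opt('come_from_opt', 'COME_FROM'))

This is exactly the rewrite applied throughout the parser files below, e.g. "_stmts ::= stmt+" replacing the two-rule form.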
README.rst (25 changed lines)
@@ -43,7 +43,8 @@ information.
 Requirements
 ------------
 
-This project requires Python 2.6 or later, PyPy 3-2.4, or PyPy-5.0.1.
+This project requires Python 2.6 or later, PyPy 3-2.4, or PyPy-5.0.1.
+Python versions 2.3-2.7 are supported in the python-2.4 branch.
 The bytecode files it can read have been tested on Python bytecodes from
 versions 2.1-2.7, and 3.2-3.6, and the above-mentioned PyPy versions.

@@ -97,7 +98,8 @@ Known Bugs/Restrictions
 -----------------------
 
 The biggest known and possibly fixable (but hard) problem has to do
-with handling control flow. In some cases we can detect an erroneous
+with handling control flow. All of the Python decompilers I have looked
+at have the same problem. In some cases we can detect an erroneous
 decompilation and report that.
 
 About 90% of the decompilation of Python standard library packages in

@@ -109,14 +111,17 @@ Other versions drop off in quality too.
 a Python for that bytecode version, and then comparing the bytecode
 produced by the decompiled/compiled program. Some allowance is made
 for inessential differences. But other semantically equivalent
-differences are not caught. For example ``if x: foo()`` is
-equivalent to ``x and foo()`` and decompilation may turn one into the
-other. *Weak Verification* on the other hand doesn't check bytecode
-for equivalence but does check to see if the resulting decompiled
-source is a valid Python program by running the Python
-interpreter. Because the Python language has changed so much, for best
-results you should use the same Python Version in checking as used in
-the bytecode.
+differences are not caught. For example ``1 and 0`` is decompiled to
+the equivalent ``0``; remnants of the first true evaluation (1) are
+lost when Python compiles this. When Python next compiles ``0`` the
+resulting code is simpler.
+
+*Weak Verification*
+on the other hand doesn't check bytecode for equivalence but does
+check to see if the resulting decompiled source is a valid Python
+program by running the Python interpreter. Because the Python language
+has changed so much, for best results you should use the same Python
+version in checking as used in the bytecode.
 
 Later distributions average about 200 files. There is some work to do
 on the lower end Python versions which is more difficult for us to
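A concrete way to see the ``1 and 0`` effect described above is to disassemble both forms with the standard dis module; the exact output varies by CPython version, so treat this as an illustration rather than a fixture:

import dis

# Compare the bytecode for "1 and 0" and plain "0"; tolerating this
# kind of inessential difference is what Strong Verification does.
dis.dis(compile('1 and 0', '<demo>', 'eval'))
print('---')
dis.dis(compile('0', '<demo>', 'eval'))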
__pkginfo__.py
@@ -12,7 +12,7 @@ copyright = """
 Copyright (C) 2015, 2016 Rocky Bernstein <rb@dustyfeet.com>.
 """
 
-classifiers = ['Development Status :: 4 - Beta',
+classifiers = ['Development Status :: 5 - Production/Stable',
                'Intended Audience :: Developers',
                'Operating System :: OS Independent',
                'Programming Language :: Python',

@@ -37,7 +37,7 @@ entry_points={
           'pydisassemble=uncompyle6.bin.pydisassemble:main',
       ]}
 ftp_url = None
-install_requires = ['spark-parser >= 1.4.3, < 1.5.0',
+install_requires = ['spark-parser >= 1.5.1, < 1.6.0',
                     'xdis >= 3.2.3, < 3.3.0']
 license = 'MIT'
 mailing_list = 'python-debugger@googlegroups.com'
test/Makefile
@@ -104,7 +104,7 @@ check-bytecode-2.6:
 
 #: Check deparsing Python 2.7
 check-bytecode-2.7:
-	$(PYTHON) test_pythonlib.py --bytecode-2.7
+	$(PYTHON) test_pythonlib.py --bytecode-2.7 --verify
 
 #: Check deparsing Python 3.0
 check-bytecode-3.0:
BIN  test/bytecode_2.6/03_elif_vs_continue.pyc (new file; binary not shown)
BIN  test/bytecode_3.3/03_while_else.pyc (new file; binary not shown)
test/simple_source/bug26/03_elif_vs_continue.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Bug was "continue" fouling up the 1st "elif": the "pass" was confused
+# with "continue" because the if jump around it was not recognized.
+# We fixed this by ignoring what's done in Python 2.7.
+# A better fix would be better detection of control structures.
+
+def _compile_charset(charset, flags, code, fixup=None):
+    # compile charset subprogram
+    emit = code.append
+    if fixup is None:
+        fixup = 1
+    for op, av in charset:
+        if op is flags:
+            pass
+        elif op is code:
+            emit(fixup(av))
+        else:
+            raise RuntimeError
+    emit(5)
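To see why a scanner can mistake this "pass" for a "continue", disassemble a reduced version of the loop; the snippet below is illustrative only and is not part of the test. "pass" itself compiles to nothing, so the only trace of the first branch is a jump that lands back at the loop top, which is also what an explicit "continue" compiles to:

import dis

def loop(charset):
    for op, av in charset:
        if op is None:
            pass        # the jump around the elif lands on the loop top,
        elif op is True:
            print(av)   # which is also where "continue" would jump

dis.dis(loop)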
test/simple_source/bug33/03_while_else.py (new file, 8 lines)
@@ -0,0 +1,8 @@
+# Bug from 3.4 threading. Bug is handling while/else
+def acquire(self):
+    with self._cond:
+        while self:
+            rc = False
+        else:
+            rc = True
+    return rc
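As a reminder of the construct being decompiled (this snippet is separate from the test file): a loop's "else" suite runs only when the loop exits normally, i.e. without a break, so its bytecode sits between the loop teardown and the following code:

def countdown(n):
    while n > 0:
        n -= 1
    else:
        n = -1      # runs because the loop ended without break
    return n

print(countdown(3))  # -1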
uncompyle6/main.py
@@ -53,7 +53,6 @@ def uncompyle(
               is_pypy=is_pypy)
 
 
-
 def uncompyle_file(filename, outstream=None, showasm=None, showast=False,
                    showgrammar=False):
     """

@@ -65,7 +64,6 @@ def uncompyle_file(filename, outstream=None, showasm=None, showast=False,
     (version, timestamp, magic_int, co, is_pypy,
      source_size) = load_module(filename, code_objects)
 
-
     if type(co) == list:
         for con in co:
             uncompyle(version, con, outstream, showasm, showast,

@@ -193,6 +191,8 @@ def main(in_base, out_base, files, codes, outfile=None,
                 print(e)
                 verify_failed_files += 1
                 os.rename(outfile, outfile + '_unverified')
-                sys.stderr.write("### Error Verifying %s\n" % filename)
+                sys.stderr.write(str(e) + "\n")
+                if not outfile:
+                    sys.stderr.write("### Error Verifying %s" %
+                                     filename)
uncompyle6/parser.py
@@ -67,6 +67,25 @@ class PythonParser(GenericASTBuilder):
         for i in dir(self):
             setattr(self, i, None)
 
+    def debug_reduce(self, rule, tokens, parent, i):
+        """Customized format and print for our kind of tokens
+        which gets called in debugging grammar reduce rules
+        """
+        prefix = ''
+        if parent and tokens:
+            p_token = tokens[parent]
+            if hasattr(p_token, 'linestart') and p_token.linestart:
+                prefix = 'L.%3d: ' % p_token.linestart
+            else:
+                prefix = '       '
+            if hasattr(p_token, 'offset'):
+                prefix += "%3s " % str(p_token.offset)
+                prefix += " "
+        else:
+            prefix = '          '
+
+        print("%s%s ::= %s" % (prefix, rule[0], ' '.join(rule[1])))
+
     def error(self, instructions, index):
         # Find the last line boundary
         for start in range(index, -1, -1):

@@ -118,9 +137,9 @@ class PythonParser(GenericASTBuilder):
         # print >> sys.stderr, 'resolve', str(list)
         return GenericASTBuilder.resolve(self, list)
 
-    ##############################################
-    ## Common Python 2 and Python 3 grammar rules
-    ##############################################
+    ###############################################
+    # Common Python 2 and Python 3 grammar rules  #
+    ###############################################
     def p_start(self, args):
         '''
         # The start or goal symbol

@@ -139,8 +158,7 @@ class PythonParser(GenericASTBuilder):
         """
         passstmt ::=
 
-        _stmts ::= _stmts stmt
-        _stmts ::= stmt
+        _stmts ::= stmt+
 
         # statements with continue
         c_stmts ::= _stmts

@@ -252,8 +270,7 @@ class PythonParser(GenericASTBuilder):
 
         # Zero or one COME_FROM
        # And/or expressions have this
-        come_from_opt ::= COME_FROM
-        come_from_opt ::=
+        come_from_opt ::= COME_FROM?
         """
 
     def p_dictcomp(self, args):

@@ -467,6 +484,8 @@ class PythonParser(GenericASTBuilder):
         _mklambda ::= load_closure mklambda
         _mklambda ::= mklambda
 
+        # "and" where the first part of the and is true,
+        # so there is only the 2nd part to evaluate
         and2 ::= _jump jmp_false COME_FROM expr COME_FROM
 
         expr ::= conditional
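What the new debug_reduce output looks like, reproduced standalone; Tok and format_reduce below are hypothetical stand-ins for the real token and parser classes:

class Tok:
    def __init__(self, linestart=None, offset=None):
        self.linestart = linestart
        self.offset = offset

def format_reduce(rule, tokens, parent):
    # Mirror debug_reduce: an optional "L.nnn:" and offset prefix,
    # then the reduced rule as "lhs ::= rhs...".
    prefix = ''
    if parent is not None and tokens:
        p_token = tokens[parent]
        prefix = ('L.%3d: ' % p_token.linestart) if p_token.linestart else '       '
        if p_token.offset is not None:
            prefix += "%3s  " % str(p_token.offset)
    return "%s%s ::= %s" % (prefix, rule[0], ' '.join(rule[1]))

print(format_reduce(('_stmts', ('stmt', 'stmt')), [Tok(12, 30)], 0))
# -> L. 12:  30  _stmts ::= stmt stmt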
|
@@ -23,20 +23,18 @@ class Python2Parser(PythonParser):
|
||||
self.new_rules = set()
|
||||
|
||||
def p_print2(self, args):
|
||||
'''
|
||||
"""
|
||||
stmt ::= print_items_stmt
|
||||
stmt ::= print_nl
|
||||
stmt ::= print_items_nl_stmt
|
||||
|
||||
print_items_stmt ::= expr PRINT_ITEM print_items_opt
|
||||
print_items_nl_stmt ::= expr PRINT_ITEM print_items_opt PRINT_NEWLINE_CONT
|
||||
print_items_opt ::= print_items
|
||||
print_items_opt ::=
|
||||
print_items ::= print_items print_item
|
||||
print_items ::= print_item
|
||||
print_item ::= expr PRINT_ITEM_CONT
|
||||
print_nl ::= PRINT_NEWLINE
|
||||
'''
|
||||
print_items_opt ::= print_items?
|
||||
print_items ::= print_item+
|
||||
print_item ::= expr PRINT_ITEM_CONT
|
||||
print_nl ::= PRINT_NEWLINE
|
||||
"""
|
||||
|
||||
def p_stmt2(self, args):
|
||||
"""
|
||||
@@ -167,8 +165,7 @@ class Python2Parser(PythonParser):
|
||||
try_middle ::= jmp_abs COME_FROM except_stmts
|
||||
END_FINALLY
|
||||
|
||||
except_stmts ::= except_stmts except_stmt
|
||||
except_stmts ::= except_stmt
|
||||
except_stmts ::= except_stmt+
|
||||
|
||||
except_stmt ::= except_cond1 except_suite
|
||||
except_stmt ::= except
|
||||
@@ -239,7 +236,7 @@ class Python2Parser(PythonParser):
|
||||
"""
|
||||
|
||||
def add_custom_rules(self, tokens, customize):
|
||||
'''
|
||||
"""
|
||||
Special handling for opcodes such as those that take a variable number
|
||||
of arguments -- we add a new rule for each:
|
||||
|
||||
@@ -258,7 +255,7 @@ class Python2Parser(PythonParser):
|
||||
expr ::= expr {expr}^n CALL_FUNCTION_KW_n POP_TOP
|
||||
|
||||
PyPy adds custom rules here as well
|
||||
'''
|
||||
"""
|
||||
for opname, v in list(customize.items()):
|
||||
opname_base = opname[:opname.rfind('_')]
|
||||
if opname == 'PyPy':
|
||||
@@ -387,6 +384,26 @@ class Python2Parser(PythonParser):
|
||||
else:
|
||||
raise Exception('unknown customize token %s' % opname)
|
||||
self.add_unique_rule(rule, opname_base, v, customize)
|
||||
pass
|
||||
self.check_reduce['augassign1'] = 'AST'
|
||||
self.check_reduce['augassign2'] = 'AST'
|
||||
self.check_reduce['_stmts'] = 'AST'
|
||||
return
|
||||
|
||||
def reduce_is_invalid(self, rule, ast, tokens, first, last):
|
||||
lhs = rule[0]
|
||||
if lhs in ('augassign1', 'augassign2') and ast[0][0] == 'and':
|
||||
return True
|
||||
elif lhs == '_stmts':
|
||||
for i, stmt in enumerate(ast):
|
||||
if stmt == '_stmts':
|
||||
stmt = stmt[0]
|
||||
assert stmt == 'stmt'
|
||||
if stmt[0] == 'return_stmt':
|
||||
return i+1 != len(ast)
|
||||
pass
|
||||
return False
|
||||
return False
|
||||
|
||||
class Python2ParserSingle(Python2Parser, PythonParserSingle):
|
||||
pass
|
||||
|
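The check_reduce/reduce_is_invalid pairing added above acts as a veto: after the parser matches a rule, the callback can inspect the candidate subtree (mode 'AST') and reject the reduction. A standalone sketch of the _stmts check, using plain tuples as hypothetical stand-ins for real parse-tree nodes:

def reduce_is_invalid(rule, ast):
    # Veto a _stmts reduction when a return statement is not last:
    # anything after it would be unreachable, so the parse is wrong.
    if rule[0] == '_stmts':
        for i, (name, _children) in enumerate(ast):
            if name == 'return_stmt' and i + 1 != len(ast):
                return True
    return False

bad = [('return_stmt', []), ('expr_stmt', [])]
good = [('expr_stmt', []), ('return_stmt', [])]
print(reduce_is_invalid(('_stmts', ('stmt', 'stmt')), bad))   # True
print(reduce_is_invalid(('_stmts', ('stmt', 'stmt')), good))  # False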
uncompyle6/parsers/parse26.py
@@ -13,7 +13,6 @@ class Python26Parser(Python2Parser):
         super(Python26Parser, self).__init__(debug_parser)
         self.customized = {}
 
-
     def p_try_except26(self, args):
         """
         except_stmt ::= except_cond3 except_suite

@@ -246,8 +245,8 @@ if __name__ == '__main__':
         """.split()))
     remain_tokens = set(tokens) - opcode_set
     import re
-    remain_tokens = set([re.sub('_\d+$','', t) for t in remain_tokens])
-    remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
+    remain_tokens = set([re.sub('_\d+$', '', t) for t in remain_tokens])
+    remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
     # print(sorted(p.rule2name.items()))
uncompyle6/parsers/parse27.py
@@ -49,7 +49,10 @@ class Python27Parser(Python2Parser):
 
     def p_jump27(self, args):
         """
-        _ifstmts_jump ::= c_stmts_opt JUMP_FORWARD COME_FROM
+        come_froms ::= come_froms COME_FROM
+        come_froms ::= COME_FROM
+
+        _ifstmts_jump ::= c_stmts_opt JUMP_FORWARD come_froms
         bp_come_from ::= POP_BLOCK COME_FROM
 
         # FIXME: Common with 3.0+
uncompyle6/parsers/parse3.py
@@ -98,8 +98,7 @@ class Python3Parser(PythonParser):
         del_stmt ::= expr DELETE_ATTR
 
         kwarg ::= LOAD_CONST expr
-        kwargs ::= kwargs kwarg
-        kwargs ::=
+        kwargs ::= kwarg*
 
         classdef ::= build_class designator

@@ -144,8 +143,6 @@ class Python3Parser(PythonParser):
         ifelsestmtr ::= testexpr return_if_stmts return_stmts
 
         ifelsestmtl ::= testexpr c_stmts_opt JUMP_BACK else_suitel
-        ifelsestmtl ::= testexpr c_stmts_opt JUMP_BACK else_suitel JUMP_BACK COME_FROM_LOOP
-        ifelsestmtl ::= testexpr c_stmts_opt JUMP_BACK else_suitel COME_FROM_LOOP
 
 
         # FIXME: this feels like a hack. Is it just 1 or two

@@ -244,7 +241,6 @@ class Python3Parser(PythonParser):
         c_stmts_opt34 ::= JUMP_BACK JUMP_ABSOLUTE c_stmts_opt
         """
 
-
     def p_def_annotations3(self, args):
         """
         # Annotated functions

@@ -333,11 +329,12 @@ class Python3Parser(PythonParser):
         whilestmt ::= SETUP_LOOP testexpr return_stmts POP_BLOCK
                       COME_FROM_LOOP
 
-        while1elsestmt ::= SETUP_LOOP l_stmts JUMP_BACK
-                           else_suite
-
         whileelsestmt ::= SETUP_LOOP testexpr l_stmts_opt JUMP_BACK POP_BLOCK
                           else_suite COME_FROM_LOOP
 
+        while1elsestmt ::= SETUP_LOOP l_stmts JUMP_BACK
+                           else_suite
+
         whileelselaststmt ::= SETUP_LOOP testexpr l_stmts_opt JUMP_BACK POP_BLOCK
                               else_suitec COME_FROM_LOOP

@@ -346,6 +343,7 @@ class Python3Parser(PythonParser):
 
         # FIXME: Python 3.? starts adding branch optimization? Put this starting there.
         while1stmt ::= SETUP_LOOP l_stmts
+        while1stmt ::= SETUP_LOOP l_stmts COME_FROM_LOOP
 
         # FIXME: investigate - can code really produce a NOP?
         whileTruestmt ::= SETUP_LOOP l_stmts_opt JUMP_BACK NOP

@@ -372,9 +370,7 @@ class Python3Parser(PythonParser):
         # Python 3.4+
         expr ::= LOAD_CLASSDEREF
 
-        binary_subscr2 ::= expr expr DUP_TOP_TWO BINARY_SUBSCR
         # Python3 drops slice0..slice3
-
         '''
 
     @staticmethod

@@ -438,10 +434,10 @@ class Python3Parser(PythonParser):
             args_kw = (token.attr >> 8) & 0xff
             nak = ( len(opname)-len('CALL_FUNCTION') ) // 3
             token.type = self.call_fn_name(token)
-            rule = ('call_function ::= expr '
-                    + ('pos_arg ' * args_pos)
-                    + ('kwarg ' * args_kw)
-                    + 'expr ' * nak + token.type)
+            rule = ('call_function ::= expr ' +
+                    ('pos_arg ' * args_pos) +
+                    ('kwarg ' * args_kw) +
+                    'expr ' * nak + token.type)
             self.add_unique_rule(rule, token.type, args_pos, customize)
             rule = ('classdefdeco2 ::= LOAD_BUILD_CLASS mkfunc %s%s_%d'
                     % (('expr ' * (args_pos-1)), opname, args_pos))

@@ -636,10 +632,10 @@ class Python3Parser(PythonParser):
 
             # number of apply equiv arguments:
             nak = ( len(opname_base)-len('CALL_METHOD') ) // 3
-            rule = ('call_function ::= expr '
-                    + ('pos_arg ' * args_pos)
-                    + ('kwarg ' * args_kw)
-                    + 'expr ' * nak + opname)
+            rule = ('call_function ::= expr ' +
+                    ('pos_arg ' * args_pos) +
+                    ('kwarg ' * args_kw) +
+                    'expr ' * nak + opname)
             self.add_unique_rule(rule, opname, token.attr, customize)
         elif opname.startswith('MAKE_CLOSURE'):
             # DRY with MAKE_FUNCTION

@@ -683,8 +679,30 @@ class Python3Parser(PythonParser):
                 rule = ('mkfunc ::= %sload_closure LOAD_CONST %s'
                         % ('expr ' * args_pos, opname))
                 self.add_unique_rule(rule, opname, token.attr, customize)
+            pass
+        self.check_reduce['augassign1'] = 'AST'
+        self.check_reduce['augassign2'] = 'AST'
+        self.check_reduce['while1stmt'] = 'noAST'
+        return
+
+    def reduce_is_invalid(self, rule, ast, tokens, first, last):
+        lhs = rule[0]
+        if lhs in ('augassign1', 'augassign2') and ast[0][0] == 'and':
+            return True
+        elif lhs == 'while1stmt':
+            if tokens[last] in ('COME_FROM_LOOP', 'JUMP_BACK'):
+                # jump_back should be right after SETUP_LOOP. Test?
+                last += 1
+            while last < len(tokens) and isinstance(tokens[last].offset, str):
+                last += 1
+            if last < len(tokens):
+                offset = tokens[last].offset
+                assert tokens[first] == 'SETUP_LOOP'
+                if offset != tokens[first].attr:
+                    return True
+            return False
+        return False
+
 class Python30Parser(Python3Parser):
 
     def p_30(self, args):
uncompyle6/parsers/parse32.py
@@ -8,9 +8,6 @@ from uncompyle6.parsers.parse3 import Python3Parser
 class Python32Parser(Python3Parser):
     def p_32to35(self, args):
         """
-        # In Python 3.2+, DUP_TOPX is DUP_TOP_TWO
-        binary_subscr2 ::= expr expr DUP_TOP_TWO BINARY_SUBSCR
-
         # Store locals is only in Python 3.0 to 3.3
         stmt ::= store_locals
         store_locals ::= LOAD_FAST STORE_LOCALS
uncompyle6/parsers/parse33.py
@@ -18,7 +18,6 @@ class Python33Parser(Python32Parser):
         # actions that want c_stmts_opt at index 1
 
         iflaststmt ::= testexpr c_stmts_opt33
-        iflaststmtl ::= testexpr c_stmts_opt
         c_stmts_opt33 ::= JUMP_BACK JUMP_ABSOLUTE c_stmts_opt
         _ifstmts_jump ::= c_stmts_opt JUMP_FORWARD _come_from
uncompyle6/parsers/parse34.py
@@ -17,8 +17,6 @@ class Python34Parser(Python33Parser):
         """
         # Python 3.4+ optimizes the trailing two JUMPS away
 
-        for_block ::= l_stmts
-
         # Is this 3.4 only?
         yield_from ::= expr GET_ITER LOAD_CONST YIELD_FROM
         """

@@ -42,8 +40,8 @@ if __name__ == '__main__':
         """.split()))
     remain_tokens = set(tokens) - opcode_set
     import re
-    remain_tokens = set([re.sub('_\d+$','', t) for t in remain_tokens])
-    remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
+    remain_tokens = set([re.sub('_\d+$', '', t) for t in remain_tokens])
+    remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
     # print(sorted(p.rule2name.items()))
uncompyle6/parsers/parse35.py
@@ -44,13 +44,7 @@ class Python35Parser(Python34Parser):
         # Python 3.3+ also has yield from. 3.5 does it
         # differently than 3.3, 3.4
 
         expr ::= yield_from
         yield_from ::= expr GET_YIELD_FROM_ITER LOAD_CONST YIELD_FROM
-
-        # Python 3.4+ has more loop optimization that removes
-        # JUMP_FORWARD in some cases, and hence we also don't
-        # see COME_FROM
-        _ifstmts_jump ::= c_stmts_opt
         """
 class Python35ParserSingle(Python35Parser, PythonParserSingle):
     pass

@@ -71,8 +65,8 @@ if __name__ == '__main__':
         """.split()))
     remain_tokens = set(tokens) - opcode_set
     import re
-    remain_tokens = set([re.sub('_\d+$','', t) for t in remain_tokens])
-    remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
+    remain_tokens = set([re.sub('_\d+$', '', t) for t in remain_tokens])
+    remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
     # print(sorted(p.rule2name.items()))
uncompyle6/parsers/parse36.py
@@ -16,8 +16,7 @@ class Python36Parser(Python35Parser):
     def p_36misc(self, args):
         """
         fstring_multi ::= fstring_expr_or_strs BUILD_STRING
-        fstring_expr_or_strs ::= fstring_expr_or_strs fstring_expr_or_str
-        fstring_expr_or_strs ::= fstring_expr_or_str
+        fstring_expr_or_strs ::= fstring_expr_or_str+
         """
 
     def add_custom_rules(self, tokens, customize):
uncompyle6/scanner.py
@@ -25,7 +25,8 @@ if PYTHON3:
     intern = sys.intern
     L65536 = 65536
 
-    def long(l): l
+    def long(l):
+        return l
 else:
     L65536 = long(65536) # NOQA
uncompyle6/scanners/scanner2.py
@@ -160,11 +160,15 @@ class Scanner2(scan.Scanner):
                 # we sort them). That way, specific COME_FROM tags will match up
                 # properly. For example, a "loop" with an "if" nested in it should have the
                 # "loop" tag last so the grammar rule matches that properly.
+                # last_offset = -1
                 for jump_offset in sorted(jump_targets[offset], reverse=True):
+                    # if jump_offset == last_offset:
+                    #     continue
+                    # last_offset = jump_offset
                     come_from_name = 'COME_FROM'
-                    opname = self.opc.opname[self.code[jump_offset]]
-                    if opname.startswith('SETUP_') and self.version == 2.7:
-                        come_from_type = opname[len('SETUP_'):]
+                    op_name = self.opc.opname[self.code[jump_offset]]
+                    if op_name.startswith('SETUP_') and self.version == 2.7:
+                        come_from_type = op_name[len('SETUP_'):]
                         if come_from_type not in ('LOOP', 'EXCEPT'):
                             come_from_name = 'COME_FROM_%s' % come_from_type
                         pass

@@ -175,7 +179,7 @@ class Scanner2(scan.Scanner):
                     jump_idx += 1
 
             op = self.code[offset]
-            opname = self.opc.opname[op]
+            op_name = self.opc.opname[op]
 
             oparg = None; pattr = None
             has_arg = op_has_argument(op, self.opc)

@@ -190,14 +194,14 @@ class Scanner2(scan.Scanner):
                 if iscode(const):
                     oparg = const
                     if const.co_name == '<lambda>':
-                        assert opname == 'LOAD_CONST'
-                        opname = 'LOAD_LAMBDA'
+                        assert op_name == 'LOAD_CONST'
+                        op_name = 'LOAD_LAMBDA'
                     elif const.co_name == '<genexpr>':
-                        opname = 'LOAD_GENEXPR'
+                        op_name = 'LOAD_GENEXPR'
                     elif const.co_name == '<dictcomp>':
-                        opname = 'LOAD_DICTCOMP'
+                        op_name = 'LOAD_DICTCOMP'
                     elif const.co_name == '<setcomp>':
-                        opname = 'LOAD_SETCOMP'
+                        op_name = 'LOAD_SETCOMP'
                     # verify() uses 'pattr' for comparison, since 'attr'
                     # now holds Code(const) and thus can not be used
                     # for comparison (todo: think about changing this)

@@ -233,20 +237,20 @@ class Scanner2(scan.Scanner):
                        self.code[self.prev[offset]] == self.opc.LOAD_CLOSURE:
                     continue
                 else:
-                    if self.is_pypy and not oparg and opname == 'BUILD_MAP':
-                        opname = 'BUILD_MAP_n'
+                    if self.is_pypy and not oparg and op_name == 'BUILD_MAP':
+                        op_name = 'BUILD_MAP_n'
                     else:
-                        opname = '%s_%d' % (opname, oparg)
+                        op_name = '%s_%d' % (op_name, oparg)
                     if op != self.opc.BUILD_SLICE:
-                        customize[opname] = oparg
-            elif self.is_pypy and opname in ('LOOKUP_METHOD',
+                        customize[op_name] = oparg
+            elif self.is_pypy and op_name in ('LOOKUP_METHOD',
                                              'JUMP_IF_NOT_DEBUG',
                                              'SETUP_EXCEPT',
                                              'SETUP_FINALLY'):
                 # The value in the dict is in special cases in semantic actions, such
                 # as CALL_FUNCTION. The value is not used in these cases, so we put
                 # in arbitrary value 0.
-                customize[opname] = 0
+                customize[op_name] = 0
             elif op == self.opc.JUMP_ABSOLUTE:
                 # Further classify JUMP_ABSOLUTE into backward jumps
                 # which are used in loops, and "CONTINUE" jumps which

@@ -265,16 +269,16 @@ class Scanner2(scan.Scanner):
                         and self.code[offset+3] not in (self.opc.END_FINALLY,
                                                         self.opc.POP_BLOCK)
                         and offset not in self.not_continue):
-                    opname = 'CONTINUE'
+                    op_name = 'CONTINUE'
                 else:
-                    opname = 'JUMP_BACK'
+                    op_name = 'JUMP_BACK'
 
             elif op == self.opc.LOAD_GLOBAL:
                 if offset in self.load_asserts:
-                    opname = 'LOAD_ASSERT'
+                    op_name = 'LOAD_ASSERT'
             elif op == self.opc.RETURN_VALUE:
                 if offset in self.return_end_ifs:
-                    opname = 'RETURN_END_IF'
+                    op_name = 'RETURN_END_IF'
 
             if offset in self.linestartoffsets:
                 linestart = self.linestartoffsets[offset]

@@ -283,7 +287,7 @@ class Scanner2(scan.Scanner):
 
             if offset not in replace:
                 tokens.append(Token(
-                    opname, oparg, pattr, offset, linestart, op,
+                    op_name, oparg, pattr, offset, linestart, op,
                     has_arg, self.opc))
             else:
                 tokens.append(Token(

@@ -778,21 +782,23 @@ class Scanner2(scan.Scanner):
         if offset in self.ignore_if:
             return
 
-        if code[pre[rtarget]] == self.opc.JUMP_ABSOLUTE and pre[rtarget] in self.stmts \
-                and pre[rtarget] != offset and pre[pre[rtarget]] != offset:
-            if code[rtarget] == self.opc.JUMP_ABSOLUTE and code[rtarget+3] == self.opc.POP_BLOCK:
-                if code[pre[pre[rtarget]]] != self.opc.JUMP_ABSOLUTE:
-                    pass
-                elif self.get_target(pre[pre[rtarget]]) != target:
-                    pass
+        if self.version == 2.7:
+            if code[pre[rtarget]] == self.opc.JUMP_ABSOLUTE and pre[rtarget] in self.stmts \
+                    and pre[rtarget] != offset and pre[pre[rtarget]] != offset:
+                if code[rtarget] == self.opc.JUMP_ABSOLUTE and code[rtarget+3] == self.opc.POP_BLOCK:
+                    if code[pre[pre[rtarget]]] != self.opc.JUMP_ABSOLUTE:
+                        pass
+                    elif self.get_target(pre[pre[rtarget]]) != target:
+                        pass
+                    else:
+                        rtarget = pre[rtarget]
                 else:
                     rtarget = pre[rtarget]
-            else:
-                rtarget = pre[rtarget]
 
         # Does the "if" jump just beyond a jump op, then this is probably an if statement
         pre_rtarget = pre[rtarget]
         code_pre_rtarget = code[pre_rtarget]
 
         if code_pre_rtarget in self.jump_forward:
             if_end = self.get_target(pre_rtarget)

@@ -820,6 +826,7 @@ class Scanner2(scan.Scanner):
             self.structs.append({'type': 'if-then',
                                  'start': start-3,
                                  'end': pre_rtarget})
+
         self.not_continue.add(pre_rtarget)
 
         if rtarget < end:

@@ -895,7 +902,6 @@ class Scanner2(scan.Scanner):
                 pass
             pass
 
-
        # FIXME: All the < 2.7 conditions are horrible. We need a better way.
        if label is not None and label != -1:
            # In Python < 2.7, the POP_TOP in:
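The effect of the first hunk above is that jump targets get typed COME_FROM pseudo-tokens when the jump source is a SETUP_ opcode (on 2.7, and only outside LOOP/EXCEPT). A self-contained sketch of the naming rule; come_from_tokens is a hypothetical helper, not the scanner's API:

def come_from_tokens(jump_sources, opname_at):
    # Emit one pseudo-token per jump source, most distant source first,
    # so the specific tags line up with the grammar rules that use them.
    toks = []
    for src in sorted(jump_sources, reverse=True):
        name = 'COME_FROM'
        op = opname_at(src)
        if op.startswith('SETUP_'):
            kind = op[len('SETUP_'):]
            if kind not in ('LOOP', 'EXCEPT'):
                name = 'COME_FROM_%s' % kind
        toks.append(name)
    return toks

opnames = {10: 'SETUP_FINALLY', 40: 'JUMP_FORWARD'}
print(come_from_tokens([10, 40], opnames.get))
# -> ['COME_FROM', 'COME_FROM_FINALLY']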
uncompyle6/scanners/scanner23.py
@@ -25,5 +25,5 @@ class Scanner23(scan.Scanner24):
         # These are the only differences in initialization between
         # 2.3-2.6
         self.version = 2.3
-        self.genexpr_name = '<generator expression>';
+        self.genexpr_name = '<generator expression>'
         return
uncompyle6/scanners/scanner24.py
@@ -25,5 +25,5 @@ class Scanner24(scan.Scanner25):
         self.opc = opcode_24
         self.opname = opcode_24.opname
         self.version = 2.4
-        self.genexpr_name = '<generator expression>';
+        self.genexpr_name = '<generator expression>'
         return
uncompyle6/scanners/scanner26.py
@@ -235,7 +235,7 @@ class Scanner26(scan.Scanner2):
                 if op != self.opc.BUILD_SLICE:
                     customize[op_name] = oparg
             elif op == self.opc.JUMP_ABSOLUTE:
-                # Further classifhy JUMP_ABSOLUTE into backward jumps
+                # Further classify JUMP_ABSOLUTE into backward jumps
                 # which are used in loops, and "CONTINUE" jumps which
                 # may appear in a "continue" statement. The loop-type
                 # and continue-type jumps will help us classify loop

@@ -256,6 +256,9 @@ class Scanner26(scan.Scanner2):
                         # if x: continue
                         # the "continue" is not on a new line.
                         if tokens[-1].type == 'JUMP_BACK':
+                            # We need 'intern' since we have
+                            # already processed the previous
+                            # token.
                             tokens[-1].type = intern('CONTINUE')
 
             elif op == self.opc.LOAD_GLOBAL:
uncompyle6/scanners/scanner3.py
@@ -126,7 +126,6 @@ class Scanner3(Scanner):
             varargs_ops.add(self.opc.CALL_METHOD)
         self.varargs_ops = frozenset(varargs_ops)
 
-
     def opName(self, offset):
         return self.opc.opname[self.code[offset]]

@@ -324,9 +323,10 @@ class Scanner3(Scanner):
                     # FIXME: this is a hack to catch stuff like:
                     #   if x: continue
                     #   the "continue" is not on a new line.
-                    # There are other situations were we don't catch
+                    # There are other situations where we don't catch
                     # CONTINUE as well.
-                    if tokens[-1].type == 'JUMP_BACK':
+                    if tokens[-1].type == 'JUMP_BACK' and tokens[-1].attr <= argval:
+                        # intern is used because we are changing the *previous* token
                         tokens[-1].type = intern('CONTINUE')
 
                 elif op == self.opc.RETURN_VALUE:
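The pattern the tightened test above is after: in "if x: continue" the continue is just another jump to the loop start, emitted on the same line as the test, so the scanner retags the previous JUMP_BACK token. Disassembling a small example shows the shape (output varies by Python version):

import dis

def f(seq):
    for x in seq:
        if x:
            continue
        print(x)

dis.dis(f)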
uncompyle6/scanners/tok.py
@@ -29,7 +29,7 @@ class Token:
         self.pattr = pattr
         self.offset = offset
         self.linestart = linestart
-        if has_arg == False:
+        if has_arg is False:
             self.attr = None
             self.pattr = None
         self.opc = opc
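The reason for "is False" rather than "== False" above: equality conflates 0 and False while identity does not, so a token argument of 0 no longer gets its attr/pattr wiped:

print(0 == False)  # True  -- "== False" would also match an arg of 0
print(0 is False)  # False -- "is False" matches only the singleton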
uncompyle6/semantics/helper.py
@@ -8,12 +8,13 @@ else:
     maxint = sys.maxint
 
 def print_docstring(self, indent, docstring):
-    ## FIXME: put this into a testable function.
-    if docstring.find('"""') == -1:
-        quote = '"""'
-    else:
-        quote = "'''"
-
+    try:
+        if docstring.find('"""') == -1:
+            quote = '"""'
+        else:
+            quote = "'''"
+    except:
+        return False
     self.write(indent)
     if not PYTHON3 and not isinstance(docstring, str):
         # Must be unicode in Python2

@@ -73,6 +74,7 @@ def print_docstring(self, indent, docstring):
     for line in trimmed[1:-1]:
         self.println( indent, line )
     self.println(indent, trimmed[-1], quote)
+    return True
 
 # if __name__ == '__main__':
 #     if PYTHON3:
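The quote-selection rule that print_docstring now wraps in try/except, in isolation: prefer triple double quotes unless the docstring already contains them. pick_quote is an illustrative helper, not part of the module:

def pick_quote(docstring):
    return "'''" if '"""' in docstring else '"""'

print(pick_quote('plain text'))      # """
print(pick_quote('has """ inside'))  # '''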
uncompyle6/semantics/pysource.py
@@ -33,7 +33,7 @@ def find_globals(node, globs):
 def find_none(node):
     for n in node:
         if isinstance(n, AST):
-            if not n in ('return_stmt', 'return_if_stmt'):
+            if n not in ('return_stmt', 'return_if_stmt'):
                 if find_none(n):
                     return True
         elif n.type == 'LOAD_CONST' and n.pattr is None:

@@ -485,6 +485,12 @@ class SourceWalker(GenericASTTraversal, object):
 
         return
 
+    def indent_if_source_nl(self, line_number, indent):
+        if (line_number != self.line_number):
+            self.write("\n" + self.indent + INDENT_PER_LEVEL[:-1])
+        return self.line_number
+
+
     def customize_for_version(self, is_pypy, version):
         if is_pypy:
             ########################

@@ -636,6 +642,7 @@ class SourceWalker(GenericASTTraversal, object):
                 })
 
+            FSTRING_CONVERSION_MAP = {1: '!s', 2: '!r', 3: '!a'}
 
             def f_conversion(node):
                 node.conversion = FSTRING_CONVERSION_MAP.get(node.data[1].attr, '')

@@ -673,9 +680,8 @@ class SourceWalker(GenericASTTraversal, object):
                                None)
 
     def set_pos_info(self, node):
-        if hasattr(node, 'offset'):
-            if node.offset in self.linestarts:
-                self.line_number = self.linestarts[node.offset]
+        if hasattr(node, 'linestart') and node.linestart:
+            self.line_number = node.linestart
 
     def preorder(self, node=None):
         super(SourceWalker, self).preorder(node)

@@ -893,7 +899,6 @@ class SourceWalker(GenericASTTraversal, object):
             pass
         self.write(')')
 
-
     def n_LOAD_CONST(self, node):
         data = node.pattr; datatype = type(data)
         if isinstance(datatype, int) and data == minint:

@@ -1133,6 +1138,7 @@ class SourceWalker(GenericASTTraversal, object):
         assert n == 'lc_body'
         self.write( '[ ')
 
+
         if self.version >= 2.7:
             expr = n[0]
             list_iter = node[-1]

@@ -1148,9 +1154,19 @@ class SourceWalker(GenericASTTraversal, object):
 
         # FIXME: use source line numbers for directing line breaks
 
+        line_number = self.line_number
+        last_line = self.f.getvalue().split("\n")[-1]
+        l = len(last_line)
+        indent = ' ' * (l-1)
+
         self.preorder(expr)
+        line_number = self.indent_if_source_nl(line_number, indent)
         self.preorder(list_iter)
-        self.write( ' ]')
+        l2 = self.indent_if_source_nl(line_number, indent)
+        if l2 != line_number:
+            self.write(' ' * (len(indent) - len(self.indent) - 1) + ']')
+        else:
+            self.write( ' ]')
         self.prec = p
         self.prune() # stop recursing

@@ -1653,9 +1669,8 @@ class SourceWalker(GenericASTTraversal, object):
                 self.write(sep)
             name = self.traverse(l[i], indent='')
             if i > 0:
-                if (line_number != self.line_number):
-                    self.write("\n" + self.indent + INDENT_PER_LEVEL[:-1])
-                    pass
+                line_number = self.indent_if_source_nl(line_number,
+                                                       self.indent + INDENT_PER_LEVEL[:-1])
             line_number = self.line_number
             self.write(name, ': ')
             value = self.traverse(l[i+1], indent=self.indent+(len(name)+2)*' ')

@@ -1680,9 +1695,8 @@ class SourceWalker(GenericASTTraversal, object):
                 self.write(sep)
             name = self.traverse(l[i+1], indent='')
             if i > 0:
-                if (line_number != self.line_number):
-                    self.write("\n" + self.indent + INDENT_PER_LEVEL[:-1])
-                    pass
+                line_number = self.indent_if_source_nl(line_number,
+                                                       self.indent + INDENT_PER_LEVEL[:-1])
                 pass
             line_number = self.line_number
             self.write(name, ': ')

@@ -1711,13 +1725,12 @@ class SourceWalker(GenericASTTraversal, object):
             # kv3 ::= expr expr STORE_MAP
 
             # FIXME: DRY this and the above
+            indent = self.indent + "  "
             if kv == 'kv':
                 self.write(sep)
                 name = self.traverse(kv[-2], indent='')
                 if first_time:
-                    if (line_number != self.line_number):
-                        self.write("\n" + self.indent + "  ")
-                        pass
+                    line_number = self.indent_if_source_nl(line_number, indent)
                     first_time = False
                     pass
                 line_number = self.line_number

@@ -1727,9 +1740,7 @@ class SourceWalker(GenericASTTraversal, object):
                 self.write(sep)
                 name = self.traverse(kv[1], indent='')
                 if first_time:
-                    if (line_number != self.line_number):
-                        self.write("\n" + self.indent + "  ")
-                        pass
+                    line_number = self.indent_if_source_nl(line_number, indent)
                     first_time = False
                     pass
                 line_number = self.line_number

@@ -1739,9 +1750,7 @@ class SourceWalker(GenericASTTraversal, object):
                 self.write(sep)
                 name = self.traverse(kv[-2], indent='')
                 if first_time:
-                    if (line_number != self.line_number):
-                        self.write("\n" + self.indent + "  ")
-                        pass
+                    line_number = self.indent_if_source_nl(line_number, indent)
                     first_time = False
                     pass
                 line_number = self.line_number

@@ -1912,18 +1921,9 @@ class SourceWalker(GenericASTTraversal, object):
                node[0].attr == 1):
                 self.write(',')
         elif typ == 'c':
-            # FIXME: In Python3 sometimes like from
-            #   importfrom
-            #     importlist2
-            #       import_as
-            #        designator
-            #           STORE_NAME        'load_entry_point'
-            #   POP_TOP           ''   (2, (0, 1))
-            # we get that weird POP_TOP tuple, e.g (2, (0,1)).
-            # Why? and
-            # Is there some sort of invalid bounds access going on?
-            if isinstance(entry[arg], int):
-                self.preorder(node[entry[arg]])
+            entry_node = node[entry[arg]]
+            self.preorder(entry_node)
             arg += 1
         elif typ == 'p':
             p = self.prec

@@ -2142,9 +2142,9 @@ class SourceWalker(GenericASTTraversal, object):
                     docstring = ast[i][0][0][0][0].pattr
                 except:
                     docstring = code.co_consts[0]
-                print_docstring(self, indent, docstring)
-                self.println()
-                del ast[i]
+                if print_docstring(self, indent, docstring):
+                    self.println()
+                    del ast[i]
 
 
         # the function defining a class normally returns locals(); we
uncompyle6/verify.py
@@ -315,7 +315,7 @@ def cmp_code_objects(version, is_pypy, code_obj1, code_obj2,
                 i2 += 2
                 continue
             elif tokens1[i1].type == 'LOAD_NAME' and tokens2[i2].type == 'LOAD_CONST' \
-                    and tokens1[i1].pattr == 'None' and tokens2[i2].pattr == None:
+                    and tokens1[i1].pattr == 'None' and tokens2[i2].pattr is None:
                 pass
             else:
                 raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1],
uncompyle6/version.py
@@ -1,3 +1,3 @@
 # This file is suitable for sourcing inside bash as
 # well as importing into Python
-VERSION='2.9.6'
+VERSION='2.9.7'