python-uncompyle6 (mirror of https://github.com/rocky/python-uncompyle6.git, synced 2025-08-04 01:09:52 +08:00)

Compare commits: release-2. ... release-py
235 Commits
Author | SHA1 | Date
---|---|---
(235 commit rows follow in the original page; only the SHA1 column was captured, running from 4fb379afb4 at the top of the list down to a9349b8f3d at the bottom. The author, date, and commit-message columns are not present in this export.)
@@ -3,13 +3,7 @@ language: python
sudo: false

python:
- '3.5'
- '2.7.12'
- '2.6'
- '3.3'
- '3.4'
- '3.2'
- '3.6'
- '2.7' # this is a cheat here because travis doesn't do 2.4-2.6

install:
- pip install -e .
Makefile (2 changes)
@@ -39,7 +39,7 @@ check-3.0 check-3.1 check-3.2 check-3.5 check-3.6:
check-3.7: pytest

#:Tests for Python 2.6 (doesn't have pytest)
check-2.6:
check-2.4 check-2.5 check-2.6:
    $(MAKE) -C test $@

#:PyPy 2.6.1 PyPy 5.0.1, or PyPy 5.8.0-beta0
NEWS (2 changes)
@@ -6,7 +6,7 @@ uncompyle6 2.14.3 2017-01-19
- Fix some 2.7 and 2.6 parser bugs
- Fix whilelse parsing bugs
- Correct 2.5- decorator parsing
- grammar for decorators matchies AST more a little better
- grammar for decorators matches AST a little more
- better tests in setup.py for running the right version of Python
- Fix 2.6- parsing of "for .. try/else" ... with "continue" inside
README.rst (14 changes)
@@ -170,11 +170,15 @@ interpreter for versions 1.5, 1.6, and 2.0.
In the Python 3 series, Python support is is strongest around 3.4 or
3.3 and drops off as you move further away from those versions. Python
3.6 changes things drastically by using word codes rather than byte
codes. That has been addressed, but then it also changes function call
opcodes and its semantics and has more problems with control flow than
3.5 has. Between Python 3.5, 3.6 and 3.7 there have been major changes
to the `MAKE_FUNCTION` and `CALL_FUNCTION` instructions. Those are
not handled yet.
codes. As a result, the jump offset field in a jump instruction
argument has been reduced. This makes the `EXTENDED_ARG` instructions
are now more prevalent in jump instruction; previously they had been
rare. Perhaps to compensate for the additional `EXTENDED_ARG`
instructions, additional jump optimization has been added. So in sum
handling control flow by ad hoc means as is currently done is worse.

Also, between Python 3.5, 3.6 and 3.7 there have been major changes to the
`MAKE_FUNCTION` and `CALL_FUNCTION` instructions.

Currently not all Python magic numbers are supported. Specifically in
some versions of Python, notably Python 3.6, the magic number has
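Aside: the README's point about `EXTENDED_ARG` becoming common before jumps under 3.6 word codes can be seen directly with the standard-library `dis` module. A minimal sketch, assuming only CPython's `dis` API; the sample function below is hypothetical, and any sufficiently large function body will do.

    import dis

    def sample(flag, items):
        # A conditional jump; once a function body grows large enough, the
        # jump target no longer fits in a single word-code argument byte,
        # so CPython emits an EXTENDED_ARG instruction right before the jump.
        total = 0
        for x in items:
            if flag and x:
                total += x
        return total

    for inst in dis.get_instructions(sample):
        if inst.opname == "EXTENDED_ARG" or "JUMP" in inst.opname:
            print(inst.offset, inst.opname, inst.argval)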
@@ -41,7 +41,7 @@ entry_points = {
]}
ftp_url = None
install_requires = ['spark-parser >= 1.8.5, < 1.9.0',
                    'xdis >= 3.6.6, < 3.7.0', 'six']
                    'xdis >= 3.6.6, < 3.7.0']
license = 'MIT'
mailing_list = 'python-debugger@googlegroups.com'
modname = 'uncompyle6'
admin-tools/check-newer-versions.sh (0 changes, Executable file → Normal file)
admin-tools/check-older-versions.sh (0 changes, Executable file → Normal file)

admin-tools/how-to-make-a-release.txt (new file, 46 lines)
@@ -0,0 +1,46 @@
git pull

Change version in uncompyle6/version.py
source uncompyle6/version.py
echo $VERSION
git commit -m"Get ready for release $VERSION" .

Update ChangeLog:
make ChangeLog

Update NEWS from ChangeLog
make check

git commit --amend .

git push

Make sure pyenv is running
# Pyenv

source admin-tools/check-newer-versions.sh


# Switch to python-2.4 and build that first...
source admin-tools/setup-python-2.4

rm ChangeLog
git merge master

Update NEWS from master branch

git commit -m"Get ready for release $VERSION" .

source admin-tools/check-older-versions.sh
source admin-tools/check-newer-versions.sh

make-dist-older.sh

git tag release-python-2.4-$VERSION

./make-dist-newer.sh

git tag release-$VERSION


twine upload dist/uncompyle6-${VERSION}*
admin-tools/setup-master.sh (0 changes, Executable file → Normal file)
admin-tools/setup-python-2.4.sh (0 changes, Executable file → Normal file)

admin-tools/update-sources.sh (new Executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash
cd $(dirname ${BASH_SOURCE[0]})/..
git pull
@@ -10,4 +10,4 @@ dependencies:
- pip install pytest==3.2.5 hypothesis
test:
  override:
- python ./setup.py develop && make check-2.7
- python ./setup.py develop && make check-2.6
@@ -29,7 +29,7 @@ def list_comp():
    [y for y in range(3)]

def get_parsed_for_fn(fn):
    code = fn.__code__ if PYTHON3 else fn.func_code
    code = fn.func_code
    return deparse(PYTHON_VERSION, code)

def check_expect(expect, parsed):
@@ -11,20 +11,14 @@ src_dir = get_srcdir()
os.chdir(src_dir)


@pytest.mark.parametrize(("test_tuple", "function_to_test"), [
    (
        ('../test/bytecode_2.7/05_if.pyc', 'testdata/if-2.7.right',),
        disassemble_file
    ),
    (
        ('../test/bytecode_2.7/05_ifelse.pyc', 'testdata/ifelse-2.7.right',),
        disassemble_file
    ),
@pytest.mark.parametrize(("test_tuple"), [
    ('../test/bytecode_2.7/05_if.pyc', 'testdata/if-2.7.right',),
    ('../test/bytecode_2.7/05_ifelse.pyc', 'testdata/ifelse-2.7.right',),
])
def test_funcoutput(capfd, test_tuple, function_to_test):
def test_funcoutput(capfd, test_tuple):

    in_file , filename_expected = test_tuple
    function_to_test(in_file, native=False)
    in_file, filename_expected = test_tuple
    disassemble_file(in_file)
    resout, reserr = capfd.readouterr()
    expected = open(filename_expected, "r").read()
    if resout != expected:
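Aside: a minimal sketch of the call the simplified test now makes, assuming the two-argument disassemble_file(filename, outstream) signature introduced elsewhere in this diff; the .pyc path is the test-data file used above.

    import sys
    from uncompyle6.disas import disassemble_file

    # Disassemble a byte-compiled test file to stdout; the test compares
    # this output against the matching pytest/testdata/*.right file.
    disassemble_file('../test/bytecode_2.7/05_if.pyc', sys.stdout)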
@@ -10,7 +10,7 @@ else:
    maxint = sys.maxint
from uncompyle6.semantics.helper import print_docstring

class PrintFake():
class PrintFake:
    def __init__(self):
        self.pending_newlines = 0
        self.f = StringIO()
@@ -22,9 +22,8 @@ def bug_loop(disassemble, tb=None):
    disassemble(tb)

def test_if_in_for():
    code = bug.__code__
    code = bug.func_code
    scan = get_scanner(PYTHON_VERSION)
    print(PYTHON_VERSION)
    if 2.7 <= PYTHON_VERSION <= 3.0 and not IS_PYPY:
        n = scan.setup_code(code)
        bytecode = Bytecode(code, scan.opc)
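Aside: a sketch of the scanner-plus-Bytecode pattern this test exercises, using only calls that appear in this diff (get_scanner, xdis's Bytecode(code, opc), and its .dis()); `sample` is a hypothetical stand-in for the test's `bug` function.

    from uncompyle6 import PYTHON_VERSION, IS_PYPY
    from uncompyle6.scanner import get_scanner
    from xdis.bytecode import Bytecode

    def sample(items):
        for x in items:
            if x is None:
                break

    scan = get_scanner(PYTHON_VERSION, is_pypy=IS_PYPY)
    # Wrap the code object with the version-specific opcode table and
    # print its disassembly (use sample.func_code instead on Python < 2.6,
    # as the change above does for the 2.4 branch).
    bytecode = Bytecode(sample.__code__, scan.opc)
    print(bytecode.dis())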
@@ -1,150 +0,0 @@
|
||||
# std
|
||||
import os
|
||||
# test
|
||||
import pytest
|
||||
import hypothesis
|
||||
from hypothesis import strategies as st
|
||||
# uncompyle6
|
||||
from uncompyle6 import PYTHON_VERSION, deparse_code
|
||||
|
||||
|
||||
@st.composite
|
||||
def expressions(draw):
|
||||
# todo : would be nice to generate expressions using hypothesis however
|
||||
# this is pretty involved so for now just use a corpus of expressions
|
||||
# from which to select.
|
||||
return draw(st.sampled_from((
|
||||
'abc',
|
||||
'len(items)',
|
||||
'x + 1',
|
||||
'lineno',
|
||||
'container',
|
||||
'self.attribute',
|
||||
'self.method()',
|
||||
# These expressions are failing, I think these are control
|
||||
# flow problems rather than problems with FORMAT_VALUE,
|
||||
# however I need to confirm this...
|
||||
#'sorted(items, key=lambda x: x.name)',
|
||||
#'func(*args, **kwargs)',
|
||||
#'text or default',
|
||||
#'43 if life_the_universe and everything else None'
|
||||
)))
|
||||
|
||||
|
||||
@st.composite
|
||||
def format_specifiers(draw):
|
||||
"""
|
||||
Generate a valid format specifier using the rules:
|
||||
|
||||
format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]
|
||||
fill ::= <any character>
|
||||
align ::= "<" | ">" | "=" | "^"
|
||||
sign ::= "+" | "-" | " "
|
||||
width ::= integer
|
||||
precision ::= integer
|
||||
type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
|
||||
|
||||
See https://docs.python.org/2/library/string.html
|
||||
|
||||
:param draw: Let hypothesis draw from other strategies.
|
||||
|
||||
:return: An example format_specifier.
|
||||
"""
|
||||
alphabet_strategy = st.characters(min_codepoint=ord('a'), max_codepoint=ord('z'))
|
||||
fill = draw(st.one_of(alphabet_strategy, st.none()))
|
||||
align = draw(st.sampled_from(list('<>=^')))
|
||||
fill_align = (fill + align or '') if fill else ''
|
||||
|
||||
type_ = draw(st.sampled_from('bcdeEfFgGnosxX%'))
|
||||
can_have_sign = type_ in 'deEfFgGnoxX%'
|
||||
can_have_comma = type_ in 'deEfFgG%'
|
||||
can_have_precision = type_ in 'fFgG'
|
||||
can_have_pound = type_ in 'boxX%'
|
||||
can_have_zero = type_ in 'oxX'
|
||||
|
||||
sign = draw(st.sampled_from(list('+- ') + [''])) if can_have_sign else ''
|
||||
pound = draw(st.sampled_from(('#', '',))) if can_have_pound else ''
|
||||
zero = draw(st.sampled_from(('0', '',))) if can_have_zero else ''
|
||||
|
||||
int_strategy = st.integers(min_value=1, max_value=1000)
|
||||
|
||||
width = draw(st.one_of(int_strategy, st.none()))
|
||||
width = str(width) if width is not None else ''
|
||||
|
||||
comma = draw(st.sampled_from((',', '',))) if can_have_comma else ''
|
||||
if can_have_precision:
|
||||
precision = draw(st.one_of(int_strategy, st.none()))
|
||||
precision = '.' + str(precision) if precision else ''
|
||||
else:
|
||||
precision = ''
|
||||
|
||||
return ''.join((fill_align, sign, pound, zero, width, comma, precision, type_,))
|
||||
|
||||
|
||||
@st.composite
|
||||
def fstrings(draw):
|
||||
"""
|
||||
Generate a valid f-string.
|
||||
See https://www.python.org/dev/peps/pep-0498/#specification
|
||||
|
||||
:param draw: Let hypothsis draw from other strategies.
|
||||
|
||||
:return: A valid f-string.
|
||||
"""
|
||||
character_strategy = st.characters(
|
||||
blacklist_characters='\r\n\'\\s{}',
|
||||
min_codepoint=1,
|
||||
max_codepoint=1000,
|
||||
)
|
||||
is_raw = draw(st.booleans())
|
||||
integer_strategy = st.integers(min_value=0, max_value=3)
|
||||
expression_count = draw(integer_strategy)
|
||||
content = []
|
||||
for _ in range(expression_count):
|
||||
expression = draw(expressions())
|
||||
conversion = draw(st.sampled_from(('', '!s', '!r', '!a',)))
|
||||
has_specifier = draw(st.booleans())
|
||||
specifier = ':' + draw(format_specifiers()) if has_specifier else ''
|
||||
content.append('{{{}{}}}'.format(expression, conversion, specifier))
|
||||
content.append(draw(st.text(character_strategy)))
|
||||
content = ''.join(content)
|
||||
return "f{}'{}'".format('r' if is_raw else '', content)
|
||||
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
|
||||
@hypothesis.given(format_specifiers())
|
||||
def test_format_specifiers(format_specifier):
|
||||
"""Verify that format_specifiers generates valid specifiers"""
|
||||
try:
|
||||
exec('"{:' + format_specifier + '}".format(0)')
|
||||
except ValueError as e:
|
||||
if 'Unknown format code' not in str(e):
|
||||
raise
|
||||
|
||||
|
||||
def run_test(text):
|
||||
hypothesis.assume(len(text))
|
||||
hypothesis.assume("f'{" in text)
|
||||
expr = text + '\n'
|
||||
code = compile(expr, '<string>', 'single')
|
||||
deparsed = deparse_code(PYTHON_VERSION, code, compile_mode='single')
|
||||
recompiled = compile(deparsed.text, '<string>', 'single')
|
||||
if recompiled != code:
|
||||
assert 'dis(' + deparsed.text.strip('\n') + ')' == 'dis(' + expr.strip('\n') + ')'
|
||||
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
|
||||
@hypothesis.given(fstrings())
|
||||
def test_uncompyle_fstring(fstring):
|
||||
"""Verify uncompyling fstring bytecode"""
|
||||
run_test(fstring)
|
||||
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
|
||||
@pytest.mark.parametrize('fstring', [
|
||||
"f'{abc}{abc!s}'",
|
||||
"f'{abc}0'",
|
||||
])
|
||||
def test_uncompyle_direct(fstring):
|
||||
"""useful for debugging"""
|
||||
run_test(fstring)
|
@@ -1,175 +0,0 @@
|
||||
# std
|
||||
import string
|
||||
# 3rd party
|
||||
from hypothesis import given, assume, example, settings, strategies as st
|
||||
import pytest
|
||||
# uncompyle
|
||||
from validate import validate_uncompyle
|
||||
from test_fstring import expressions
|
||||
|
||||
|
||||
alpha = st.sampled_from(string.ascii_lowercase)
|
||||
numbers = st.sampled_from(string.digits)
|
||||
alphanum = st.sampled_from(string.ascii_lowercase + string.digits)
|
||||
|
||||
|
||||
@st.composite
|
||||
def function_calls(draw,
|
||||
min_keyword_args=0, max_keyword_args=5,
|
||||
min_positional_args=0, max_positional_args=5,
|
||||
min_star_args=0, max_star_args=1,
|
||||
min_double_star_args=0, max_double_star_args=1):
|
||||
"""
|
||||
Strategy factory for generating function calls.
|
||||
|
||||
:param draw: Callable which draws examples from other strategies.
|
||||
|
||||
:return: The function call text.
|
||||
"""
|
||||
st_positional_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_positional_args,
|
||||
max_size=max_positional_args
|
||||
)
|
||||
st_keyword_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_keyword_args,
|
||||
max_size=max_keyword_args
|
||||
)
|
||||
st_star_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_star_args,
|
||||
max_size=max_star_args
|
||||
)
|
||||
st_double_star_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_double_star_args,
|
||||
max_size=max_double_star_args
|
||||
)
|
||||
|
||||
positional_args = draw(st_positional_args)
|
||||
keyword_args = draw(st_keyword_args)
|
||||
st_values = st.lists(
|
||||
expressions(),
|
||||
min_size=len(keyword_args),
|
||||
max_size=len(keyword_args)
|
||||
)
|
||||
keyword_args = [
|
||||
x + '=' + e
|
||||
for x, e in
|
||||
zip(keyword_args, draw(st_values))
|
||||
]
|
||||
star_args = ['*' + x for x in draw(st_star_args)]
|
||||
double_star_args = ['**' + x for x in draw(st_double_star_args)]
|
||||
|
||||
arguments = positional_args + keyword_args + star_args + double_star_args
|
||||
draw(st.randoms()).shuffle(arguments)
|
||||
arguments = ','.join(arguments)
|
||||
|
||||
function_call = 'fn({arguments})'.format(arguments=arguments)
|
||||
try:
|
||||
# TODO: Figure out the exact rules for ordering of positional, keyword,
|
||||
# star args, double star args and in which versions the various
|
||||
# types of arguments are supported so we don't need to check that the
|
||||
# expression compiles like this.
|
||||
compile(function_call, '<string>', 'single')
|
||||
except:
|
||||
assume(False)
|
||||
return function_call
|
||||
|
||||
|
||||
def test_function_no_args():
|
||||
validate_uncompyle("fn()")
|
||||
|
||||
|
||||
def isolated_function_calls(which):
|
||||
"""
|
||||
Returns a strategy for generating function calls, but isolated to
|
||||
particular types of arguments, for example only positional arguments.
|
||||
|
||||
This can help reason about debugging errors in specific types of function
|
||||
calls.
|
||||
|
||||
:param which: One of 'keyword', 'positional', 'star', 'double_star'
|
||||
|
||||
:return: Strategy for generating an function call isolated to specific
|
||||
argument types.
|
||||
"""
|
||||
kwargs = dict(
|
||||
max_keyword_args=0,
|
||||
max_positional_args=0,
|
||||
max_star_args=0,
|
||||
max_double_star_args=0,
|
||||
)
|
||||
kwargs['_'.join(('min', which, 'args'))] = 1
|
||||
kwargs['_'.join(('max', which, 'args'))] = 5 if 'star' not in which else 1
|
||||
return function_calls(**kwargs)
|
||||
|
||||
|
||||
with settings(max_examples=25):
|
||||
|
||||
@given(isolated_function_calls('positional'))
|
||||
@example("fn(0)")
|
||||
def test_function_positional_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
@given(isolated_function_calls('keyword'))
|
||||
@example("fn(a=0)")
|
||||
def test_function_call_keyword_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
@given(isolated_function_calls('star'))
|
||||
@example("fn(*items)")
|
||||
def test_function_call_star_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
@given(isolated_function_calls('double_star'))
|
||||
@example("fn(**{})")
|
||||
def test_function_call_double_star_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_CONST_KEY_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(w=0,m=0,**v)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(a=0,**g)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(*g,**j)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_MAP_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(*z,u=0)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_TUPLE_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(**a)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_MAP_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(b,b,b=0,*a)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(*c,v)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_CONST_KEY_MAP_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(i=0,y=0,*p)")
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='skipping property based test until all individual tests are passing')
|
||||
@given(function_calls())
|
||||
def test_function_call(function_call):
|
||||
validate_uncompyle(function_call)
|
@@ -66,9 +66,9 @@ def test_grammar():
    expect_dup_rhs = frozenset([('COME_FROM',), ('CONTINUE',), ('JUMP_ABSOLUTE',),
                                ('LOAD_CONST',),
                                ('JUMP_BACK',), ('JUMP_FORWARD',)])
    reduced_dup_rhs = {k: dup_rhs[k] for k in dup_rhs if k not in expect_dup_rhs}
    for k in reduced_dup_rhs:
        print(k, reduced_dup_rhs[k])
    # reduced_dup_rhs = {k: dup_rhs[k] for k in dup_rhs if k not in expect_dup_rhs}
    # for k in reduced_dup_rhs:
    #     print(k, reduced_dup_rhs[k])
    # assert not reduced_dup_rhs, reduced_dup_rhs

    s = get_scanner(PYTHON_VERSION, IS_PYPY)
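Aside: a compact sketch of the grammar-introspection calls this test is built on, using only the get_python_parser and check_sets calls visible in this diff.

    from uncompyle6 import PYTHON_VERSION, IS_PYPY
    from uncompyle6.parser import get_python_parser

    p = get_python_parser(PYTHON_VERSION, is_pypy=IS_PYPY)
    # check_sets() reports grammar health: left-hand sides, unused
    # right-hand sides, tokens, right-recursive rules, and rules that
    # duplicate a right-hand side.
    (lhs, rhs, tokens, right_recursive, dup_rhs) = p.check_sets()
    for k in dup_rhs:
        print(k, dup_rhs[k])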
@@ -8,12 +8,8 @@ from uncompyle6.semantics.consts import (

if PYTHON3:
    from io import StringIO
    def iteritems(d):
        return d.items()
else:
    from StringIO import StringIO
    def iteritems(d):
        return d.iteritems()

from uncompyle6.semantics.pysource import SourceWalker as SourceWalker

@@ -30,7 +26,7 @@ def test_template_engine():
    # FIXME: and so on...

    from uncompyle6.semantics.consts import (
        TABLE_DIRECT, TABLE_R,
        TABLE_R, TABLE_DIRECT,
    )

    from uncompyle6.semantics.fragments import (
@@ -44,7 +40,7 @@ def test_tables():
            (TABLE_DIRECT, 'TABLE_DIRECT', False),
            (TABLE_R, 'TABLE_R', False),
            (TABLE_DIRECT_FRAGMENT, 'TABLE_DIRECT_FRAGMENT', True)):
        for k, entry in iteritems(t):
        for k, entry in t.iteritems():
            if k in skip_for_now:
                continue
            fmt = entry[0]
@@ -1,19 +1,19 @@
import pytest
from uncompyle6 import PYTHON_VERSION, PYTHON3, deparse_code
from uncompyle6 import PYTHON_VERSION, deparse_code

def test_single_mode():
    single_expressions = (
        'i = 1',
        'i and (j or k)',
        'i += 1',
        'i = j % 4',
        'i = {}',
        'i = []',
        'for i in range(10):\n i\n',
        'for i in range(10):\n for j in range(10):\n i + j\n',
        'try:\n i\nexcept Exception:\n j\nelse:\n k\n'
    )
if PYTHON_VERSION >= 2.5:
    def test_single_mode():
        single_expressions = (
            'i = 1',
            'i and (j or k)',
            'i += 1',
            'i = j % 4',
            'i = {}',
            'i = []',
            'for i in range(10):\n i\n',
            'for i in range(10):\n for j in range(10):\n i + j\n',
            'try:\n i\nexcept Exception:\n j\nelse:\n k\n'
        )

    for expr in single_expressions:
        code = compile(expr + '\n', '<string>', 'single')
        assert deparse_code(PYTHON_VERSION, code, compile_mode='single').text == expr + '\n'
        for expr in single_expressions:
            code = compile(expr + '\n', '<string>', 'single')
            assert deparse_code(PYTHON_VERSION, code, compile_mode='single').text == expr + '\n'
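Aside: the single-mode round trip the test asserts can be reproduced in a few lines; a sketch assuming only the deparse_code(version, code, compile_mode='single') call used above.

    from uncompyle6 import PYTHON_VERSION, deparse_code

    expr = 'i = j % 4\n'
    # Compile the statement, decompile the code object, and check that
    # the regenerated source round-trips exactly.
    code = compile(expr, '<string>', 'single')
    deparsed = deparse_code(PYTHON_VERSION, code, compile_mode='single')
    assert deparsed.text == expr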
pytest/testdata/if-2.7.right (2 changes, vendored)
@@ -9,4 +9,4 @@
12 JUMP_FORWARD 0 'to 15'
15_0 COME_FROM 12 '12'
15 LOAD_CONST 0 ''
18 RETURN_VALUE
18 RETURN_VALUE

pytest/testdata/ifelse-2.7.right (2 changes, vendored)
@@ -12,4 +12,4 @@
18 STORE_NAME 2 'd'
21_0 COME_FROM 12 '12'
21 LOAD_CONST 2 ''
24 RETURN_VALUE
24 RETURN_VALUE
@@ -1,24 +1,25 @@
|
||||
# future
|
||||
from __future__ import print_function
|
||||
# std
|
||||
import os
|
||||
import difflib
|
||||
import subprocess
|
||||
import tempfile
|
||||
import functools
|
||||
# compatability
|
||||
import six
|
||||
|
||||
from StringIO import StringIO
|
||||
# uncompyle6 / xdis
|
||||
from uncompyle6 import PYTHON_VERSION, IS_PYPY, deparse_code
|
||||
# TODO : I think we can get xdis to support the dis api (python 3 version) by doing something like this there
|
||||
from xdis.bytecode import Bytecode
|
||||
from xdis.main import get_opcode
|
||||
opc = get_opcode(PYTHON_VERSION, IS_PYPY)
|
||||
Bytecode = functools.partial(Bytecode, opc=opc)
|
||||
|
||||
try:
|
||||
import functools
|
||||
Bytecode = functools.partial(Bytecode, opc=opc)
|
||||
def _dis_to_text(co):
|
||||
return Bytecode(co).dis()
|
||||
except:
|
||||
pass
|
||||
|
||||
def _dis_to_text(co):
|
||||
return Bytecode(co).dis()
|
||||
|
||||
|
||||
def print_diff(original, uncompyled):
|
||||
@@ -42,8 +43,11 @@ def print_diff(original, uncompyled):
|
||||
print('\nTo display diff highlighting run:\n pip install BeautifulSoup4')
|
||||
diff = difflib.HtmlDiff().make_table(*args)
|
||||
|
||||
with tempfile.NamedTemporaryFile(delete=False) as f:
|
||||
f = tempfile.NamedTemporaryFile(delete=False)
|
||||
try:
|
||||
f.write(str(diff).encode('utf-8'))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
try:
|
||||
print()
|
||||
@@ -60,8 +64,7 @@ def print_diff(original, uncompyled):
|
||||
print('\nFor side by side diff install elinks')
|
||||
diff = difflib.Differ().compare(original_lines, uncompyled_lines)
|
||||
print('\n'.join(diff))
|
||||
finally:
|
||||
os.unlink(f.name)
|
||||
os.unlink(f.name)
|
||||
|
||||
|
||||
def are_instructions_equal(i1, i2):
|
||||
@@ -123,8 +126,9 @@ def validate_uncompyle(text, mode='exec'):
|
||||
original_text = text
|
||||
|
||||
deparsed = deparse_code(PYTHON_VERSION, original_code,
|
||||
|
||||
compile_mode=mode,
|
||||
out=six.StringIO(),
|
||||
out=StringIO(),
|
||||
is_pypy=IS_PYPY)
|
||||
uncompyled_text = deparsed.text
|
||||
uncompyled_code = compile(uncompyled_text, '<string>', 'exec')
|
||||
|
@@ -1,3 +1,3 @@
pytest>=3.0.0
flake8
hypothesis
hypothesis<=3.8.3
setup.py (16 changes)
@@ -1,16 +1,16 @@
#!/usr/bin/env python
import sys

"""Setup script for the 'uncompyle6' distribution."""

import sys

SYS_VERSION = sys.version_info[0:2]
if not ((2, 6) <= SYS_VERSION <= (3, 7)) or ((3, 0) <= SYS_VERSION <= (3, 1)):
    mess = "Python Release 2.6 .. 3.7 excluding 3.0 and 3.1 are supported in this code branch."
    if ((2, 4) <= SYS_VERSION <= (2, 7)):
        mess += ("\nFor your Python, version %s, use the python-2.4 code/branch." %
if not ((2, 4) <= SYS_VERSION <= (2, 7)):
    mess = "Python Release 2.4 .. 2.7 are supported in this code branch."
    if ((3, 2) <= SYS_VERSION <= (3, 7)):
        mess += ("\nFor your Python, version %s, use the master code/branch." %
                 sys.version[0:3])
    elif SYS_VERSION < (2, 4) or ((3, 0) <= SYS_VERSION <= (3, 1)):
        mess += ("\nThis package is not supported for Python version %s."
    else:
        mess += ("\nThis package is not supported before Python 2.4. Your Python version is %s."
                 % sys.version[0:3])
    print(mess)
    raise Exception(mess)
55
test-unit/test_grammar.py
Normal file
55
test-unit/test_grammar.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import re
|
||||
import unittest
|
||||
from uncompyle6 import PYTHON_VERSION, IS_PYPY # , PYTHON_VERSION
|
||||
from uncompyle6.parser import get_python_parser, python_parser
|
||||
|
||||
class TestGrammar(unittest.TestCase):
|
||||
def test_grammar(self):
|
||||
|
||||
def check_tokens(tokens, opcode_set):
|
||||
remain_tokens = set(tokens) - opcode_set
|
||||
remain_tokens = set([re.sub('_\d+$','', t) for t in remain_tokens])
|
||||
remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
|
||||
remain_tokens = set(remain_tokens) - opcode_set
|
||||
self.assertEqual(remain_tokens, set([]),
|
||||
"Remaining tokens %s\n====\n%s" % (remain_tokens, p.dump_grammar()))
|
||||
|
||||
p = get_python_parser(PYTHON_VERSION, is_pypy=IS_PYPY)
|
||||
(lhs, rhs, tokens,
|
||||
right_recursive, dup_rhs) = p.check_sets()
|
||||
expect_lhs = set(['expr1024', 'pos_arg'])
|
||||
unused_rhs = set(['list', 'call', 'mkfunc',
|
||||
'mklambda',
|
||||
'unpack',])
|
||||
|
||||
expect_right_recursive = frozenset([('designList',
|
||||
('store', 'DUP_TOP', 'designList'))])
|
||||
expect_lhs.add('kwarg')
|
||||
|
||||
self.assertEqual(expect_lhs, set(lhs))
|
||||
self.assertEqual(unused_rhs, set(rhs))
|
||||
self.assertEqual(expect_right_recursive, right_recursive)
|
||||
|
||||
expect_dup_rhs = frozenset([('COME_FROM',), ('CONTINUE',), ('JUMP_ABSOLUTE',),
|
||||
('LOAD_CONST',),
|
||||
('JUMP_BACK',), ('JUMP_FORWARD',)])
|
||||
|
||||
reduced_dup_rhs = {}
|
||||
for k in dup_rhs:
|
||||
if k not in expect_dup_rhs:
|
||||
reduced_dup_rhs[k] = dup_rhs[k]
|
||||
pass
|
||||
pass
|
||||
for k in reduced_dup_rhs:
|
||||
print(k, reduced_dup_rhs[k])
|
||||
# assert not reduced_dup_rhs, reduced_dup_rhs
|
||||
|
||||
def test_dup_rule(self):
|
||||
import inspect
|
||||
python_parser(PYTHON_VERSION, inspect.currentframe().f_code,
|
||||
is_pypy=IS_PYPY,
|
||||
parser_debug={
|
||||
'dups': True, 'transition': False, 'reduce': False,
|
||||
'rules': False, 'errorstack': None, 'context': True})
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
@@ -28,7 +28,7 @@ check:
|
||||
$(MAKE) check-$(PYTHON_VERSION)
|
||||
|
||||
#: Run working tests from Python 2.6 or 2.7
|
||||
check-2.6 check-2.7: check-bytecode-2 check-bytecode-3 check-bytecode-1 check-native-short
|
||||
check-2.4 check-2.5 check-2.6 check-2.7: check-bytecode-2 check-bytecode-3 check-bytecode-1 check-native-short
|
||||
|
||||
#: Run working tests from Python 3.0
|
||||
check-3.0: check-bytecode
|
||||
@@ -86,7 +86,7 @@ check-bytecode: check-bytecode-3
|
||||
$(PYTHON) test_pythonlib.py \
|
||||
--bytecode-2.1 --bytecode-2.2 --bytecode-2.3 --bytecode-2.4 \
|
||||
--bytecode-2.5 --bytecode-2.6 --bytecode-2.7 \
|
||||
--bytecode-pypy2.7 --bytecode-1
|
||||
--bytecode-pypy2.7
|
||||
|
||||
|
||||
#: Check deparsing bytecode 1.5 only
|
||||
@@ -113,6 +113,12 @@ check-bytecode-2.4:
|
||||
check-bytecode-2.5:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.5
|
||||
|
||||
#: Get grammar coverage for Python 2.4
|
||||
grammar-coverage-2.4:
|
||||
-rm $(COVER_DIR)/spark-grammar-24.cover
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-24.cover $(PYTHON) test_pythonlib.py --bytecode-2.4
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-24.cover $(PYTHON) test_pyenvlib.py --2.4.6
|
||||
|
||||
#: Get grammar coverage for Python 2.5
|
||||
grammar-coverage-2.5:
|
||||
-rm $(COVER_DIR)/spark-grammar-25.cover
|
||||
@@ -171,10 +177,12 @@ grammar-coverage-3.5:
|
||||
#: Check deparsing Python 2.6
|
||||
check-bytecode-2.6:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.6 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.6-run --verify-run
|
||||
|
||||
#: Check deparsing Python 2.7
|
||||
check-bytecode-2.7:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.7 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.7-run --verify-run
|
||||
|
||||
#: Check deparsing Python 3.0
|
||||
check-bytecode-3.0:
|
||||
@@ -191,22 +199,31 @@ check-bytecode-3.2:
|
||||
#: Check deparsing Python 3.3
|
||||
check-bytecode-3.3:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3-run --verify-run
|
||||
|
||||
#: Check deparsing Python 3.4
|
||||
check-bytecode-3.4:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4-run --verify-run
|
||||
|
||||
#: Check deparsing Python 3.5
|
||||
check-bytecode-3.5:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5-run --verify-run
|
||||
|
||||
#: Check deparsing Python 3.6
|
||||
check-bytecode-3.6:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6-run --verify-run
|
||||
|
||||
#: short tests for bytecodes only for this version of Python
|
||||
check-native-short:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION) --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION)-run --verify-run $(COMPILE)
|
||||
|
||||
#: Run longer Python 2.6's lib files known to be okay
|
||||
check-2.4-ok:
|
||||
$(PYTHON) test_pythonlib.py --ok-2.4 --verify $(COMPILE)
|
||||
|
||||
#: Run longer Python 2.6's lib files known to be okay
|
||||
check-2.6-ok:
|
||||
|
Binary files (contents not shown in the compare view):
  test/bytecode_2.2/10_del.pyc (new)
  test/bytecode_2.4/01_ops.pyc (new)
  test/bytecode_2.4/01_triple_compare.pyc (new)
  test/bytecode_2.4/02_except_as.pyc (new)
  test/bytecode_2.4/02_unary_convert.pyc (new)
  test/bytecode_2.5/01_inplace_true_divide.pyc (new)
  test/bytecode_2.5/01_ops.pyc (new)
  test/bytecode_2.5/02_true_divide.pyc (new)
  test/bytecode_2.5/06_setif_comprehension.pyc (new)
  test/bytecode_2.5/10_del.pyc (new)
  test/bytecode_2.7/02_ifelsetmtl.pyc (new)
  test/bytecode_3.6/02_kwargs.pyc (new)
  test/bytecode_3.6/10_long_pop_jump.pyc (new)
  plus a number of modified .pyc files listed in the page only as "Binary file not shown."

test/bytecode_2.4_run/README (new file, 5 lines)
These are byte-compiled programs compiled by Python 2.4

Furthermore the programs here are self-checking: when decompiled and
then run again in a 2.4 interpreter, they will give an error if they
are miscompiled.

test/bytecode_2.5_run/README, test/bytecode_2.6_run/README,
test/bytecode_2.7_run/README, test/bytecode_3.0_run/README,
test/bytecode_3.1_run/README, test/bytecode_3.2_run/README,
test/bytecode_3.3_run/README, test/bytecode_3.4_run/README,
test/bytecode_3.5_run/README (new files, 5 lines each)
Essentially the same text as above, with the version number adjusted to
2.5, 2.6, 2.7, 3.0, 3.1, 3.2, 3.3, 3.4, and 3.5 respectively.

test/bytecode_3.0/README (new file, 1 line)
These are byte-compiled programs compiled by Python 3.0

test/bytecode_3.1/README (new file, 1 line)
These are byte-compiled programs compiled by Python 3.1
@@ -1,9 +1,8 @@
#!/usr/bin/env python
# Mode: -*- python -*-
#
# Copyright (c) 2015 by Rocky Bernstein <rb@dustyfeet.com>
# Copyright (c) 2015, 2017 by Rocky Bernstein <rb@dustyfeet.com>
#
from __future__ import print_function


import dis, os.path

@@ -1,7 +1,5 @@
#!/usr/bin/env python

from __future__ import print_function

from uncompyle6 import uncompyle
import sys, inspect
test/simple_source/bug27+/02_ifelsetmtl.py (new file, 11 lines)
@@ -0,0 +1,11 @@
# Issue #148 on 2.7
# Bug is in handling CONTINUE like JUMP_BACK
# Similar code is probably found in a 2.7 stdlib. mapurl?
def reduce_url(url):
    atoms = []
    for atom in url:
        if atom == '.':
            pass # JUMP_BACK is patched as CONTINUE here
        elif atom == '..':
            atoms.push()
    return atoms
test/simple_source/bug36/02_kwargs.py (new file, 5 lines)
@@ -0,0 +1,5 @@
# From 3.6 _markupbase _parse_doctype_subset()
def bug(self, j):
    self.parse_comment(j, report=0)
    self.parse_comment(j, report=1, foo=2)
    self.parse_comment(a, b, report=3)
50
test/simple_source/bug36/10_long_pop_jump.py
Normal file
50
test/simple_source/bug36/10_long_pop_jump.py
Normal file
@@ -0,0 +1,50 @@
|
||||
# From 3.6 _markupbase.py
|
||||
|
||||
# Bug is that the routine is long enough that POP_JUMP_IF_FALSE instruction has an
|
||||
# EXTENDED_ARG instruction before it and we weren't picking out the jump offset properly
|
||||
|
||||
def parse_declaration(self, i):
|
||||
if rawdata[j:j] in ("-", ""):
|
||||
return -1
|
||||
n = len(rawdata)
|
||||
if rawdata[j:j+2] == '-':
|
||||
return self.parse_comment(i)
|
||||
elif rawdata[j] == '[':
|
||||
return self.parse_marked_section(i)
|
||||
else:
|
||||
decltype, j = self._scan_name(j, i)
|
||||
if j < 0:
|
||||
return j
|
||||
if decltype == "d":
|
||||
self._decl_otherchars = ''
|
||||
while j < n:
|
||||
c = rawdata[j]
|
||||
if c == ">":
|
||||
data = rawdata[i+2:j]
|
||||
if decltype == "d":
|
||||
self.handle_decl(data)
|
||||
else:
|
||||
self.unknown_decl(data)
|
||||
return j + 1
|
||||
if c in "'":
|
||||
m = _declstringlit_match(rawdata, j)
|
||||
if not m:
|
||||
return -1
|
||||
j = m.end()
|
||||
elif c in "a":
|
||||
name, j = self._scan_name(j, i)
|
||||
elif c:
|
||||
j += 1
|
||||
elif c == "[":
|
||||
if decltype == "d":
|
||||
j = self._parse_doctype_subset(j + 1, i)
|
||||
elif decltype in {"attlist", "linktype", "link", "element"}:
|
||||
self.error("unsupported '[' char in %s declaration" % decltype)
|
||||
else:
|
||||
self.error("unexpected '[' char in declaration")
|
||||
else:
|
||||
self.error(
|
||||
"unexpected %r char in declaration" % rawdata[j])
|
||||
if j < 0:
|
||||
return j
|
||||
return -1
|
@@ -60,6 +60,7 @@ case $PYVERSION in
    ;;
  2.7)
    SKIP_TESTS=(
      [test_curses.py]=1 # Possibly fails on its own but not detected
      [test_dis.py]=1 # We change line numbers - duh!
      [test_doctest.py]=1
      [test_grammar.py]=1 # Too many stmts. Handle large stmts
@@ -19,8 +19,7 @@ Step 2: Run the test:
|
||||
test_pyenvlib --mylib --verify # decompile verify 'mylib'
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from uncompyle6 import main, PYTHON3
|
||||
import os, time, re, shutil, sys
|
||||
from fnmatch import fnmatch
|
||||
|
||||
@@ -129,17 +128,19 @@ if __name__ == '__main__':
|
||||
test_options_keys = list(test_options.keys())
|
||||
test_options_keys.sort()
|
||||
opts, args = getopt.getopt(sys.argv[1:], '',
|
||||
['start-with=', 'verify', 'weak-verify',
|
||||
['start-with=', 'verify', 'verify-run', 'weak-verify',
|
||||
'max=', 'coverage', 'all', ] \
|
||||
+ test_options_keys )
|
||||
vers = ''
|
||||
|
||||
for opt, val in opts:
|
||||
if opt == '--verify':
|
||||
do_verify = True
|
||||
if opt == '--weak-verify':
|
||||
do_verify = 'strong'
|
||||
elif opt == '--weak-verify':
|
||||
do_verify = 'weak'
|
||||
if opt == '--coverage':
|
||||
elif opt == '--verify-run':
|
||||
do_verify = 'verify-run'
|
||||
elif opt == '--coverage':
|
||||
do_coverage = True
|
||||
elif opt == '--start-with':
|
||||
start_with = val
|
||||
|
@@ -27,8 +27,6 @@ Step 2: Run the test:
|
||||
test_pythonlib.py --mylib --verify # decompile verify 'mylib'
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import getopt, os, py_compile, sys, shutil, tempfile, time
|
||||
|
||||
from uncompyle6 import PYTHON_VERSION
|
||||
@@ -85,6 +83,9 @@ for vers in (1.5,
|
||||
bytecode = "bytecode_%s" % vers
|
||||
key = "bytecode-%s" % vers
|
||||
test_options[key] = (bytecode, PYC, bytecode, vers)
|
||||
bytecode = "bytecode_%s_run" % vers
|
||||
key = "bytecode-%s-run" % vers
|
||||
test_options[key] = (bytecode, PYC, bytecode, vers)
|
||||
key = "%s" % vers
|
||||
pythonlib = "python%s" % vers
|
||||
if isinstance(vers, float) and vers >= 3.0:
|
||||
@@ -127,8 +128,10 @@ def do_tests(src_dir, obj_patterns, target_dir, opts):
|
||||
if opts['do_compile']:
|
||||
compiled_version = opts['compiled_version']
|
||||
if compiled_version and PYTHON_VERSION != compiled_version:
|
||||
print("Not compiling: desired Python version is %s but we are running %s" %
|
||||
(compiled_version, PYTHON_VERSION), file=sys.stderr)
|
||||
sys.stderr.write("Not compiling: "
|
||||
"desired Python version is %s "
|
||||
"but we are running %s" %
|
||||
(compiled_version, PYTHON_VERSION))
|
||||
else:
|
||||
for root, dirs, basenames in os.walk(src_dir):
|
||||
file_matches(files, root, basenames, PY)
|
||||
@@ -146,8 +149,8 @@ def do_tests(src_dir, obj_patterns, target_dir, opts):
|
||||
file_matches(files, dirname, basenames, obj_patterns)
|
||||
|
||||
if not files:
|
||||
print("Didn't come up with any files to test! Try with --compile?",
|
||||
file=sys.stderr)
|
||||
sys.stderr.write("Didn't come up with any files to test! "
|
||||
"Try with --compile?")
|
||||
exit(1)
|
||||
|
||||
os.chdir(cwd)
|
||||
@@ -161,9 +164,9 @@ def do_tests(src_dir, obj_patterns, target_dir, opts):
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
print(time.ctime())
|
||||
print('Source directory: ', src_dir)
|
||||
print('Output directory: ', target_dir)
|
||||
print time.ctime()
|
||||
print 'Source directory: ', src_dir
|
||||
print 'Output directory: ', target_dir
|
||||
try:
|
||||
_, _, failed_files, failed_verify = \
|
||||
main(src_dir, target_dir, files, [],
|
||||
@@ -189,8 +192,9 @@ if __name__ == '__main__':
|
||||
test_options_keys = list(test_options.keys())
|
||||
test_options_keys.sort()
|
||||
opts, args = getopt.getopt(sys.argv[1:], '',
|
||||
['start-with=', 'verify', 'weak-verify', 'all', 'compile',
|
||||
'coverage',
|
||||
['start-with=', 'verify', 'verify-run',
|
||||
'weak-verify', 'all',
|
||||
'compile', 'coverage',
|
||||
'no-rm'] \
|
||||
+ test_options_keys )
|
||||
if not opts: help()
|
||||
@@ -205,9 +209,11 @@ if __name__ == '__main__':
|
||||
|
||||
for opt, val in opts:
|
||||
if opt == '--verify':
|
||||
test_opts['do_verify'] = True
|
||||
test_opts['do_verify'] = 'strong'
|
||||
elif opt == '--weak-verify':
|
||||
test_opts['do_verify'] = 'weak'
|
||||
elif opt == '--verify-run':
|
||||
test_opts['do_verify'] = 'verify-run'
|
||||
elif opt == '--compile':
|
||||
test_opts['do_compile'] = True
|
||||
elif opt == '--start-with':
|
||||
@@ -236,14 +242,13 @@ if __name__ == '__main__':
|
||||
if os.path.isdir(src_dir):
|
||||
checked_dirs.append([src_dir, pattern, target_dir])
|
||||
else:
|
||||
print("Can't find directory %s. Skipping" % src_dir,
|
||||
file=sys.stderr)
|
||||
sys.stderr.write("Can't find directory %s. Skipping" % src_dir)
|
||||
continue
|
||||
last_compile_version = compiled_version
|
||||
pass
|
||||
|
||||
if not checked_dirs:
|
||||
print("No directories found to check", file=sys.stderr)
|
||||
sys.stderr.write("No directories found to check\n")
|
||||
sys.exit(1)
|
||||
|
||||
test_opts['compiled_version'] = last_compile_version
|
||||
|
@@ -3,7 +3,6 @@
|
||||
#
|
||||
# Copyright (c) 2015-2016 by Rocky Bernstein <rb@dustyfeet.com>
|
||||
#
|
||||
from __future__ import print_function
|
||||
import sys, os, getopt
|
||||
|
||||
from uncompyle6.disas import disassemble_file
|
||||
@@ -26,7 +25,7 @@ Options:
|
||||
-V | --version show version and stop
|
||||
-h | --help show this message
|
||||
|
||||
""".format(program)
|
||||
""" % (program, program)
|
||||
|
||||
PATTERNS = ('*.pyc', '*.pyo')
|
||||
|
||||
@@ -37,15 +36,15 @@ Type -h for for full help.""" % program
|
||||
native = True
|
||||
|
||||
if len(sys.argv) == 1:
|
||||
print("No file(s) given", file=sys.stderr)
|
||||
print(Usage_short, file=sys.stderr)
|
||||
sys.stderr.write("No file(s) given\n")
|
||||
sys.stderr.write(Usage_short)
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
opts, files = getopt.getopt(sys.argv[1:], 'hVU',
|
||||
['help', 'version', 'uncompyle6'])
|
||||
except getopt.GetoptError as e:
|
||||
print('%s: %s' % (os.path.basename(sys.argv[0]), e), file=sys.stderr)
|
||||
except getopt.GetoptError(e):
|
||||
sys.stderr.write('%s: %s' % (os.path.basename(sys.argv[0]), e))
|
||||
sys.exit(-1)
|
||||
|
||||
for opt, val in opts:
|
||||
@@ -59,15 +58,14 @@ Type -h for for full help.""" % program
|
||||
native = False
|
||||
else:
|
||||
print(opt)
|
||||
print(Usage_short, file=sys.stderr)
|
||||
sys.stderr.write(Usage_short)
|
||||
sys.exit(1)
|
||||
|
||||
for file in files:
|
||||
if os.path.exists(files[0]):
|
||||
disassemble_file(file, sys.stdout, native)
|
||||
else:
|
||||
print("Can't read %s - skipping" % files[0],
|
||||
file=sys.stderr)
|
||||
sys.stderr.write("Can't read %s - skipping\n" % files[0])
|
||||
pass
|
||||
pass
|
||||
return
|
||||
|
@@ -4,7 +4,6 @@
|
||||
# Copyright (c) 2015-2017 by Rocky Bernstein
|
||||
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
|
||||
#
|
||||
from __future__ import print_function
|
||||
import sys, os, getopt, time
|
||||
|
||||
program = 'uncompyle6'
|
||||
@@ -35,6 +34,8 @@ Options:
|
||||
-p <integer> use <integer> number of processes
|
||||
-r recurse directories looking for .pyc and .pyo files
|
||||
--verify compare generated source with input byte-code
|
||||
--verify-run compile generated source, run it and check exit code
|
||||
--weak-verify compile generated source
|
||||
--linemaps generated line number correspondencies between byte-code
|
||||
and generated source output
|
||||
--help show this message
|
||||
@@ -58,18 +59,18 @@ from uncompyle6.version import VERSION
|
||||
|
||||
def usage():
|
||||
print("""usage:
|
||||
%s [--verify] [--asm] [--tree] [--grammar] [-o <path>] FILE|DIR...
|
||||
%s [--verify | --weak-verify ] [--asm] [--tree] [--grammar] [-o <path>] FILE|DIR...
|
||||
%s [--help | -h | --version | -V]
|
||||
""" % (program, program))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main_bin():
|
||||
if not (sys.version_info[0:2] in ((2, 6), (2, 7),
|
||||
(3, 1), (3, 2), (3, 3),
|
||||
if not (sys.version_info[0:2] in ((2, 4), (2, 5), (2, 6), (2, 7),
|
||||
(3, 2), (3, 3),
|
||||
(3, 4), (3, 5), (3, 6))):
|
||||
print('Error: %s requires Python 2.6-2.7, or 3.1-3.6' % program,
|
||||
file=sys.stderr)
|
||||
sys.stderr.write('Error: %s requires Python 2.4 2.5 2.6, 2.7, '
|
||||
'3.2, 3.3, 3.4, 3.5, or 3.6' % program)
|
||||
sys.exit(-1)
|
||||
|
||||
do_verify = recurse_dirs = False
|
||||
@@ -83,9 +84,9 @@ def main_bin():
|
||||
try:
|
||||
opts, files = getopt.getopt(sys.argv[1:], 'hagtdrVo:c:p:',
|
||||
'help asm grammar linemaps recurse timestamp tree '
|
||||
'verify version showgrammar'.split(' '))
|
||||
except getopt.GetoptError as e:
|
||||
print('%s: %s' % (os.path.basename(sys.argv[0]), e), file=sys.stderr)
|
||||
'verify verify-run version showgrammar'.split(' '))
|
||||
except getopt.GetoptError, e:
|
||||
sys.stderr.write('%s: %s\n' % (os.path.basename(sys.argv[0]), e))
|
||||
sys.exit(-1)
|
||||
|
||||
options = {}
|
||||
@@ -97,15 +98,19 @@ def main_bin():
|
||||
print("%s %s" % (program, VERSION))
|
||||
sys.exit(0)
|
||||
elif opt == '--verify':
|
||||
options['do_verify'] = True
|
||||
options['do_verify'] = 'strong'
|
||||
elif opt == '--weak-verify':
|
||||
options['do_verify'] = 'weak'
|
||||
elif opt == '--verify-run':
|
||||
options['do_verify'] = 'verify-run'
|
||||
elif opt == '--linemaps':
|
||||
options['do_linemaps'] = True
|
||||
elif opt in ('--asm', '-a'):
|
||||
options['showasm'] = 'after'
|
||||
options['do_verify'] = False
|
||||
options['do_verify'] = None
|
||||
elif opt in ('--tree', '-t'):
|
||||
options['showast'] = True
|
||||
options['do_verify'] = False
|
||||
options['do_verify'] = None
|
||||
elif opt in ('--grammar', '-g'):
|
||||
options['showgrammar'] = True
|
||||
elif opt == '-o':
|
||||
@@ -119,7 +124,7 @@ def main_bin():
|
||||
elif opt in ('--recurse', '-r'):
|
||||
recurse_dirs = True
|
||||
else:
|
||||
print(opt, file=sys.stderr)
|
||||
sys.stderr.write(opt)
|
||||
usage()
|
||||
|
||||
# expand directory if specified
|
||||
@@ -144,7 +149,7 @@ def main_bin():
|
||||
files = [f[sb_len:] for f in files]
|
||||
|
||||
if not files:
|
||||
print("No files given", file=sys.stderr)
|
||||
sys.stderr.write("No files given\n")
|
||||
usage()
|
||||
|
||||
if outfile == '-':
|
||||
@@ -162,7 +167,7 @@ def main_bin():
|
||||
result = main(src_base, out_base, files, codes, outfile,
|
||||
**options)
|
||||
if len(files) > 1:
|
||||
mess = status_msg(do_verify, *result)
|
||||
mess = status_msg(do_verify, result, do_verify)
|
||||
print('# ' + mess)
|
||||
pass
|
||||
except (KeyboardInterrupt):
|
||||
|
@@ -1,4 +1,4 @@
-# Copyright (c) 2015-2016 by Rocky Bernstein
+# Copyright (c) 2015-2016, 2818 by Rocky Bernstein
 # Copyright (c) 2005 by Dan Pascu <dan@windowmaker.org>
 # Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
 # Copyright (c) 1999 John Aycock
@@ -13,17 +13,14 @@ that is doing the extraction.

 Second, we need structured instruction information for the
 (de)-parsing step. Python 3.4 and up provides this, but we still do
-want to run on Python 2.7.
+want to run on earlier Python versions.
 """

-from __future__ import print_function
-
 import sys
 from collections import deque

 import uncompyle6

-from xdis.main import disassemble_file as xdisassemble_file
 from xdis.code import iscode
 from xdis.load import check_object_path, load_module
 from uncompyle6.scanner import get_scanner
@@ -37,10 +34,9 @@ def disco(version, co, out=None, is_pypy=False):

     # store final output stream for case of error
     real_out = out or sys.stdout
-    print('# Python %s' % version, file=real_out)
+    real_out.write('# Python %s\n' % version)
     if co.co_filename:
-        print('# Embedded file name: %s' % co.co_filename,
-              file=real_out)
+        real_out.write('# Embedded file name: %s\n' % co.co_filename)

     scanner = get_scanner(version, is_pypy=is_pypy)

@@ -52,30 +48,25 @@ def disco_loop(disasm, queue, real_out):
     while len(queue) > 0:
         co = queue.popleft()
         if co.co_name != '<module>':
-            print('\n# %s line %d of %s' %
-                  (co.co_name, co.co_firstlineno, co.co_filename),
-                  file=real_out)
+            real_out.write('\n# %s line %d of %s\n' %
+                           (co.co_name, co.co_firstlineno, co.co_filename))
         tokens, customize = disasm(co)
         for t in tokens:
            if iscode(t.pattr):
                queue.append(t.pattr)
            elif iscode(t.attr):
                queue.append(t.attr)
-           print(t, file=real_out)
+           real_out.write(t)
            pass
         pass

-def disassemble_file(filename, outstream=None, native=False):
+def disassemble_file(filename, outstream=None):
     """
     disassemble Python byte-code file (.pyc)

     If given a Python source file (".py") file, we'll
     try to find the corresponding compiled object.
     """
-    if native:
-        xdisassemble_file(filename, outstream)
-        return
-
     filename = check_object_path(filename)
     (version, timestamp, magic_int, co, is_pypy,
      source_size) = load_module(filename)
@@ -90,14 +81,14 @@ def _test():
     """Simple test program to disassemble a file."""
     argc = len(sys.argv)
     if argc != 2:
-        if argc == 1 and uncompyle6.PYTHON3:
+        if argc == 1:
            fn = __file__
        else:
            sys.stderr.write("usage: %s [-|CPython compiled file]\n" % __file__)
            sys.exit(2)
     else:
         fn = sys.argv[1]
-    disassemble_file(fn, native=True)
+    disassemble_file(fn)

 if __name__ == "__main__":
     _test()
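For orientation, a short usage sketch of disassemble_file() with the simplified signature shown above; the .pyc path is hypothetical, and per disco() the output stream defaults to sys.stdout when none is given:

    import sys
    from uncompyle6.disas import disassemble_file

    # Emits a '# Python x.y' header plus a token-level listing of every
    # code object found in the compiled file (per disco()/disco_loop() above).
    disassemble_file('mymodule.pyc', sys.stdout)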
@@ -10,7 +10,7 @@ def line_number_mapping(pyc_filename, src_filename):
     source_size) = load_module(pyc_filename)
    try:
        code2 = load_file(src_filename)
-    except SyntaxError as e:
+    except SyntaxError, e:
        return str(e)

    queue = deque([code1, code2])
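The change above trades the Python 2.6+ spelling `except SyntaxError as e:` for the old-style `except SyntaxError, e:` so the module still parses under Python 2.4/2.5. A sketch of a third spelling that parses on every version from 2.4 through 3.x, using sys.exc_info() instead of binding the exception in the except clause; the wrapper function here is hypothetical:

    import sys

    def load_source_or_error(load_file, src_filename):
        # Hypothetical wrapper around the load_file() call shown in the diff.
        try:
            return load_file(src_filename)
        except SyntaxError:
            # sys.exc_info()[1] reaches the exception object without the
            # version-specific except-clause binding syntax.
            e = sys.exc_info()[1]
            return str(e)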
@@ -1,7 +1,6 @@
-from __future__ import print_function
 import datetime, os, subprocess, sys, tempfile

-from uncompyle6 import verify, IS_PYPY
+from uncompyle6 import verify, IS_PYPY, PYTHON_VERSION
 from xdis.code import iscode
 from uncompyle6.disas import check_object_path
 from uncompyle6.semantics import pysource
@@ -9,73 +8,124 @@ from uncompyle6.parser import ParserError
 from uncompyle6.version import VERSION
 from uncompyle6.linenumbers import line_number_mapping

+from uncompyle6.semantics.pysource import deparse_code
+from uncompyle6.semantics.linemap import deparse_code_with_map
+
 from xdis.load import load_module

+def _get_outstream(outfile):
+    dir = os.path.dirname(outfile)
+    failed_file = outfile + '_failed'
+    if os.path.exists(failed_file):
+        os.remove(failed_file)
+    try:
+        os.makedirs(dir)
+    except OSError:
+        pass
+    return open(outfile, 'w')
+
 def decompile(
         bytecode_version, co, out=None, showasm=None, showast=False,
         timestamp=None, showgrammar=False, code_objects={},
-        source_size=None, is_pypy=False, magic_int=None):
+        source_size=None, is_pypy=False, magic_int=None,
+        mapstream=None):
     """
     ingests and deparses a given code block 'co'
-    """
-    assert iscode(co)
+
+    Caller is responsible for closing `out` and `mapstream`
+    """
     # store final output stream for case of error
     real_out = out or sys.stdout
-    co_pypy_str = 'PyPy ' if is_pypy else ''
-    run_pypy_str = 'PyPy ' if IS_PYPY else ''
-    print('# uncompyle6 version %s\n'
+
+    def write(s):
+        s += '\n'
+        real_out.write(s)
+
+    assert iscode(co)
+
+    if is_pypy:
+        co_pypy_str = 'PyPy '
+    else:
+        co_pypy_str = ''
+
+    if IS_PYPY:
+        run_pypy_str = 'PyPy '
+    else:
+        run_pypy_str = ''
+
+    if magic_int:
+        m = str(magic_int)
+    else:
+        m = ""
+
+    sys_version_lines = sys.version.split('\n')
+    write('# uncompyle6 version %s\n'
           '# %sPython bytecode %s%s\n# Decompiled from: %sPython %s' %
           (VERSION, co_pypy_str, bytecode_version,
-           " (%d)" % magic_int if magic_int else "",
-           run_pypy_str, '\n# '.join(sys.version.split('\n'))),
-          file=real_out)
+           " (%s)" % m, run_pypy_str,
+           '\n# '.join(sys_version_lines)))
     if co.co_filename:
-        print('# Embedded file name: %s' % co.co_filename,
-              file=real_out)
+        write('# Embedded file name: %s' % co.co_filename,)
     if timestamp:
-        print('# Compiled at: %s' % datetime.datetime.fromtimestamp(timestamp),
-              file=real_out)
+        write('# Compiled at: %s' %
+              datetime.datetime.fromtimestamp(timestamp))
     if source_size:
-        print('# Size of source mod 2**32: %d bytes' % source_size,
-              file=real_out)
+        real_out.write('# Size of source mod 2**32: %d bytes\n' %
+                       source_size)

     try:
-        pysource.deparse_code(bytecode_version, co, out, showasm, showast,
-                              showgrammar, code_objects=code_objects,
-                              is_pypy=is_pypy)
-    except pysource.SourceWalkerError as e:
+        if mapstream:
+            if isinstance(mapstream, str):
+                mapstream = _get_outstream(mapstream)
+
+            deparsed = deparse_code_with_map(bytecode_version, co, out, showasm, showast,
+                                             showgrammar,
+                                             code_objects = code_objects,
+                                             is_pypy = is_pypy,
+                                             )
+            header_count = 3+len(sys_version_lines)
+            linemap = [(line_no, deparsed.source_linemap[line_no]+header_count)
+                       for line_no in
+                       sorted(deparsed.source_linemap.keys())]
+            mapstream.write("\n\n# %s\n" % linemap)
+        else:
+            deparsed = deparse_code(bytecode_version, co, out, showasm, showast,
+                                    showgrammar, code_objects=code_objects,
+                                    is_pypy=is_pypy)
+            pass
+        return deparsed
+    except pysource.SourceWalkerError, e:
         # deparsing failed
         raise pysource.SourceWalkerError(str(e))

 # For compatiblity
 uncompyle = decompile

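To make the linemap arithmetic in decompile() concrete: the decompiled text starts with a comment header (the uncompyle6 version, bytecode version and "Decompiled from" lines, plus one line per extra line of sys.version), so every deparsed line number is shifted by that header size before being written to the map stream. A small sketch with made-up numbers:

    import sys

    # Hypothetical deparse result: original source line -> line in deparsed text.
    source_linemap = {1: 1, 2: 2, 5: 4}

    sys_version_lines = sys.version.split('\n')
    header_count = 3 + len(sys_version_lines)

    linemap = [(line_no, source_linemap[line_no] + header_count)
               for line_no in sorted(source_linemap)]
    sys.stdout.write("\n\n# %s\n" % linemap)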
 def decompile_file(filename, outstream=None, showasm=None, showast=False,
-                   showgrammar=False):
+                   showgrammar=False, mapstream=None):
     """
-    decompile Python byte-code file (.pyc)
+    decompile Python byte-code file (.pyc). Return objects to
+    all of the deparsed objects found in `filename`.
     """

     filename = check_object_path(filename)
     code_objects = {}
     (version, timestamp, magic_int, co, is_pypy,
-     source_size) = load_module(filename, code_objects)
+     source_size) = load_module(filename, code_objects)

-    if type(co) == list:
+    if isinstance(co, list):
+        deparsed = []
         for con in co:
-            decompile(version, con, outstream, showasm, showast,
-                      timestamp, showgrammar, code_objects=code_objects,
-                      is_pypy=is_pypy, magic_int=magic_int)
+            deparsed.append(
+                decompile(version, con, outstream, showasm, showast,
+                          timestamp, showgrammar, code_objects=code_objects,
+                          is_pypy=is_pypy, magic_int=magic_int),
+                mapstream=mapstream)
     else:
-        decompile(version, co, outstream, showasm, showast,
-                  timestamp, showgrammar,
-                  code_objects=code_objects, source_size=source_size,
-                  is_pypy=is_pypy, magic_int=magic_int)
+        deparsed = [decompile(version, co, outstream, showasm, showast,
+                              timestamp, showgrammar,
+                              code_objects=code_objects, source_size=source_size,
+                              is_pypy=is_pypy, magic_int=magic_int,
+                              mapstream=mapstream)]
     co = None
+    return deparsed

 # For compatiblity
 uncompyle_file = decompile_file


 # FIXME: combine into an options parameter
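A usage sketch for the extended decompile_file() above, assuming the signature shown in this hunk; the file names are hypothetical, and per the new docstring the call returns the deparsed objects found in the .pyc:

    import sys
    from uncompyle6.main import decompile_file

    out = open('mymodule_dis.py', 'w')          # hypothetical output path
    try:
        deparsed = decompile_file('mymodule.pyc', out, mapstream=sys.stdout)
    finally:
        out.close()
    print("deparsed %d code object(s)" % len(deparsed))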
@@ -94,19 +144,9 @@ def main(in_base, out_base, files, codes, outfile=None,
     - files below out_base out_base=...
     - stdout out_base=None, outfile=None
     """
-    def _get_outstream(outfile):
-        dir = os.path.dirname(outfile)
-        failed_file = outfile + '_failed'
-        if os.path.exists(failed_file):
-            os.remove(failed_file)
-        try:
-            os.makedirs(dir)
-        except OSError:
-            pass
-        return open(outfile, 'w')
-
     tot_files = okay_files = failed_files = verify_failed_files = 0
     current_outfile = outfile
+    linemap_stream = None

     for filename in files:
         infile = os.path.join(in_base, filename)
@@ -115,22 +155,31 @@ def main(in_base, out_base, files, codes, outfile=None,
                              % infile)
             continue

+        if do_linemaps:
+            linemap_stream = infile + '.pymap'
+            pass
+
         # print (infile, file=sys.stderr)

         if outfile: # outfile was given as parameter
             outstream = _get_outstream(outfile)
         elif out_base is None:
             outstream = sys.stdout
+            if do_linemaps or do_verify:
+                prefix = os.path.basename(filename)
+                if do_linemaps:
+                    linemap_stream = sys.stdout
             if do_verify:
                 prefix = os.path.basename(filename) + '-'
                 if prefix.endswith('.py'):
                     prefix = prefix[:-len('.py')]
                 junk, outfile = tempfile.mkstemp(suffix=".py",
                                                  prefix=prefix)

             # Unbuffer output if possible
-            buffering = -1 if sys.stdout.isatty() else 0
+            if sys.stdout.isatty():
+                buffering = -1
+            else:
+                buffering = 0
             sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering)
-            tee = subprocess.Popen(["tee", outfile], stdin=subprocess.PIPE)
+            tee = subprocess.Popen(["tee", current_outfile],
+                                   stdin=subprocess.PIPE)
             os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
             os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
         else:
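The verify branch above routes the process's own stdout and stderr through tee so that decompiled output reaches the terminal and a capture file at the same time. A stripped-down sketch of that redirection, assuming a Unix-like system with tee on the PATH and a hypothetical target path:

    import os, subprocess, sys

    target = '/tmp/decompiled_copy.py'   # hypothetical capture file

    tee = subprocess.Popen(["tee", target], stdin=subprocess.PIPE)
    os.dup2(tee.stdin.fileno(), sys.stdout.fileno())   # stdout now feeds tee
    os.dup2(tee.stdin.fileno(), sys.stderr.fileno())   # and so does stderr

    print("this line reaches both the terminal and %s" % target)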
@@ -138,16 +187,20 @@ def main(in_base, out_base, files, codes, outfile=None,
                 current_outfile = os.path.join(out_base, filename[0:-1])
             else:
                 current_outfile = os.path.join(out_base, filename) + '_dis'
+                pass
+            pass
+
             outstream = _get_outstream(current_outfile)

         # print(current_outfile, file=sys.stderr)

         # Try to uncompile the input file
         try:
-            decompile_file(infile, outstream, showasm, showast, showgrammar)
+            decompile_file(infile, outstream, showasm, showast, showgrammar, linemap_stream)
             tot_files += 1
-        except (ValueError, SyntaxError, ParserError, pysource.SourceWalkerError) as e:
+        except (ValueError, SyntaxError, ParserError, pysource.SourceWalkerError):
             sys.stdout.write("\n")
-            sys.stderr.write("\n# file %s\n# %s\n" % (infile, e))
+            sys.stderr.write("# file %s\n" % (infile))
             failed_files += 1
         except KeyboardInterrupt:
             if outfile:
@@ -166,22 +219,16 @@ def main(in_base, out_base, files, codes, outfile=None,
             # sys.stderr.write("\n# Can't uncompile %s\n" % infile)
         else: # uncompile successful
             if current_outfile:
-                if do_linemaps:
-                    mapping = line_number_mapping(infile, current_outfile)
-                    outstream.write("\n\n## Line number correspondences\n")
-                    import pprint
-                    s = pprint.pformat(mapping, indent=2, width=80)
-                    s2 = '##' + '\n##'.join(s.split("\n")) + "\n"
-                    outstream.write(s2)
                 outstream.close()

                 if do_verify:
-                    weak_verify = do_verify == 'weak'
                     try:
-                        msg = verify.compare_code_with_srcfile(infile, current_outfile, weak_verify=weak_verify)
+                        msg = verify.compare_code_with_srcfile(infile,
+                                                               current_outfile,
+                                                               do_verify)
                         if not current_outfile:
                             if not msg:
-                                print('\n# okay decompiling %s' % infile)
+                                print '\n# okay decompiling %s' % infile
                                 okay_files += 1
                             else:
                                 verify_failed_files += 1
@@ -190,13 +237,16 @@ def main(in_base, out_base, files, codes, outfile=None,
                         else:
                             okay_files += 1
                             pass
-                    except verify.VerifyCmpError as e:
+                    except verify.VerifyCmpError, e:
                         print(e)
                         verify_failed_files += 1
                         os.rename(current_outfile, current_outfile + '_unverified')
                         sys.stderr.write("### Error Verifying %s\n" % filename)
                         sys.stderr.write(str(e) + "\n")
+                        if not outfile:
+                            sys.stderr.write("### Error Verifiying %s" %
+                                             filename)
+                            sys.stderr.write(e)
                         if raise_on_error:
                             raise
                         pass
@@ -206,14 +256,15 @@ def main(in_base, out_base, files, codes, outfile=None,
                 okay_files += 1
                 pass
             elif do_verify:
-                sys.stderr.write("\n### uncompile successful, but no file to compare against\n")
+                sys.stderr.write("\n### uncompile successful, "
+                                 "but no file to compare against")
                 pass
             else:
                 okay_files += 1
                 if not current_outfile:
                     mess = '\n# okay decompiling'
                     # mem_usage = __memUsage()
-                    print(mess, infile)
+                    print mess, infile
         if current_outfile:
             sys.stdout.write("%s\r" %
                              status_msg(do_verify, tot_files, okay_files, failed_files,
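The block removed above used to append a pretty-printed line-number table to the decompiled output; the reworked code writes a linemap through mapstream in decompile() instead. For reference, the formatting that removed block performed, shown with made-up data:

    import pprint

    # Hypothetical (source line, decompiled line) pairs standing in for the
    # result of line_number_mapping().
    mapping = [(1, 4), (2, 5), (7, 11)]
    s = pprint.pformat(mapping, indent=2, width=80)
    s2 = '##' + '\n##'.join(s.split("\n")) + "\n"
    print("\n\n## Line number correspondences\n" + s2)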
@@ -240,19 +291,21 @@ else:
 def status_msg(do_verify, tot_files, okay_files, failed_files,
                verify_failed_files, weak_verify):
     if weak_verify == 'weak':
-        verification_type = 'weak'
+        verification_type = 'weak '
+    elif weak_verify == 'verify-run':
+        verification_type = 'run '
     else:
-        verification_type = 'strong'
+        verification_type = ''
     if tot_files == 1:
         if failed_files:
             return "\n# decompile failed"
         elif verify_failed_files:
-            return "\n# decompile %s verification failed" % verification_type
+            return "\n# decompile %sverification failed" % verification_type
         else:
             return "\n# Successfully decompiled file"
             pass
         pass
     mess = "decompiled %i files: %i okay, %i failed" % (tot_files, okay_files, failed_files)
     if do_verify:
-        mess += (", %i %s verification failed" % (verify_failed_files, verification_type))
+        mess += (", %i %sverification failed" % (verify_failed_files, verification_type))
     return mess
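A quick illustration of how the reworked status_msg() strings compose; note the trailing space baked into 'weak ' and 'run ' so that the default case collapses to plain "verification failed":

    def fmt(verify_failed_files, verification_type):
        return ", %i %sverification failed" % (verify_failed_files, verification_type)

    print(fmt(2, 'weak '))   # ", 2 weak verification failed"
    print(fmt(1, 'run '))    # ", 1 run verification failed"
    print(fmt(3, ''))        # ", 3 verification failed"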
@@ -6,8 +6,6 @@
 Common uncompyle6 parser routines.
 """

-from __future__ import print_function
-
 import sys

 from xdis.code import iscode
@@ -168,7 +166,7 @@ class PythonParser(GenericASTBuilder):
                 indent = ' '
             else:
                 indent = '-> '
-            print("%s%s" % (indent, instructions[i]))
+            print "%s%s" % (indent, instructions[i])
         raise ParserError(err_token, err_token.offset)
     else:
         raise ParserError(None, -1)
@@ -768,4 +766,4 @@ if __name__ == '__main__':
         ast = python_parser(PYTHON_VERSION, co, showasm=True, is_pypy=IS_PYPY)
         print(ast)
         return
-    parse_test(parse_test.__code__)
+    # parse_test(parse_test.__code__)
@@ -12,8 +12,6 @@ If we succeed in creating a parse tree, then we have a Python program
 that a later phase can turn into a sequence of ASCII text.
 """

-from __future__ import print_function
-
 from uncompyle6.parser import PythonParser, PythonParserSingle, nop_func
 from uncompyle6.parsers.astnode import AST
 from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
@@ -69,6 +69,7 @@ class Python24Parser(Python25Parser):
         super(Python24Parser, self).customize_grammar_rules(tokens, customize)
         if self.version == 2.4:
             self.check_reduce['nop_stmt'] = 'tokens'
+            self.check_reduce['try_except'] = 'tokens'

     def reduce_is_invalid(self, rule, ast, tokens, first, last):
         invalid = super(Python24Parser,
@@ -19,6 +19,9 @@ class Python25Parser(Python26Parser):

        return_if_stmt ::= ret_expr RETURN_END_IF JUMP_BACK

+       # We have no jumps to jumps, so no "come_froms" but a single "COME_FROM"
+       ifelsestmt ::= testexpr c_stmts_opt jf_cf_pop else_suite COME_FROM
+
        # Python 2.6 uses ROT_TWO instead of the STORE_xxx
        # withas is allowed as a "from future" in 2.5
        # 2.6 and 2.7 do something slightly different
@@ -36,6 +39,8 @@ class Python25Parser(Python26Parser):
        # loop. FIXME: should "come_froms" below be a single COME_FROM?
        tryelsestmt ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
                        except_handler else_suite come_froms
+       tryelsestmtl ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
+                        except_handler else_suitel

        # Python 2.6 omits the LOAD_FAST DELETE_FAST below
        # withas is allowed as a "from future" in 2.5
@@ -54,6 +59,9 @@ class Python25Parser(Python26Parser):
     def customize_grammar_rules(self, tokens, customize):
         # Remove grammar rules inherited from Python 2.6 or Python 2
         self.remove_rules("""
+        # No jump to jumps in 2.4 so we have a single "COME_FROM", not "come_froms"
+        ifelsestmt ::= testexpr c_stmts_opt jf_cf_pop else_suite come_froms
+
        setupwith ::= DUP_TOP LOAD_ATTR ROT_TWO LOAD_ATTR CALL_FUNCTION_0 POP_TOP
        withstmt ::= expr setupwith SETUP_FINALLY suite_stmts_opt
                     POP_BLOCK LOAD_CONST COME_FROM WITH_CLEANUP END_FINALLY
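For context on how grammar text like the rules above is attached to and stripped from a parser, a self-contained sketch; the GrammarSketch class below is a stand-in written only for illustration, not the real spark-parser API behind addRule() and remove_rules():

    class GrammarSketch(object):
        """Minimal stand-in for the add/remove rule methods used in these parsers."""
        def __init__(self):
            self.rules = set()
        def addRule(self, rule_text, func=None):
            # Register every non-comment line of the grammar text.
            for line in rule_text.strip().splitlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    self.rules.add(line)
        def remove_rules(self, rule_text):
            # Drop rules inherited from a newer grammar.
            for line in rule_text.strip().splitlines():
                self.rules.discard(line.strip())

    g = GrammarSketch()
    g.addRule("ifelsestmt ::= testexpr c_stmts_opt jf_cf_pop else_suite come_froms")
    g.remove_rules("ifelsestmt ::= testexpr c_stmts_opt jf_cf_pop else_suite come_froms")
    g.addRule("ifelsestmt ::= testexpr c_stmts_opt jf_cf_pop else_suite COME_FROM")
    print(sorted(g.rules))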
@@ -131,6 +131,7 @@ class Python27Parser(Python2Parser):
        ifelsestmt ::= testexpr c_stmts_opt JUMP_FORWARD else_suite COME_FROM
        ifelsestmtc ::= testexpr c_stmts_opt JUMP_ABSOLUTE else_suitec
        ifelsestmtl ::= testexpr c_stmts_opt JUMP_BACK else_suitel
+       ifelsestmtl ::= testexpr c_stmts_opt CONTINUE else_suitel

        # Common with 2.6
        return_if_lambda ::= RETURN_END_IF_LAMBDA COME_FROM
@@ -193,4 +194,5 @@ if __name__ == '__main__':
                               for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
-    # p.dump_grammar()
+    p.check_grammar()
+    p.dump_grammar()
@@ -19,6 +19,7 @@ from uncompyle6.parser import PythonParser, PythonParserSingle, nop_func
 from uncompyle6.parsers.astnode import AST
 from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
 from xdis import PYTHON3
+from itertools import islice,chain,repeat

 class Python3Parser(PythonParser):

@@ -1092,7 +1093,7 @@ class Python3Parser(PythonParser):

         if tokens[cfl-1] != 'JUMP_BACK':
             cfl_offset = tokens[cfl-1].offset
-            insn = next(i for i in self.insts if cfl_offset == i.offset)
+            insn = chain((i for i in self.insts if cfl_offset == i.offset), repeat(None)).next()
             if insn and insn.is_jump_target:
                 return True

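The change above replaces the next() builtin, which only appeared in Python 2.6, with an itertools spelling that also yields None once the generator is exhausted. A self-contained illustration with stand-in data:

    from itertools import chain, repeat

    insts = [10, 20, 30]          # stand-in for self.insts
    wanted = 20                   # stand-in for the cfl_offset being matched

    matches = chain((i for i in insts if i == wanted), repeat(None))
    # Python 2.4/2.5 spell this matches.next(); the next() builtin arrived in 2.6.
    insn = next(matches)
    print(insn)   # 20, or None when nothing matches, thanks to repeat(None)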
@@ -2,7 +2,6 @@
 """
 spark grammar differences over Python 3.1 for Python 3.0.
 """
-from __future__ import print_function

 from uncompyle6.parser import PythonParserSingle
 from uncompyle6.parsers.parse31 import Python31Parser

@@ -2,7 +2,6 @@
 """
 spark grammar differences over Python 3.2 for Python 3.1.
 """
-from __future__ import print_function

 from uncompyle6.parser import PythonParserSingle
 from uncompyle6.parsers.parse32 import Python32Parser

@@ -2,8 +2,6 @@
 """
 spark grammar differences over Python 3 for Python 3.2.
 """
-from __future__ import print_function
-
 from uncompyle6.parser import PythonParserSingle
 from uncompyle6.parsers.parse3 import Python3Parser


@@ -2,7 +2,6 @@
 """
 spark grammar differences over Python 3.2 for Python 3.3.
 """
-from __future__ import print_function

 from uncompyle6.parser import PythonParserSingle
 from uncompyle6.parsers.parse32 import Python32Parser

@@ -2,7 +2,6 @@
 """
 spark grammar differences over Python 3.4 for Python 3.5.
 """
-from __future__ import print_function

 from uncompyle6.parser import PythonParserSingle, nop_func
 from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
@@ -2,7 +2,6 @@
 """
 spark grammar differences over Python 3.5 for Python 3.6.
 """
-from __future__ import print_function

 from uncompyle6.parser import PythonParserSingle, nop_func
 from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
@@ -135,9 +134,9 @@ class Python36Parser(Python35Parser):
         if opname.startswith('CALL_FUNCTION_KW'):
             self.addRule("expr ::= call_kw", nop_func)
             values = 'expr ' * token.attr
-            rule = 'call_kw ::= expr kwargs_36 {token.kind}'.format(**locals())
+            rule = 'call_kw ::= expr kwargs_36 %s' % token.kind
             self.addRule(rule, nop_func)
-            rule = 'kwargs_36 ::= {values} LOAD_CONST'.format(**locals())
+            rule = 'kwargs_36 ::= %s LOAD_CONST' % values
             self.add_unique_rule(rule, token.kind, token.attr, customize)
         elif opname == 'CALL_FUNCTION_EX_KW':
             self.addRule("""expr ::= call_ex_kw
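To make the rule construction above concrete: for a CALL_FUNCTION_KW_3 token whose attr is 3, the code derives one call_kw rule naming the opcode and one kwargs_36 rule with three expr slots. A sketch of just the string building, with a stand-in token class:

    class Token(object):
        # Minimal stand-in for the scanner token the real code receives.
        def __init__(self, kind, attr):
            self.kind, self.attr = kind, attr

    token = Token('CALL_FUNCTION_KW_3', 3)
    values = 'expr ' * token.attr
    rule_call = 'call_kw ::= expr kwargs_36 %s' % token.kind
    rule_kwargs = 'kwargs_36 ::= %s LOAD_CONST' % values
    print(rule_call)    # call_kw ::= expr kwargs_36 CALL_FUNCTION_KW_3
    print(rule_kwargs)  # kwargs_36 ::= expr expr expr  LOAD_CONST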
@@ -2,7 +2,6 @@
 """
 spark grammar differences over Python 3.6 for Python 3.7
 """
-from __future__ import print_function

 from uncompyle6.parser import PythonParserSingle
 from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
Some files were not shown because too many files have changed in this diff.