Compare commits

..

15 Commits

Author SHA1 Message Date
rocky
a5c388c13b Another 2.7 "if" with return fix
This works in conjunction with the commit before the previous release commit.
2018-02-17 11:11:32 -05:00
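An illustrative sketch (not part of this changeset) of the source shape the 2.7 "if"-with-return fixes and the new parse27.py rules (ifstmt ::= testexpr return_stmts COME_FROM) are aimed at:

    # Hypothetical example for orientation only: a 2.7 "if" whose suite ends
    # in a return; decompiling bytecode of this shape exercises the fixed rules.
    def pick(flag, a, b):
        if flag:
            a += 1
            return a
        return b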
rocky
c82095e6ac Get ready for release 2.16.0 2018-02-17 07:30:22 -05:00
rocky
67ad08fd4a Better 2.7 end_if and COME_FROM determination
Fixes #149

... Add more tests too
2018-02-17 07:16:14 -05:00
rocky
fa4f614295 Weird comprehension bug seen via new loctraceback 2018-02-15 12:15:49 -05:00
rocky
f7f0aa5ea9 Merge branch 'master' of github.com:rocky/python-uncompyle6 2018-02-15 10:42:29 -05:00
rocky
083ae5f3fd Add deparsed_find() used by the trepan debuggers 2018-02-15 10:42:00 -05:00
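A rough usage sketch of the new deparsed_find() API; the calling convention is copied from the pytest/test_deparse.py changes further down and should be treated as an assumption, not documented API:

    # Sketch only; assumes deparsed.offsets is keyed by (name, offset) pairs
    # and that deparsed_find() takes ((name, offset), deparsed, code), as in
    # pytest/test_deparse.py below.
    from uncompyle6 import PYTHON_VERSION
    from uncompyle6.semantics.fragments import deparse_code, deparsed_find

    def sample():
        return [y for y in range(3)]

    code = sample.__code__  # fn.func_code on Python 2
    deparsed = deparse_code(PYTHON_VERSION, code)
    for name, offset in deparsed.offsets:
        print(name, offset, deparsed_find((name, offset), deparsed, code))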
rocky
a01091a46e Misc pydisasm fixes 2018-02-15 07:30:01 -05:00
rocky
b5a825f4d8 Fix up 3.6+ CALL_FUNCTION_EX 2018-02-12 07:45:20 -05:00
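For context (an illustration, not taken from this diff): CPython 3.6+ compiles calls that unpack iterables or mappings into CALL_FUNCTION_EX, which the standard dis module shows directly:

    import dis
    # On a 3.6+ interpreter the disassembly below contains CALL_FUNCTION_EX;
    # 3.5 and earlier emit CALL_FUNCTION_VAR_KW for the same source.
    dis.dis(compile("f(*args, **kwargs)", "<example>", "exec"))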
rocky
730b0549d5 Handle 3.6+ FUNCTION_EX a little more generally 2018-02-12 04:26:52 -05:00
rocky
e431e49d77 Isolate Python 3.5 custom parse rules...
are isolated into parse35.py now and removed from parse3.py

This causes some code duplicated from parse3.py into parse3{5,6}.py

We will deal with that later.
2018-02-12 03:59:44 -05:00
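For orientation, a hypothetical skeleton (not code from this diff) of the per-version parser pattern behind this split: each version-specific parser subclasses a neighboring version and adds or removes grammar rules, which is why pulling the 3.5 rules out of parse3.py duplicates some of them into parse3{5,6}.py:

    # Skeleton only; class and method names follow the parse3x.py files shown
    # in this diff, but the body is illustrative.
    from uncompyle6.parsers.parse34 import Python34Parser

    class Python35Parser(Python34Parser):
        def custom_classfunc_rule(self, opname, token, customize, *args):
            # Add 3.5-specific call rules (e.g. CALL_FUNCTION_VAR handling)
            # here; otherwise defer to the 3.4 behaviour.
            return super(Python35Parser, self).custom_classfunc_rule(
                opname, token, customize, *args)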
rocky
230a38d537 Fix Python 3.5 CALL_FUNCTION_VAR handling 2018-02-12 03:07:03 -05:00
rocky
6d29ed9077 Python 3.5 CALL_FUNCTION_VAR bugs 2018-02-09 16:48:11 -05:00
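For reference (an illustration, not from the diff): CALL_FUNCTION_VAR is what Python 3.5 and earlier emit for a call with a single *-argument and no **-argument; 3.6+ compiles the same source to CALL_FUNCTION_EX:

    import dis
    # Run under Python 3.5 or earlier to see CALL_FUNCTION_VAR; under 3.6+
    # the same call disassembles to CALL_FUNCTION_EX.
    dis.dis(compile("fn(a, *rest)", "<example>", "exec"))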
rocky
bb45be2dc7 Start to handle 3.5+ BUILD_LIST_UNPACK in call ..
to implement multiple star arguments
2018-02-09 03:41:13 -05:00
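A small illustration of the multi-star call shape this commit starts to handle; per the commit message, Python 3.5 merges the starred arguments with BUILD_LIST_UNPACK before the call, while 3.6+ uses BUILD_TUPLE_UNPACK_WITH_CALL together with CALL_FUNCTION_EX:

    import dis
    # Needs a 3.5+ interpreter to compile at all (PEP 448); which unpack
    # opcode appears in the disassembly depends on the interpreter version.
    dis.dis(compile("sum(*args, *args, *args)", "<example>", "exec"))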
rocky
f7999d2754 Add custom 3.5 handling for f(*a, *b, *c) 2018-02-08 08:42:38 -05:00
rocky
5aeb0424fc Administrivia 2018-02-05 06:37:50 -05:00
109 changed files with 856 additions and 492 deletions


@@ -3,7 +3,13 @@ language: python
sudo: false
python:
- '2.7' # this is a cheat here because travis doesn't do 2.4-2.6
- '3.5'
- '2.7.12'
- '2.6'
- '3.3'
- '3.4'
- '3.2'
- '3.6'
install:
- pip install -e .


@@ -39,7 +39,7 @@ check-3.0 check-3.1 check-3.2 check-3.5 check-3.6:
check-3.7: pytest
#:Tests for Python 2.6 (doesn't have pytest)
check-2.4 check-2.5 check-2.6:
check-2.6:
$(MAKE) -C test $@
#:PyPy 2.6.1 PyPy 5.0.1, or PyPy 5.8.0-beta0

NEWS

@@ -1,3 +1,19 @@
uncompyle6 2.16.0 2018-02-17
- API additions:
- add fragments.op_at_code_loc() and
- fragments.deparsed_find()
- Better 2.7 end_if and COME_FROM determination
- Fix up 3.6+ CALL_FUNCTION_EX
- Misc pydisasm fixes
- Weird comprehension bug seen via new loctraceback
- Fix Python 3.5+ CALL_FUNCTION_VAR and BUILD_LIST_UNPACK in call; with this
we can handle 3.5+ f(a, b, *c, *d, *e) now
uncompyle6 2.15.1 2018-02-05
- More bug fixes and revert an improper bug fix in 2.15.0
uncompyle6 2.15.0 2018-02-05 pycon2018.co
- Bug fixes


@@ -41,7 +41,8 @@ entry_points = {
]}
ftp_url = None
install_requires = ['spark-parser >= 1.8.5, < 1.9.0',
'xdis >= 3.6.9, < 3.7.0']
'xdis >= 3.6.9, < 3.7.0', 'six']
license = 'MIT'
mailing_list = 'python-debugger@googlegroups.com'
modname = 'uncompyle6'

admin-tools/check-newer-versions.sh Normal file → Executable file

admin-tools/check-older-versions.sh Normal file → Executable file


@@ -45,6 +45,8 @@
$ source admin-tools/setup-python-2.4.sh
$ git merge master
# Add and fix merge conflicts
$ git commit
# Check against older versions


@@ -1,46 +0,0 @@
git pull
Change version in uncompyle6/version.py
source uncompyle6/version.py
echo $VERSION
git commit -m"Get ready for release $VERSION" .
Update ChangeLog:
make ChangeLog
Update NEWS from ChangeLog
make check
git commit --amend .
git push
Make sure pyenv is running
# Pyenv
source admin-tools/check-newer-versions.sh
# Switch to python-2.4 and build that first...
source admin-tools/setup-python-2.4
rm ChangeLog
git merge master
Update NEWS from master branch
git commit -m"Get ready for release $VERSION" .
source admin-tools/check-older-versions.sh
source admin-tools/check-newer-versions.sh
make-dist-older.sh
git tag release-python-2.4-$VERSION
./make-dist-newer.sh
git tag release-$VERSION
twine upload dist/uncompyle6-${VERSION}*

admin-tools/setup-master.sh Normal file → Executable file

admin-tools/setup-python-2.4.sh Normal file → Executable file


@@ -1,3 +0,0 @@
#!/bin/bash
cd $(dirname ${BASH_SOURCE[0]})/..
git pull


@@ -11,4 +11,4 @@ dependencies:
test:
override:
- python ./setup.py develop && make check-2.7
- cd ./test/stdlib && pyenv local 2.7.10 && bash ./runtests.sh 'test_[f-i]*.py'
- cd ./test/stdlib && pyenv local 2.7.10 && bash ./runtests.sh 'test_[p-z]*.py'


@@ -1,5 +1,5 @@
import pytest
from uncompyle6.semantics.fragments import deparse_code as deparse
from uncompyle6.semantics.fragments import deparse_code as deparse, deparsed_find
from uncompyle6 import PYTHON_VERSION, PYTHON3
def map_stmts(x, y):
@@ -30,7 +30,7 @@ def list_comp():
[y for y in range(3)]
def get_parsed_for_fn(fn):
code = fn.func_code
code = fn.__code__ if PYTHON3 else fn.func_code
return deparse(PYTHON_VERSION, code)
def check_expect(expect, parsed, fn_name):
@@ -42,6 +42,7 @@ def check_expect(expect, parsed, fn_name):
"%s: ran out if items in testing node" % fn_name)
nodeInfo = parsed.offsets[name, offset]
node = nodeInfo.node
nodeInfo2 = deparsed_find((name, offset), parsed, code)
extractInfo = parsed.extract_node_info(node)
assert expect[i] == extractInfo.selectedLine, \


@@ -10,7 +10,7 @@ else:
maxint = sys.maxint
from uncompyle6.semantics.helper import print_docstring
class PrintFake:
class PrintFake():
def __init__(self):
self.pending_newlines = 0
self.f = StringIO()


@@ -22,8 +22,9 @@ def bug_loop(disassemble, tb=None):
disassemble(tb)
def test_if_in_for():
code = bug.func_code
code = bug.__code__
scan = get_scanner(PYTHON_VERSION)
print(PYTHON_VERSION)
if 2.7 <= PYTHON_VERSION <= 3.0 and not IS_PYPY:
n = scan.setup_code(code)
bytecode = Bytecode(code, scan.opc)

pytest/test_fstring.py Normal file

@@ -0,0 +1,150 @@
# std
import os
# test
import pytest
import hypothesis
from hypothesis import strategies as st
# uncompyle6
from uncompyle6 import PYTHON_VERSION, deparse_code
@st.composite
def expressions(draw):
# todo : would be nice to generate expressions using hypothesis however
# this is pretty involved so for now just use a corpus of expressions
# from which to select.
return draw(st.sampled_from((
'abc',
'len(items)',
'x + 1',
'lineno',
'container',
'self.attribute',
'self.method()',
# These expressions are failing, I think these are control
# flow problems rather than problems with FORMAT_VALUE,
# however I need to confirm this...
#'sorted(items, key=lambda x: x.name)',
#'func(*args, **kwargs)',
#'text or default',
#'43 if life_the_universe and everything else None'
)))
@st.composite
def format_specifiers(draw):
"""
Generate a valid format specifier using the rules:
format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]
fill ::= <any character>
align ::= "<" | ">" | "=" | "^"
sign ::= "+" | "-" | " "
width ::= integer
precision ::= integer
type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
See https://docs.python.org/2/library/string.html
:param draw: Let hypothesis draw from other strategies.
:return: An example format_specifier.
"""
alphabet_strategy = st.characters(min_codepoint=ord('a'), max_codepoint=ord('z'))
fill = draw(st.one_of(alphabet_strategy, st.none()))
align = draw(st.sampled_from(list('<>=^')))
fill_align = (fill + align or '') if fill else ''
type_ = draw(st.sampled_from('bcdeEfFgGnosxX%'))
can_have_sign = type_ in 'deEfFgGnoxX%'
can_have_comma = type_ in 'deEfFgG%'
can_have_precision = type_ in 'fFgG'
can_have_pound = type_ in 'boxX%'
can_have_zero = type_ in 'oxX'
sign = draw(st.sampled_from(list('+- ') + [''])) if can_have_sign else ''
pound = draw(st.sampled_from(('#', '',))) if can_have_pound else ''
zero = draw(st.sampled_from(('0', '',))) if can_have_zero else ''
int_strategy = st.integers(min_value=1, max_value=1000)
width = draw(st.one_of(int_strategy, st.none()))
width = str(width) if width is not None else ''
comma = draw(st.sampled_from((',', '',))) if can_have_comma else ''
if can_have_precision:
precision = draw(st.one_of(int_strategy, st.none()))
precision = '.' + str(precision) if precision else ''
else:
precision = ''
return ''.join((fill_align, sign, pound, zero, width, comma, precision, type_,))
@st.composite
def fstrings(draw):
"""
Generate a valid f-string.
See https://www.python.org/dev/peps/pep-0498/#specification
:param draw: Let hypothesis draw from other strategies.
:return: A valid f-string.
"""
character_strategy = st.characters(
blacklist_characters='\r\n\'\\s{}',
min_codepoint=1,
max_codepoint=1000,
)
is_raw = draw(st.booleans())
integer_strategy = st.integers(min_value=0, max_value=3)
expression_count = draw(integer_strategy)
content = []
for _ in range(expression_count):
expression = draw(expressions())
conversion = draw(st.sampled_from(('', '!s', '!r', '!a',)))
has_specifier = draw(st.booleans())
specifier = ':' + draw(format_specifiers()) if has_specifier else ''
content.append('{{{}{}{}}}'.format(expression, conversion, specifier))
content.append(draw(st.text(character_strategy)))
content = ''.join(content)
return "f{}'{}'".format('r' if is_raw else '', content)
@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
@hypothesis.given(format_specifiers())
def test_format_specifiers(format_specifier):
"""Verify that format_specifiers generates valid specifiers"""
try:
exec('"{:' + format_specifier + '}".format(0)')
except ValueError as e:
if 'Unknown format code' not in str(e):
raise
def run_test(text):
hypothesis.assume(len(text))
hypothesis.assume("f'{" in text)
expr = text + '\n'
code = compile(expr, '<string>', 'single')
deparsed = deparse_code(PYTHON_VERSION, code, compile_mode='single')
recompiled = compile(deparsed.text, '<string>', 'single')
if recompiled != code:
assert 'dis(' + deparsed.text.strip('\n') + ')' == 'dis(' + expr.strip('\n') + ')'
@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
@hypothesis.given(fstrings())
def test_uncompyle_fstring(fstring):
"""Verify uncompyling fstring bytecode"""
run_test(fstring)
@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
@pytest.mark.parametrize('fstring', [
"f'{abc}{abc!s}'",
"f'{abc}0'",
])
def test_uncompyle_direct(fstring):
"""useful for debugging"""
run_test(fstring)


@@ -0,0 +1,175 @@
# std
import string
# 3rd party
from hypothesis import given, assume, example, settings, strategies as st
import pytest
# uncompyle
from validate import validate_uncompyle
from test_fstring import expressions
alpha = st.sampled_from(string.ascii_lowercase)
numbers = st.sampled_from(string.digits)
alphanum = st.sampled_from(string.ascii_lowercase + string.digits)
@st.composite
def function_calls(draw,
min_keyword_args=0, max_keyword_args=5,
min_positional_args=0, max_positional_args=5,
min_star_args=0, max_star_args=1,
min_double_star_args=0, max_double_star_args=1):
"""
Strategy factory for generating function calls.
:param draw: Callable which draws examples from other strategies.
:return: The function call text.
"""
st_positional_args = st.lists(
alpha,
min_size=min_positional_args,
max_size=max_positional_args
)
st_keyword_args = st.lists(
alpha,
min_size=min_keyword_args,
max_size=max_keyword_args
)
st_star_args = st.lists(
alpha,
min_size=min_star_args,
max_size=max_star_args
)
st_double_star_args = st.lists(
alpha,
min_size=min_double_star_args,
max_size=max_double_star_args
)
positional_args = draw(st_positional_args)
keyword_args = draw(st_keyword_args)
st_values = st.lists(
expressions(),
min_size=len(keyword_args),
max_size=len(keyword_args)
)
keyword_args = [
x + '=' + e
for x, e in
zip(keyword_args, draw(st_values))
]
star_args = ['*' + x for x in draw(st_star_args)]
double_star_args = ['**' + x for x in draw(st_double_star_args)]
arguments = positional_args + keyword_args + star_args + double_star_args
draw(st.randoms()).shuffle(arguments)
arguments = ','.join(arguments)
function_call = 'fn({arguments})'.format(arguments=arguments)
try:
# TODO: Figure out the exact rules for ordering of positional, keyword,
# star args, double star args and in which versions the various
# types of arguments are supported so we don't need to check that the
# expression compiles like this.
compile(function_call, '<string>', 'single')
except:
assume(False)
return function_call
def test_function_no_args():
validate_uncompyle("fn()")
def isolated_function_calls(which):
"""
Returns a strategy for generating function calls, but isolated to
particular types of arguments, for example only positional arguments.
This can help reason about debugging errors in specific types of function
calls.
:param which: One of 'keyword', 'positional', 'star', 'double_star'
:return: Strategy for generating an function call isolated to specific
argument types.
"""
kwargs = dict(
max_keyword_args=0,
max_positional_args=0,
max_star_args=0,
max_double_star_args=0,
)
kwargs['_'.join(('min', which, 'args'))] = 1
kwargs['_'.join(('max', which, 'args'))] = 5 if 'star' not in which else 1
return function_calls(**kwargs)
with settings(max_examples=25):
@given(isolated_function_calls('positional'))
@example("fn(0)")
def test_function_positional_only(expr):
validate_uncompyle(expr)
@given(isolated_function_calls('keyword'))
@example("fn(a=0)")
def test_function_call_keyword_only(expr):
validate_uncompyle(expr)
@given(isolated_function_calls('star'))
@example("fn(*items)")
def test_function_call_star_only(expr):
validate_uncompyle(expr)
@given(isolated_function_calls('double_star'))
@example("fn(**{})")
def test_function_call_double_star_only(expr):
validate_uncompyle(expr)
@pytest.mark.xfail()
def test_BUILD_CONST_KEY_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
validate_uncompyle("fn(w=0,m=0,**v)")
@pytest.mark.xfail()
def test_BUILD_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
validate_uncompyle("fn(a=0,**g)")
@pytest.mark.xfail()
def test_CALL_FUNCTION_EX():
validate_uncompyle("fn(*g,**j)")
@pytest.mark.xfail()
def test_BUILD_MAP_CALL_FUNCTION_EX():
validate_uncompyle("fn(*z,u=0)")
@pytest.mark.xfail()
def test_BUILD_TUPLE_CALL_FUNCTION_EX():
validate_uncompyle("fn(**a)")
@pytest.mark.xfail()
def test_BUILD_MAP_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
validate_uncompyle("fn(b,b,b=0,*a)")
@pytest.mark.xfail()
def test_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
validate_uncompyle("fn(*c,v)")
@pytest.mark.xfail()
def test_BUILD_CONST_KEY_MAP_CALL_FUNCTION_EX():
validate_uncompyle("fn(i=0,y=0,*p)")
@pytest.mark.skip(reason='skipping property based test until all individual tests are passing')
@given(function_calls())
def test_function_call(function_call):
validate_uncompyle(function_call)


@@ -66,9 +66,9 @@ def test_grammar():
expect_dup_rhs = frozenset([('COME_FROM',), ('CONTINUE',), ('JUMP_ABSOLUTE',),
('LOAD_CONST',),
('JUMP_BACK',), ('JUMP_FORWARD',)])
# reduced_dup_rhs = {k: dup_rhs[k] for k in dup_rhs if k not in expect_dup_rhs}
# for k in reduced_dup_rhs:
# print(k, reduced_dup_rhs[k])
reduced_dup_rhs = {k: dup_rhs[k] for k in dup_rhs if k not in expect_dup_rhs}
for k in reduced_dup_rhs:
print(k, reduced_dup_rhs[k])
# assert not reduced_dup_rhs, reduced_dup_rhs
s = get_scanner(PYTHON_VERSION, IS_PYPY)


@@ -8,8 +8,12 @@ from uncompyle6.semantics.consts import (
if PYTHON3:
from io import StringIO
def iteritems(d):
return d.items()
else:
from StringIO import StringIO
def iteritems(d):
return d.iteritems()
from uncompyle6.semantics.pysource import SourceWalker as SourceWalker
@@ -26,7 +30,7 @@ def test_template_engine():
# FIXME: and so on...
from uncompyle6.semantics.consts import (
TABLE_R, TABLE_DIRECT,
TABLE_DIRECT, TABLE_R,
)
from uncompyle6.semantics.fragments import (
@@ -40,7 +44,7 @@ def test_tables():
(TABLE_DIRECT, 'TABLE_DIRECT', False),
(TABLE_R, 'TABLE_R', False),
(TABLE_DIRECT_FRAGMENT, 'TABLE_DIRECT_FRAGMENT', True)):
for k, entry in t.iteritems():
for k, entry in iteritems(t):
if k in skip_for_now:
continue
fmt = entry[0]


@@ -1,19 +1,19 @@
from uncompyle6 import PYTHON_VERSION, deparse_code
import pytest
from uncompyle6 import PYTHON_VERSION, PYTHON3, deparse_code
if PYTHON_VERSION >= 2.5:
def test_single_mode():
single_expressions = (
'i = 1',
'i and (j or k)',
'i += 1',
'i = j % 4',
'i = {}',
'i = []',
'for i in range(10):\n i\n',
'for i in range(10):\n for j in range(10):\n i + j\n',
'try:\n i\nexcept Exception:\n j\nelse:\n k\n'
)
def test_single_mode():
single_expressions = (
'i = 1',
'i and (j or k)',
'i += 1',
'i = j % 4',
'i = {}',
'i = []',
'for i in range(10):\n i\n',
'for i in range(10):\n for j in range(10):\n i + j\n',
'try:\n i\nexcept Exception:\n j\nelse:\n k\n'
)
for expr in single_expressions:
code = compile(expr + '\n', '<string>', 'single')
assert deparse_code(PYTHON_VERSION, code, compile_mode='single').text == expr + '\n'
for expr in single_expressions:
code = compile(expr + '\n', '<string>', 'single')
assert deparse_code(PYTHON_VERSION, code, compile_mode='single').text == expr + '\n'


@@ -9,4 +9,4 @@
12 JUMP_FORWARD 0 'to 15'
15_0 COME_FROM 12 '12'
15 LOAD_CONST 0 ''
18 RETURN_VALUE
18 RETURN_VALUE


@@ -12,4 +12,4 @@
18 STORE_NAME 2 'd'
21_0 COME_FROM 12 '12'
21 LOAD_CONST 2 ''
24 RETURN_VALUE
24 RETURN_VALUE


@@ -1,25 +1,24 @@
# future
from __future__ import print_function
# std
import os
import difflib
import subprocess
import tempfile
from StringIO import StringIO
import functools
# compatibility
import six
# uncompyle6 / xdis
from uncompyle6 import PYTHON_VERSION, IS_PYPY, deparse_code
# TODO : I think we can get xdis to support the dis api (python 3 version) by doing something like this there
from xdis.bytecode import Bytecode
from xdis.main import get_opcode
opc = get_opcode(PYTHON_VERSION, IS_PYPY)
Bytecode = functools.partial(Bytecode, opc=opc)
try:
import functools
Bytecode = functools.partial(Bytecode, opc=opc)
def _dis_to_text(co):
return Bytecode(co).dis()
except:
pass
def _dis_to_text(co):
return Bytecode(co).dis()
def print_diff(original, uncompyled):
@@ -43,11 +42,8 @@ def print_diff(original, uncompyled):
print('\nTo display diff highlighting run:\n pip install BeautifulSoup4')
diff = difflib.HtmlDiff().make_table(*args)
f = tempfile.NamedTemporaryFile(delete=False)
try:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(str(diff).encode('utf-8'))
finally:
f.close()
try:
print()
@@ -64,7 +60,8 @@ def print_diff(original, uncompyled):
print('\nFor side by side diff install elinks')
diff = difflib.Differ().compare(original_lines, uncompyled_lines)
print('\n'.join(diff))
os.unlink(f.name)
finally:
os.unlink(f.name)
def are_instructions_equal(i1, i2):
@@ -126,9 +123,8 @@ def validate_uncompyle(text, mode='exec'):
original_text = text
deparsed = deparse_code(PYTHON_VERSION, original_code,
compile_mode=mode,
out=StringIO(),
out=six.StringIO(),
is_pypy=IS_PYPY)
uncompyled_text = deparsed.text
uncompyled_code = compile(uncompyled_text, '<string>', 'exec')


@@ -1,16 +1,16 @@
#!/usr/bin/env python
"""Setup script for the 'uncompyle6' distribution."""
import sys
"""Setup script for the 'uncompyle6' distribution."""
SYS_VERSION = sys.version_info[0:2]
if not ((2, 4) <= SYS_VERSION <= (2, 7)):
mess = "Python Release 2.4 .. 2.7 are supported in this code branch."
if ((3, 2) <= SYS_VERSION <= (3, 7)):
mess += ("\nFor your Python, version %s, use the master code/branch." %
if not ((2, 6) <= SYS_VERSION <= (3, 7)) or ((3, 0) <= SYS_VERSION <= (3, 1)):
mess = "Python Release 2.6 .. 3.7 excluding 3.0 and 3.1 are supported in this code branch."
if ((2, 4) <= SYS_VERSION <= (2, 7)):
mess += ("\nFor your Python, version %s, use the python-2.4 code/branch." %
sys.version[0:3])
else:
mess += ("\nThis package is not supported before Python 2.4. Your Python version is %s."
elif SYS_VERSION < (2, 4) or ((3, 0) <= SYS_VERSION <= (3, 1)):
mess += ("\nThis package is not supported for Python version %s."
% sys.version[0:3])
print(mess)
raise Exception(mess)


@@ -1,55 +0,0 @@
import re
import unittest
from uncompyle6 import PYTHON_VERSION, IS_PYPY # , PYTHON_VERSION
from uncompyle6.parser import get_python_parser, python_parser
class TestGrammar(unittest.TestCase):
def test_grammar(self):
def check_tokens(tokens, opcode_set):
remain_tokens = set(tokens) - opcode_set
remain_tokens = set([re.sub('_\d+$','', t) for t in remain_tokens])
remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
remain_tokens = set(remain_tokens) - opcode_set
self.assertEqual(remain_tokens, set([]),
"Remaining tokens %s\n====\n%s" % (remain_tokens, p.dump_grammar()))
p = get_python_parser(PYTHON_VERSION, is_pypy=IS_PYPY)
(lhs, rhs, tokens,
right_recursive, dup_rhs) = p.check_sets()
expect_lhs = set(['expr1024', 'pos_arg'])
unused_rhs = set(['list', 'call', 'mkfunc',
'mklambda',
'unpack',])
expect_right_recursive = frozenset([('designList',
('store', 'DUP_TOP', 'designList'))])
expect_lhs.add('kwarg')
self.assertEqual(expect_lhs, set(lhs))
self.assertEqual(unused_rhs, set(rhs))
self.assertEqual(expect_right_recursive, right_recursive)
expect_dup_rhs = frozenset([('COME_FROM',), ('CONTINUE',), ('JUMP_ABSOLUTE',),
('LOAD_CONST',),
('JUMP_BACK',), ('JUMP_FORWARD',)])
reduced_dup_rhs = {}
for k in dup_rhs:
if k not in expect_dup_rhs:
reduced_dup_rhs[k] = dup_rhs[k]
pass
pass
for k in reduced_dup_rhs:
print(k, reduced_dup_rhs[k])
# assert not reduced_dup_rhs, reduced_dup_rhs
def test_dup_rule(self):
import inspect
python_parser(PYTHON_VERSION, inspect.currentframe().f_code,
is_pypy=IS_PYPY,
parser_debug={
'dups': True, 'transition': False, 'reduce': False,
'rules': False, 'errorstack': None, 'context': True})
if __name__ == '__main__':
unittest.main()


@@ -28,7 +28,7 @@ check:
$(MAKE) check-$(PYTHON_VERSION)
#: Run working tests from Python 2.6 or 2.7
check-2.4 check-2.5 check-2.6 check-2.7: check-bytecode-2 check-bytecode-3 check-bytecode-1 check-native-short
check-2.6 check-2.7: check-bytecode-2 check-bytecode-3 check-bytecode-1 check-native-short
#: Run working tests from Python 3.0
check-3.0: check-bytecode
@@ -113,12 +113,6 @@ check-bytecode-2.4:
check-bytecode-2.5:
$(PYTHON) test_pythonlib.py --bytecode-2.5
#: Get grammar coverage for Python 2.4
grammar-coverage-2.4:
-rm $(COVER_DIR)/spark-grammar-24.cover
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-24.cover $(PYTHON) test_pythonlib.py --bytecode-2.4
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-24.cover $(PYTHON) test_pyenvlib.py --2.4.6
#: Get grammar coverage for Python 2.5
grammar-coverage-2.5:
-rm $(COVER_DIR)/spark-grammar-25.cover
@@ -221,10 +215,6 @@ check-native-short:
$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION) --weak-verify $(COMPILE)
$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION)-run --verify-run $(COMPILE)
#: Run longer Python 2.6's lib files known to be okay
check-2.4-ok:
$(PYTHON) test_pythonlib.py --ok-2.4 --verify $(COMPILE)
#: Run longer Python 2.6's lib files known to be okay
check-2.6-ok:
$(PYTHON) test_pythonlib.py --ok-2.6 --weak-verify $(COMPILE)

25 binary files not shown.

test/bytecode_3.6_run/.gitignore vendored Normal file


@@ -1,8 +1,9 @@
#!/usr/bin/env python
# Mode: -*- python -*-
#
# Copyright (c) 2015, 2017 by Rocky Bernstein <rb@dustyfeet.com>
# Copyright (c) 2015 by Rocky Bernstein <rb@dustyfeet.com>
#
from __future__ import print_function
import dis, os.path


@@ -1,5 +1,7 @@
#!/usr/bin/env python
from __future__ import print_function
from uncompyle6 import uncompyle
import sys, inspect


@@ -1,9 +1,16 @@
# Bug in Python 3.5 is getting the two star'd arguments right.
def sum(a,b,c,d):
# Bug in 3.5 is detecting when there is more than
# one * in a call. There is a "BUILD_LIST_UNPACK" instruction used
# to unpack star arguments
def sum(a, b, c, d):
return a + b + c + d
args=(1,2)
args, a, b, c = (1, 2), 1, 2, 3
sum(*args, *args)
sum(*args, *args, *args)
sum(a, *args, *args)
sum(a, b, *args)
sum(a, b, *args, *args)
# FIXME: this is handled incorrectly
# (*c,) = (3,4)


@@ -19,7 +19,8 @@ Step 2: Run the test:
test_pyenvlib --mylib --verify # decompile verify 'mylib'
"""
from uncompyle6 import main, PYTHON3
from __future__ import print_function
import os, time, re, shutil, sys
from fnmatch import fnmatch


@@ -27,6 +27,8 @@ Step 2: Run the test:
test_pythonlib.py --mylib --verify # decompile verify 'mylib'
"""
from __future__ import print_function
import getopt, os, py_compile, sys, shutil, tempfile, time
from uncompyle6 import PYTHON_VERSION
@@ -128,10 +130,8 @@ def do_tests(src_dir, obj_patterns, target_dir, opts):
if opts['do_compile']:
compiled_version = opts['compiled_version']
if compiled_version and PYTHON_VERSION != compiled_version:
sys.stderr.write("Not compiling: "
"desired Python version is %s "
"but we are running %s" %
(compiled_version, PYTHON_VERSION))
print("Not compiling: desired Python version is %s but we are running %s" %
(compiled_version, PYTHON_VERSION), file=sys.stderr)
else:
for root, dirs, basenames in os.walk(src_dir):
file_matches(files, root, basenames, PY)
@@ -149,8 +149,8 @@ def do_tests(src_dir, obj_patterns, target_dir, opts):
file_matches(files, dirname, basenames, obj_patterns)
if not files:
sys.stderr.write("Didn't come up with any files to test! "
"Try with --compile?")
print("Didn't come up with any files to test! Try with --compile?",
file=sys.stderr)
exit(1)
os.chdir(cwd)
@@ -164,9 +164,9 @@ def do_tests(src_dir, obj_patterns, target_dir, opts):
except ValueError:
pass
print time.ctime()
print 'Source directory: ', src_dir
print 'Output directory: ', target_dir
print(time.ctime())
print('Source directory: ', src_dir)
print('Output directory: ', target_dir)
try:
_, _, failed_files, failed_verify = \
main(src_dir, target_dir, files, [],
@@ -242,13 +242,14 @@ if __name__ == '__main__':
if os.path.isdir(src_dir):
checked_dirs.append([src_dir, pattern, target_dir])
else:
sys.stderr.write("Can't find directory %s. Skipping" % src_dir)
print("Can't find directory %s. Skipping" % src_dir,
file=sys.stderr)
continue
last_compile_version = compiled_version
pass
if not checked_dirs:
sys.stderr.write("No directories found to check\n")
print("No directories found to check", file=sys.stderr)
sys.exit(1)
test_opts['compiled_version'] = last_compile_version


@@ -1,8 +1,9 @@
#!/usr/bin/env python
# Mode: -*- python -*-
#
# Copyright (c) 2015-2016 by Rocky Bernstein <rb@dustyfeet.com>
# Copyright (c) 2015-2016, 2018 by Rocky Bernstein <rb@dustyfeet.com>
#
from __future__ import print_function
import sys, os, getopt
from uncompyle6.disas import disassemble_file
@@ -12,20 +13,27 @@ program, ext = os.path.splitext(os.path.basename(__file__))
__doc__ = """
Usage:
%s [OPTIONS]... FILE
%s [--help | -h | -V | --version]
{0} [OPTIONS]... FILE
{0} [--help | -h | -V | --version]
Disassemble FILE with the instruction mangling that is done to
assist uncompyle6 in parsing the instruction stream. For example
instructions with variable-length arguments like CALL_FUNCTION and
BUILD_LIST have argument counts appended to the instruction name, and
COME_FROM instructions are inserted into the instruction stream.
Examples:
{0} foo.pyc
{0} foo.py # same thing as above but find the file
{0} foo.pyc bar.pyc # disassemble foo.pyc and bar.pyc
See also `pydisasm' from the `xdis' package.
Options:
-U | --uncompyle6 show instructions with uncompyle6 mangling
-V | --version show version and stop
-h | --help show this message
""" % (program, program)
""".format(program)
PATTERNS = ('*.pyc', '*.pyo')
@@ -33,18 +41,16 @@ def main():
Usage_short = """usage: %s FILE...
Type -h for for full help.""" % program
native = True
if len(sys.argv) == 1:
sys.stderr.write("No file(s) given\n")
sys.stderr.write(Usage_short)
print("No file(s) given", file=sys.stderr)
print(Usage_short, file=sys.stderr)
sys.exit(1)
try:
opts, files = getopt.getopt(sys.argv[1:], 'hVU',
['help', 'version', 'uncompyle6'])
except getopt.GetoptError(e):
sys.stderr.write('%s: %s' % (os.path.basename(sys.argv[0]), e))
except getopt.GetoptError as e:
print('%s: %s' % (os.path.basename(sys.argv[0]), e), file=sys.stderr)
sys.exit(-1)
for opt, val in opts:
@@ -54,18 +60,17 @@ Type -h for for full help.""" % program
elif opt in ('-V', '--version'):
print("%s %s" % (program, VERSION))
sys.exit(0)
elif opt in ('-U', '--uncompyle6'):
native = False
else:
print(opt)
sys.stderr.write(Usage_short)
print(Usage_short, file=sys.stderr)
sys.exit(1)
for file in files:
if os.path.exists(files[0]):
disassemble_file(file, sys.stdout, native)
disassemble_file(file, sys.stdout)
else:
sys.stderr.write("Can't read %s - skipping\n" % files[0])
print("Can't read %s - skipping" % files[0],
file=sys.stderr)
pass
pass
return


@@ -4,6 +4,7 @@
# Copyright (c) 2015-2017 by Rocky Bernstein
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
#
from __future__ import print_function
import sys, os, getopt, time
program = 'uncompyle6'
@@ -67,11 +68,11 @@ def usage():
def main_bin():
if not (sys.version_info[0:2] in ((2, 4), (2, 5), (2, 6), (2, 7),
(3, 2), (3, 3),
if not (sys.version_info[0:2] in ((2, 6), (2, 7),
(3, 1), (3, 2), (3, 3),
(3, 4), (3, 5), (3, 6))):
sys.stderr.write('Error: %s requires Python 2.4 2.5 2.6, 2.7, '
'3.2, 3.3, 3.4, 3.5, or 3.6' % program)
print('Error: %s requires Python 2.6-2.7, or 3.1-3.6' % program,
file=sys.stderr)
sys.exit(-1)
do_verify = recurse_dirs = False
@@ -87,8 +88,8 @@ def main_bin():
'help asm grammar linemaps recurse timestamp tree '
'fragments verify verify-run version weak-verify '
'showgrammar'.split(' '))
except getopt.GetoptError(e):
sys.stderr.write('%s: %s\n' % (os.path.basename(sys.argv[0]), e))
except getopt.GetoptError as e:
print('%s: %s' % (os.path.basename(sys.argv[0]), e), file=sys.stderr)
sys.exit(-1)
options = {}
@@ -128,7 +129,7 @@ def main_bin():
elif opt in ('--recurse', '-r'):
recurse_dirs = True
else:
sys.stderr.write(opt)
print(opt, file=sys.stderr)
usage()
# expand directory if specified
@@ -153,7 +154,7 @@ def main_bin():
files = [f[sb_len:] for f in files]
if not files:
sys.stderr.write("No files given\n")
print("No files given", file=sys.stderr)
usage()
if outfile == '-':


@@ -16,6 +16,8 @@ Second, we need structured instruction information for the
want to run on earlier Python versions.
"""
from __future__ import print_function
import sys
from collections import deque
@@ -34,9 +36,10 @@ def disco(version, co, out=None, is_pypy=False):
# store final output stream for case of error
real_out = out or sys.stdout
real_out.write('# Python %s\n' % version)
print('# Python %s' % version, file=real_out)
if co.co_filename:
real_out.write('# Embedded file name: %s\n' % co.co_filename)
print('# Embedded file name: %s' % co.co_filename,
file=real_out)
scanner = get_scanner(version, is_pypy=is_pypy)
@@ -48,15 +51,16 @@ def disco_loop(disasm, queue, real_out):
while len(queue) > 0:
co = queue.popleft()
if co.co_name != '<module>':
real_out.write('\n# %s line %d of %s\n' %
(co.co_name, co.co_firstlineno, co.co_filename))
print('\n# %s line %d of %s' %
(co.co_name, co.co_firstlineno, co.co_filename),
file=real_out)
tokens, customize = disasm(co)
for t in tokens:
if iscode(t.pattr):
queue.append(t.pattr)
elif iscode(t.attr):
queue.append(t.attr)
real_out.write(t)
print(t, file=real_out)
pass
pass


@@ -10,7 +10,7 @@ def line_number_mapping(pyc_filename, src_filename):
source_size) = load_module(pyc_filename)
try:
code2 = load_file(src_filename)
except SyntaxError, e:
except SyntaxError as e:
return str(e)
queue = deque([code1, code2])


@@ -1,3 +1,4 @@
from __future__ import print_function
import datetime, os, subprocess, sys, tempfile
from uncompyle6 import verify, IS_PYPY, PYTHON_VERSION
@@ -44,35 +45,20 @@ def decompile(
assert iscode(co)
if is_pypy:
co_pypy_str = 'PyPy '
else:
co_pypy_str = ''
if IS_PYPY:
run_pypy_str = 'PyPy '
else:
run_pypy_str = ''
if magic_int:
m = str(magic_int)
else:
m = ""
co_pypy_str = 'PyPy ' if is_pypy else ''
run_pypy_str = 'PyPy ' if IS_PYPY else ''
sys_version_lines = sys.version.split('\n')
write('# uncompyle6 version %s\n'
'# %sPython bytecode %s%s\n# Decompiled from: %sPython %s' %
(VERSION, co_pypy_str, bytecode_version,
" (%s)" % m, run_pypy_str,
'\n# '.join(sys_version_lines)))
" (%s)" % str(magic_int) if magic_int else "",
run_pypy_str, '\n# '.join(sys_version_lines)))
if co.co_filename:
write('# Embedded file name: %s' % co.co_filename,)
if timestamp:
write('# Compiled at: %s' %
datetime.datetime.fromtimestamp(timestamp))
write('# Compiled at: %s' % datetime.datetime.fromtimestamp(timestamp))
if source_size:
real_out.write('# Size of source mod 2**32: %d bytes\n' %
source_size)
write('# Size of source mod 2**32: %d bytes' % source_size)
try:
if mapstream:
@@ -99,7 +85,7 @@ def decompile(
is_pypy=is_pypy)
pass
return deparsed
except pysource.SourceWalkerError, e:
except pysource.SourceWalkerError as e:
# deparsing failed
raise pysource.SourceWalkerError(str(e))
@@ -178,10 +164,17 @@ def main(in_base, out_base, files, codes, outfile=None,
prefix = prefix[:-len('.py')]
# Unbuffer output if possible
if sys.stdout.isatty():
buffering = -1
buffering = -1 if sys.stdout.isatty() else 0
if PYTHON_VERSION >= 3.5:
t = tempfile.NamedTemporaryFile(mode='w+b',
buffering=buffering,
suffix='.py',
prefix=prefix)
else:
buffering = 0
t = tempfile.NamedTemporaryFile(mode='w+b',
suffix='.py',
prefix=prefix)
current_outfile = t.name
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering)
tee = subprocess.Popen(["tee", current_outfile],
stdin=subprocess.PIPE)
@@ -220,9 +213,9 @@ def main(in_base, out_base, files, codes, outfile=None,
pass
pass
tot_files += 1
except (ValueError, SyntaxError, ParserError, pysource.SourceWalkerError):
except (ValueError, SyntaxError, ParserError, pysource.SourceWalkerError) as e:
sys.stdout.write("\n")
sys.stderr.write("# file %s\n" % (infile))
sys.stderr.write("\n# file %s\n# %s\n" % (infile, e))
failed_files += 1
except KeyboardInterrupt:
if outfile:
@@ -250,7 +243,7 @@ def main(in_base, out_base, files, codes, outfile=None,
do_verify)
if not current_outfile:
if not msg:
print '\n# okay decompiling %s' % infile
print('\n# okay decompiling %s' % infile)
okay_files += 1
else:
verify_failed_files += 1
@@ -259,16 +252,13 @@ def main(in_base, out_base, files, codes, outfile=None,
else:
okay_files += 1
pass
except verify.VerifyCmpError, e:
except verify.VerifyCmpError as e:
print(e)
verify_failed_files += 1
os.rename(current_outfile, current_outfile + '_unverified')
sys.stderr.write("### Error Verifying %s\n" % filename)
sys.stderr.write(str(e) + "\n")
if not outfile:
sys.stderr.write("### Error Verifiying %s" %
filename)
sys.stderr.write(e)
if raise_on_error:
raise
pass
@@ -278,15 +268,14 @@ def main(in_base, out_base, files, codes, outfile=None,
okay_files += 1
pass
elif do_verify:
sys.stderr.write("\n### uncompile successful, "
"but no file to compare against")
sys.stderr.write("\n### uncompile successful, but no file to compare against\n")
pass
else:
okay_files += 1
if not current_outfile:
mess = '\n# okay decompiling'
# mem_usage = __memUsage()
print mess, infile
print(mess, infile)
if current_outfile:
sys.stdout.write("%s\r" %
status_msg(do_verify, tot_files, okay_files, failed_files,


@@ -6,6 +6,8 @@
Common uncompyle6 parser routines.
"""
from __future__ import print_function
import sys
from xdis.code import iscode
@@ -166,7 +168,7 @@ class PythonParser(GenericASTBuilder):
indent = ' '
else:
indent = '-> '
print "%s%s" % (indent, instructions[i])
print("%s%s" % (indent, instructions[i]))
raise ParserError(err_token, err_token.offset)
else:
raise ParserError(None, -1)
@@ -766,4 +768,4 @@ if __name__ == '__main__':
ast = python_parser(PYTHON_VERSION, co, showasm=True, is_pypy=IS_PYPY)
print(ast)
return
# parse_test(parse_test.__code__)
parse_test(parse_test.__code__)


@@ -12,6 +12,8 @@ If we succeed in creating a parse tree, then we have a Python program
that a later phase can turn into a sequence of ASCII text.
"""
from __future__ import print_function
from uncompyle6.parser import PythonParser, PythonParserSingle, nop_func
from uncompyle6.parsers.astnode import AST
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG


@@ -69,7 +69,6 @@ class Python24Parser(Python25Parser):
super(Python24Parser, self).customize_grammar_rules(tokens, customize)
if self.version == 2.4:
self.check_reduce['nop_stmt'] = 'tokens'
self.check_reduce['try_except'] = 'tokens'
def reduce_is_invalid(self, rule, ast, tokens, first, last):
invalid = super(Python24Parser,


@@ -19,9 +19,6 @@ class Python25Parser(Python26Parser):
return_if_stmt ::= ret_expr RETURN_END_IF JUMP_BACK
# We have no jumps to jumps, so no "come_froms" but a single "COME_FROM"
ifelsestmt ::= testexpr c_stmts_opt jf_cf_pop else_suite COME_FROM
# Python 2.6 uses ROT_TWO instead of the STORE_xxx
# withas is allowed as a "from future" in 2.5
# 2.6 and 2.7 do something slightly different
@@ -39,8 +36,6 @@ class Python25Parser(Python26Parser):
# loop. FIXME: should "come_froms" below be a single COME_FROM?
tryelsestmt ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suite come_froms
tryelsestmtl ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suitel
# Python 2.6 omits the LOAD_FAST DELETE_FAST below
# withas is allowed as a "from future" in 2.5
@@ -59,9 +54,6 @@ class Python25Parser(Python26Parser):
def customize_grammar_rules(self, tokens, customize):
# Remove grammar rules inherited from Python 2.6 or Python 2
self.remove_rules("""
# No jump to jumps in 2.4 so we have a single "COME_FROM", not "come_froms"
ifelsestmt ::= testexpr c_stmts_opt jf_cf_pop else_suite come_froms
setupwith ::= DUP_TOP LOAD_ATTR ROT_TWO LOAD_ATTR CALL_FUNCTION_0 POP_TOP
withstmt ::= expr setupwith SETUP_FINALLY suite_stmts_opt
POP_BLOCK LOAD_CONST COME_FROM WITH_CLEANUP END_FINALLY


@@ -130,6 +130,11 @@ class Python27Parser(Python2Parser):
whileelsestmt ::= SETUP_LOOP testexpr l_stmts_opt JUMP_BACK POP_BLOCK
else_suitel COME_FROM
return_stmts ::= _stmts return_stmt
return_stmts ::= return_stmt
return_stmt ::= return
ifstmt ::= testexpr return_stmts COME_FROM
ifstmt ::= testexpr return_if_stmts COME_FROM
ifelsestmt ::= testexpr c_stmts_opt JUMP_FORWARD else_suite COME_FROM
ifelsestmtc ::= testexpr c_stmts_opt JUMP_ABSOLUTE else_suitec
@@ -197,5 +202,4 @@ if __name__ == '__main__':
for t in remain_tokens])
remain_tokens = set(remain_tokens) - opcode_set
print(remain_tokens)
p.check_grammar()
p.dump_grammar()
# p.dump_grammar()


@@ -19,7 +19,6 @@ from uncompyle6.parser import PythonParser, PythonParserSingle, nop_func
from uncompyle6.parsers.astnode import AST
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
from xdis import PYTHON3
from itertools import islice,chain,repeat
class Python3Parser(PythonParser):
@@ -484,33 +483,15 @@ class Python3Parser(PythonParser):
token.kind = self.call_fn_name(token)
uniq_param = args_kw + args_pos
if self.version == 3.5 and opname.startswith('CALL_FUNCTION_VAR'):
# Python 3.5 changes the stack position of *args. KW args come
# after *args.
# Python 3.6+ replaces CALL_FUNCTION_VAR_KW with CALL_FUNCTION_EX
if opname.endswith('KW'):
kw = 'expr '
else:
kw = ''
rule = ('call ::= expr expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) + kw + token.kind)
self.add_unique_rule(rule, token.kind, uniq_param, customize)
# Note: 3.5+ have subclassed this method; so we don't handle
# 'CALL_FUNCTION_VAR' or 'CALL_FUNCTION_EX' here.
rule = ('call ::= expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) +
'expr ' * nak + token.kind)
self.add_unique_rule(rule, token.kind, uniq_param, customize)
if self.version >= 3.5 and seen_GET_AWAITABLE_YIELD_FROM:
rule = ('async_call ::= expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) +
'expr ' * nak + token.kind +
' GET_AWAITABLE LOAD_CONST YIELD_FROM')
self.add_unique_rule(rule, token.kind, uniq_param, customize)
self.add_unique_rule('expr ::= async_call', token.kind, uniq_param, customize)
'expr ' * nak + token.kind)
self.add_unique_rule(rule, token.kind, uniq_param, customize)
if possible_class_decorator:
if next_token == 'CALL_FUNCTION' and next_token.attr == 1:
@@ -671,6 +652,11 @@ class Python3Parser(PythonParser):
rule = ('build_map_unpack_with_call ::= ' + 'expr1024 ' * int(v//1024) +
'expr32 ' * int((v//32) % 32) +
'expr ' * (v % 32) + opname)
self.addRule(rule, nop_func)
elif opname.startswith('BUILD_TUPLE_UNPACK_WITH_CALL'):
v = token.attr
rule = ('starred ::= %s %s' % ('expr ' * v, opname))
self.addRule(rule, nop_func)
elif opname_base in ('BUILD_LIST', 'BUILD_SET', 'BUILD_TUPLE'):
v = token.attr
@@ -1093,7 +1079,7 @@ class Python3Parser(PythonParser):
if tokens[cfl-1] != 'JUMP_BACK':
cfl_offset = tokens[cfl-1].offset
insn = chain((i for i in self.insts if cfl_offset == i.offset), repeat(None)).next()
insn = next(i for i in self.insts if cfl_offset == i.offset)
if insn and insn.is_jump_target:
return True


@@ -2,6 +2,7 @@
"""
spark grammar differences over Python 3.1 for Python 3.0.
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle
from uncompyle6.parsers.parse31 import Python31Parser


@@ -2,6 +2,7 @@
"""
spark grammar differences over Python 3.2 for Python 3.1.
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle
from uncompyle6.parsers.parse32 import Python32Parser


@@ -2,6 +2,8 @@
"""
spark grammar differences over Python 3 for Python 3.2.
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle
from uncompyle6.parsers.parse3 import Python3Parser


@@ -2,6 +2,7 @@
"""
spark grammar differences over Python 3.2 for Python 3.3.
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle
from uncompyle6.parsers.parse32 import Python32Parser


@@ -2,6 +2,7 @@
"""
spark grammar differences over Python 3.4 for Python 3.5.
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle, nop_func
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
@@ -186,6 +187,52 @@ class Python35Parser(Python34Parser):
pass
return
def custom_classfunc_rule(self, opname, token, customize,
seen_LOAD_BUILD_CLASS,
seen_GET_AWAITABLE_YIELD_FROM,
*args):
args_pos, args_kw = self.get_pos_kw(token)
# Additional exprs for * and ** args:
# 0 if neither
# 1 for CALL_FUNCTION_VAR or CALL_FUNCTION_KW
# 2 for * and ** args (CALL_FUNCTION_VAR_KW).
# Yes, this computation based on instruction name is a little bit hoaky.
nak = ( len(opname)-len('CALL_FUNCTION') ) // 3
uniq_param = args_kw + args_pos
if seen_GET_AWAITABLE_YIELD_FROM:
rule = ('async_call ::= expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) +
'expr ' * nak + token.kind +
' GET_AWAITABLE LOAD_CONST YIELD_FROM')
self.add_unique_rule(rule, token.kind, uniq_param, customize)
self.add_unique_rule('expr ::= async_call', token.kind, uniq_param, customize)
uniq_param = args_kw + args_pos
if opname.startswith('CALL_FUNCTION_VAR'):
# Python 3.5 changes the stack position of *args. KW args come
# after *args.
# Note: Python 3.6+ replaces CALL_FUNCTION_VAR and
# CALL_FUNCTION_VAR_KW with CALL_FUNCTION_EX
token.kind = self.call_fn_name(token)
if opname.endswith('KW'):
kw = 'expr '
else:
kw = ''
rule = ('call ::= expr expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) + kw + token.kind)
self.add_unique_rule(rule, token.kind, uniq_param, customize)
else:
super(Python35Parser, self).custom_classfunc_rule(opname, token, customize,
seen_LOAD_BUILD_CLASS,
seen_GET_AWAITABLE_YIELD_FROM,
*args)
class Python35ParserSingle(Python35Parser, PythonParserSingle):
pass


@@ -2,6 +2,7 @@
"""
spark grammar differences over Python 3.5 for Python 3.6.
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle, nop_func
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
@@ -131,12 +132,32 @@ class Python36Parser(Python35Parser):
def custom_classfunc_rule(self, opname, token, customize,
possible_class_decorator,
seen_GET_AWAITABLE_YIELD_FROM, next_token):
args_pos, args_kw = self.get_pos_kw(token)
# Additional exprs for * and ** args:
# 0 if neither
# 1 for CALL_FUNCTION_VAR or CALL_FUNCTION_KW
# 2 for * and ** args (CALL_FUNCTION_VAR_KW).
# Yes, this computation based on instruction name is a little bit hoaky.
nak = ( len(opname)-len('CALL_FUNCTION') ) // 3
uniq_param = args_kw + args_pos
if seen_GET_AWAITABLE_YIELD_FROM:
rule = ('async_call ::= expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) +
'expr ' * nak + token.kind +
' GET_AWAITABLE LOAD_CONST YIELD_FROM')
self.add_unique_rule(rule, token.kind, uniq_param, customize)
self.add_unique_rule('expr ::= async_call', token.kind, uniq_param, customize)
if opname.startswith('CALL_FUNCTION_KW'):
self.addRule("expr ::= call_kw", nop_func)
values = 'expr ' * token.attr
rule = 'call_kw ::= expr kwargs_36 %s' % token.kind
rule = 'call_kw ::= expr kwargs_36 {token.kind}'.format(**locals())
self.addRule(rule, nop_func)
rule = 'kwargs_36 ::= %s LOAD_CONST' % values
rule = 'kwargs_36 ::= {values} LOAD_CONST'.format(**locals())
self.add_unique_rule(rule, token.kind, token.attr, customize)
elif opname == 'CALL_FUNCTION_EX_KW':
self.addRule("""expr ::= call_ex_kw


@@ -2,6 +2,7 @@
"""
spark grammar differences over Python 3.6 for Python 3.7
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG


@@ -1,4 +1,4 @@
# Copyright (c) 2016-2018 by Rocky Bernstein
# Copyright (c) 2016, 2018 by Rocky Bernstein
# Copyright (c) 2005 by Dan Pascu <dan@windowmaker.org>
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
# Copyright (c) 1999 John Aycock
@@ -10,6 +10,8 @@ scanner/ingestion module. From here we call various version-specific
scanners, e.g. for Python 2.7 or 3.4.
"""
from __future__ import print_function
import sys
from uncompyle6 import PYTHON3, IS_PYPY


@@ -13,13 +13,13 @@ import uncompyle6.scanners.scanner21 as scan
from xdis.opcodes import opcode_15
JUMP_OPS = opcode_15.JUMP_OPS
# We base this off of 2.1 instead of the other way around
# We base this off of 2.2 instead of the other way around
# because we cleaned things up this way.
# The history is that 2.7 support is the cleanest,
# then from that we got 2.6 and so on.
class Scanner15(scan.Scanner21):
def __init__(self, show_asm=False):
scan.Scanner21.__init__(self, show_asm=False)
scan.Scanner21.__init__(self, show_asm)
self.opc = opcode_15
self.opname = opcode_15.opname
self.version = 1.5


@@ -20,13 +20,9 @@ For example:
Finally we save token information.
"""
from uncompyle6 import PYTHON_VERSION
if PYTHON_VERSION < 2.6:
from xdis.namedtuple24 import namedtuple
else:
from collections import namedtuple
from __future__ import print_function
from collections import namedtuple
from array import array
from xdis.code import iscode
@@ -962,17 +958,32 @@ class Scanner2(Scanner):
'end': end_offset})
elif code_pre_rtarget == self.opc.RETURN_VALUE:
if self.version == 2.7 or pre_rtarget not in self.ignore_if:
# 10 is exception-match. If there is an exception match in the
# compare, then this is an exception clause not an if-then clause
# Below, 10 is exception-match. If there is an exception
# match in the compare, then this is an exception
# clause not an if-then clause
if (self.code[self.prev[offset]] != self.opc.COMPARE_OP or
self.code[self.prev[offset]+1] != 10):
self.structs.append({'type': 'if-then',
'start': start,
'end': rtarget})
self.thens[start] = rtarget
if self.version == 2.7 or code[pre_rtarget+1] != self.opc.JUMP_FORWARD:
if (self.version == 2.7 or
code[pre_rtarget+1] != self.opc.JUMP_FORWARD):
# The below is a big hack until we get
# better control flow analysis: disallow
# END_IF if the instruction before the
# END_IF instruction happens to be a jump
# target. In this case, probably what's
# gone on is that we messed up on the
# END_IF location and it should be the
# instruction before.
self.fixed_jumps[offset] = rtarget
self.return_end_ifs.add(pre_rtarget)
if (self.version == 2.7 and
self.insts[self.offset2inst_index[pre[pre_rtarget]]].is_jump_target):
self.return_end_ifs.add(pre[pre_rtarget])
pass
else:
self.return_end_ifs.add(pre_rtarget)
pass
pass
pass


@@ -19,7 +19,7 @@ JUMP_OPS = opcode_21.JUMP_OPS
# then from that we got 2.6 and so on.
class Scanner21(scan.Scanner22):
def __init__(self, show_asm=False):
scan.Scanner22.__init__(self, show_asm=False)
scan.Scanner22.__init__(self, show_asm)
self.opc = opcode_21
self.opname = opcode_21.opname
self.version = 2.1


@@ -19,7 +19,7 @@ JUMP_OPS = opcode_22.JUMP_OPS
# then from that we got 2.6 and so on.
class Scanner22(scan.Scanner23):
def __init__(self, show_asm=False):
scan.Scanner23.__init__(self, show_asm=False)
scan.Scanner23.__init__(self, show_asm)
self.opc = opcode_22
self.opname = opcode_22.opname
self.version = 2.2


@@ -7,6 +7,8 @@ grammar parsing.
"""
from __future__ import print_function
from uncompyle6.scanners.scanner2 import Scanner2
from uncompyle6 import PYTHON3


@@ -20,13 +20,9 @@ For example:
Finally we save token information.
"""
from uncompyle6 import PYTHON_VERSION
if PYTHON_VERSION < 2.6:
from xdis.namedtuple24 import namedtuple
else:
from collections import namedtuple
from __future__ import print_function
from collections import namedtuple
from array import array
from uncompyle6.scanner import Scanner
@@ -166,9 +162,7 @@ class Scanner3(Scanner):
self.code = array('B', co.co_code)
bytecode = Bytecode(co, self.opc)
if not show_asm:
show_asm = self.show_asm
show_asm = self.show_asm if not show_asm else show_asm
# show_asm = 'both'
if show_asm in ('both', 'before'):
for instr in bytecode.get_instructions(co):


@@ -6,6 +6,8 @@ This sets up opcodes Python's 3.0 and calls a generalized
scanner routine for Python 3.
"""
from __future__ import print_function
# bytecode verification, verify(), uses JUMP_OPs from here
from xdis.opcodes import opcode_30 as opc
from xdis.bytecode import instruction_size, next_offset


@@ -6,6 +6,8 @@ This sets up opcodes Python's 3.1 and calls a generalized
scanner routine for Python 3.
"""
from __future__ import print_function
# bytecode verification, verify(), uses JUMP_OPs from here
from xdis.opcodes import opcode_31 as opc
JUMP_OPS = opc.JUMP_OPS


@@ -9,6 +9,8 @@ This sets up opcodes Python's 3.2 and calls a generalized
scanner routine for Python 3.
"""
from __future__ import print_function
# bytecode verification, verify(), uses JUMP_OPs from here
from xdis.opcodes import opcode_32 as opc
JUMP_OPS = opc.JUMP_OPS


@@ -6,6 +6,8 @@ This sets up opcodes Python's 3.3 and calls a generalized
scanner routine for Python 3.
"""
from __future__ import print_function
# bytecode verification, verify(), uses JUMP_OPs from here
from xdis.opcodes import opcode_33 as opc
JUMP_OPS = opc.JUMP_OPS


@@ -9,6 +9,8 @@ This sets up opcodes Python's 3.4 and calls a generalized
scanner routine for Python 3.
"""
from __future__ import print_function
from xdis.opcodes import opcode_34 as opc
# bytecode verification, verify(), uses JUMP_OPs from here


@@ -9,6 +9,8 @@ This sets up opcodes Python's 3.5 and calls a generalized
scanner routine for Python 3.
"""
from __future__ import print_function
from uncompyle6.scanners.scanner3 import Scanner3
# bytecode verification, verify(), uses JUMP_OPs from here


@@ -9,6 +9,8 @@ This sets up opcodes Python's 3.6 and calls a generalized
scanner routine for Python 3.
"""
from __future__ import print_function
from uncompyle6.scanners.scanner3 import Scanner3
# bytecode verification, verify(), uses JUMP_OPS from here
@@ -30,9 +32,11 @@ class Scanner36(Scanner3):
t.kind = 'CALL_FUNCTION_EX_KW'
pass
elif t.op == self.opc.CALL_FUNCTION_KW:
t.kind = 'CALL_FUNCTION_KW_%s' % t.attr
t.kind = 'CALL_FUNCTION_KW_{t.attr}'.format(**locals())
elif t.op == self.opc.BUILD_MAP_UNPACK_WITH_CALL:
t.kind = 'BUILD_MAP_UNPACK_WITH_CALL_%d' % t.attr
elif t.op == self.opc.BUILD_TUPLE_UNPACK_WITH_CALL:
t.kind = 'BUILD_TUPLE_UNPACK_WITH_CALL_%d' % t.attr
pass
return tokens, customize

Some files were not shown because too many files have changed in this diff.