You've already forked python-uncompyle6
mirror of
https://github.com/rocky/python-uncompyle6.git
synced 2025-08-04 09:22:40 +08:00
Compare commits
75 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
4f545c5bfa | ||
|
bbfdb814bf | ||
|
d088e7ef11 | ||
|
6646d18c7a | ||
|
716e097654 | ||
|
dba95c5200 | ||
|
d5df411c7a | ||
|
077f192711 | ||
|
498df35a6c | ||
|
7e71ce3260 | ||
|
825add1af7 | ||
|
1a901bde8f | ||
|
732b5165c2 | ||
|
7bd81efe9b | ||
|
c42e16fafe | ||
|
6de57249ed | ||
|
faf6ea9630 | ||
|
566143b515 | ||
|
b2e1edb434 | ||
|
62c249d6b2 | ||
|
db2fdb30fd | ||
|
37301ab49e | ||
|
97e3a7eb02 | ||
|
f98f7372c3 | ||
|
f88df747b8 | ||
|
6be3656ceb | ||
|
8b48f62fc8 | ||
|
868721595d | ||
|
9f270dce4a | ||
|
bffbd0b352 | ||
|
50fbea1a06 | ||
|
ddffc2c078 | ||
|
d9318e9bed | ||
|
c078048fb0 | ||
|
6a82b1045e | ||
|
3ea73cf977 | ||
|
f3bec73840 | ||
|
8f4343ef22 | ||
|
d50834193c | ||
|
fa7ff89a32 | ||
|
28d9e66a53 | ||
|
e39c6c7f0a | ||
|
8470bded59 | ||
|
01b2b46757 | ||
|
aa398423a3 | ||
|
41bcf3387d | ||
|
89e7a0a246 | ||
|
179fcafaba | ||
|
e56a3c86d5 | ||
|
f527fdbdcd | ||
|
d39169dbda | ||
|
996719688a | ||
|
af9f6b05fa | ||
|
39cbddccaf | ||
|
75b3aaa86d | ||
|
e93b70bcce | ||
|
0eaeb82d48 | ||
|
0ae9612c7c | ||
|
09f232700e | ||
|
bfde66c5e1 | ||
|
4773ca4e5b | ||
|
62a3fcc9d5 | ||
|
bc7d7ddf12 | ||
|
41b6e91286 | ||
|
56bf3e3125 | ||
|
805ec7dbfc | ||
|
668141662e | ||
|
cc55fa1de1 | ||
|
6f51f8910c | ||
|
bc614cf3fb | ||
|
05f3dad32c | ||
|
fb3761e4f3 | ||
|
9b2e22cbaf | ||
|
cea2c7e1dc | ||
|
79c38441b5 |
@@ -10,13 +10,6 @@ jobs:
|
||||
CIRCLE_ARTIFACTS: /tmp/circleci-artifacts
|
||||
CIRCLE_TEST_REPORTS: /tmp/circleci-test-results
|
||||
COMPILE: --compile
|
||||
# In CircleCI 1.0 we used a pre-configured image with a large number of languages and other packages.
|
||||
# In CircleCI 2.0 you can now specify your own image, or use one of our pre-configured images.
|
||||
# The following configuration line tells CircleCI to use the specified docker image as the runtime environment for you job.
|
||||
# We have selected a pre-built image that mirrors the build environment we use on
|
||||
# the 1.0 platform, but we recommend you choose an image more tailored to the needs
|
||||
# of each job. For more information on choosing an image (or alternatively using a
|
||||
# VM instead of a container) see https://circleci.com/docs/2.0/executor-types/
|
||||
# To see the list of pre-built images that CircleCI provides for most common languages see
|
||||
# https://circleci.com/docs/2.0/circleci-images/
|
||||
machine:
|
||||
|
6
.github/ISSUE_TEMPLATE/bug-report.md
vendored
6
.github/ISSUE_TEMPLATE/bug-report.md
vendored
@@ -11,7 +11,7 @@ Please remove any of the optional sections if they are not applicable.
|
||||
Prerequisites
|
||||
|
||||
* Make sure the bytecode you have can be disassembled with a
|
||||
disassembler.
|
||||
disassembler and produces valid results.
|
||||
* Don't put bytecode and corresponding source code on any service that
|
||||
requires registration to download.
|
||||
* When you open a bug report there is no privacy. If the legitimacy of
|
||||
@@ -35,7 +35,7 @@ decompiler service for versions of Python up to 2.6.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
<!-- Please show both the input you gave and the
|
||||
<!-- Please show both the *input* you gave and the
|
||||
output you got in describing how to reproduce the bug:
|
||||
|
||||
or give a complete console log with input and output
|
||||
@@ -63,7 +63,7 @@ can add that too.
|
||||
Please modify for your setup
|
||||
|
||||
- Uncompyle6 version: output from `uncompyle6 --version` or `pip show uncompyle6`
|
||||
- Python version: `python -V`
|
||||
- Python version for the version of Python the byte-compiled the file: `python -c "import sys; print(sys.version)"` where `python` is the correct Cpython or Pypy binary.
|
||||
- OS and Version: [e.g. Ubuntu bionic]
|
||||
|
||||
-->
|
||||
|
70
NEWS.md
70
NEWS.md
@@ -1,3 +1,73 @@
|
||||
3.6.2: 2020-1-5 Samish
|
||||
======================
|
||||
|
||||
Yet again the focus has been on just fixing bugs, mostly geared in the
|
||||
later 3.x range. To get some sense of what still needs fixing, consult
|
||||
test/stdlib/runtests.sh. And that only has a portion of what's known.
|
||||
|
||||
`make_function.py` has gotten so complex that it was split out into 3 parts
|
||||
to handle different version ranges: Python <3, Python 3.0..3.6 and Python 3.7+.
|
||||
|
||||
An important fix is that we had been dropping docstrings in Python 3 code as a result
|
||||
of an incomplete merge from the decompile3 base with respect to the transform phase.
|
||||
|
||||
Also important (at least to me) is that we can now handle 3.6+
|
||||
variable type annotations. Some of the decompile3 code uses that in
|
||||
its source code, and I now use variable annotations in conjunction
|
||||
with mypy in some of my other Python projects
|
||||
|
||||
Code generation for imports, especially where the import is dotted
|
||||
changed a bit in 3.7; with this release we are just now tracking that
|
||||
change better. For this I've added pseudo instruction
|
||||
`IMPORT_NAME_ATTR`, derived from the `IMPORT_NAME` instruction, to
|
||||
indicate when an import contains a dotted import. Similarly, code for
|
||||
3.7 `import .. as ` is basically the same as `from .. import`, the
|
||||
only difference is the target of the name changes to an "alias" in the
|
||||
former. As a result, the disambiguation is now done on the semantic
|
||||
action side, rather than in parsing grammar rules.
|
||||
|
||||
Some small specific fixes:
|
||||
|
||||
* 3.7+ some chained compare parsing has been fixed. Other remain.
|
||||
* better if/else rule checking in the 3.4 and below range.
|
||||
* 3.4+ keyword-only parameter handling was fixed more generally
|
||||
* 3.3 .. 3.5 keyword-only parameter args in lambda was fixed
|
||||
|
||||
|
||||
3.6.1: 2019-12-10 Christmas Hannukah
|
||||
====================================
|
||||
|
||||
Overall, as in the past, the focus has been on just fixing bugs, more geared
|
||||
in the later 3.x range. Handling "async for/with" in 3.8+ works better.
|
||||
|
||||
Numerous bugs around handling `lambda` with keyword-only and `*` args in the
|
||||
3.0-3.8 have been fixed. However many still remain.
|
||||
|
||||
`binary_expr` and `unary_expr` have been renamed to `bin_op` and
|
||||
`unary_op` to better correspond the Python AST names.
|
||||
|
||||
Some work was done on Python 3.7+ to handle `and` better; less was done
|
||||
along the lines of handling `or`. Much more is needed to improve
|
||||
parsing stability of 3.7+. More of what was done with `and` needs to
|
||||
be done with `or` and this will happen first in the "decompyle3"
|
||||
project.
|
||||
|
||||
Later this will probably be extended backwards to handle the 3.6-
|
||||
versions better. This however comes with a big decompilation speed
|
||||
penalty. When we redo control flow this should go back to normal, but
|
||||
for now, accuracy is more important than speed.
|
||||
|
||||
Another `assert` transform rule was added. Parser rules to distinguish
|
||||
`try/finally` in 3.8 were added and we are more stringent about what
|
||||
can be turned into an `assert`. There was some grammar cleanup here
|
||||
too.
|
||||
|
||||
A number of small bugs were fixed, and some administrative changes to
|
||||
make `make check-short` really be short, but check more thoroughly what
|
||||
it checks. minimum xdis version needed was bumped to include in the
|
||||
newer 3.6-3.9 releases. See the `ChangeLog` for details.
|
||||
|
||||
|
||||
3.6.0: 2019-12-10 gecko gecko
|
||||
=============================
|
||||
|
||||
|
@@ -58,7 +58,7 @@ entry_points = {
|
||||
]}
|
||||
ftp_url = None
|
||||
install_requires = ["spark-parser >= 1.8.9, < 1.9.0",
|
||||
"xdis >= 4.2.0, < 4.3.0"]
|
||||
"xdis >= 4.2.2, < 4.3.0"]
|
||||
|
||||
license = "GPL3"
|
||||
mailing_list = "python-debugger@googlegroups.com"
|
||||
|
@@ -21,8 +21,9 @@ for version in $PYVERSIONS; do
|
||||
exit $?
|
||||
fi
|
||||
make clean && pip install -e .
|
||||
if ! make check; then
|
||||
if ! make check-short; then
|
||||
exit $?
|
||||
fi
|
||||
echo === $version ===
|
||||
done
|
||||
make check
|
||||
|
@@ -5,4 +5,4 @@ if [[ $0 == ${BASH_SOURCE[0]} ]] ; then
|
||||
echo "This script should be *sourced* rather than run directly through bash"
|
||||
exit 1
|
||||
fi
|
||||
export PYVERSIONS='3.5.9 3.6.9 2.6.9 3.3.7 2.7.17 3.2.6 3.1.5 3.4.10 3.7.5'
|
||||
export PYVERSIONS='3.5.9 3.6.9 2.6.9 3.3.7 2.7.17 3.2.6 3.1.5 3.4.8 3.7.6 3.8.1'
|
||||
|
@@ -20,3 +20,4 @@ cd $fulldir/..
|
||||
(cd ../python-xdis && git checkout master && pyenv local $PYTHON_VERSION) && git pull && \
|
||||
git checkout master && pyenv local $PYTHON_VERSION && git pull
|
||||
cd $owd
|
||||
rm -v */.python-version || true
|
||||
|
@@ -14,3 +14,4 @@ cd $fulldir/..
|
||||
(cd ../python-xdis && git checkout python-2.4 && pyenv local $PYTHON_VERSION) && git pull && \
|
||||
git checkout python-2.4 && pyenv local $PYTHON_VERSION && git pull
|
||||
cd $owd
|
||||
rm -v */.python-version || true
|
||||
|
@@ -1,7 +1,11 @@
|
||||
PHONY=check test pytest
|
||||
SHELL=/bin/bash
|
||||
|
||||
PYTHON ?= python
|
||||
|
||||
#: Run all tests
|
||||
test check pytest:
|
||||
py.test
|
||||
@PYTHON_VERSION=`$(PYTHON) -V 2>&1 | cut -d ' ' -f 2 | cut -d'.' -f1,2`; \
|
||||
if [[ $$PYTHON_VERSION > 3.2 ]] || [[ $$PYTHON_VERSION == 2.7 ]] || [[ $$PYTHON_VERSION == 2.6 ]]; then \
|
||||
py.test; \
|
||||
fi
|
||||
|
@@ -1,158 +0,0 @@
|
||||
# std
|
||||
# test
|
||||
import sys
|
||||
from uncompyle6 import PYTHON_VERSION, code_deparse
|
||||
import pytest
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
PYTHON_VERSION <= 2.6, reason="hypothesis needs 2.7 or later"
|
||||
)
|
||||
if PYTHON_VERSION > 2.6:
|
||||
|
||||
import hypothesis
|
||||
from hypothesis import strategies as st
|
||||
|
||||
# uncompyle6
|
||||
|
||||
@st.composite
|
||||
def expressions(draw):
|
||||
# todo : would be nice to generate expressions using hypothesis however
|
||||
# this is pretty involved so for now just use a corpus of expressions
|
||||
# from which to select.
|
||||
return draw(
|
||||
st.sampled_from(
|
||||
(
|
||||
"abc",
|
||||
"len(items)",
|
||||
"x + 1",
|
||||
"lineno",
|
||||
"container",
|
||||
"self.attribute",
|
||||
"self.method()",
|
||||
# These expressions are failing, I think these are control
|
||||
# flow problems rather than problems with FORMAT_VALUE,
|
||||
# however I need to confirm this...
|
||||
#'sorted(items, key=lambda x: x.name)',
|
||||
#'func(*args, **kwargs)',
|
||||
#'text or default',
|
||||
#'43 if life_the_universe and everything else None'
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
@st.composite
|
||||
def format_specifiers(draw):
|
||||
"""
|
||||
Generate a valid format specifier using the rules:
|
||||
|
||||
format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]
|
||||
fill ::= <any character>
|
||||
align ::= "<" | ">" | "=" | "^"
|
||||
sign ::= "+" | "-" | " "
|
||||
width ::= integer
|
||||
precision ::= integer
|
||||
type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
|
||||
|
||||
See https://docs.python.org/2/library/string.html
|
||||
|
||||
:param draw: Let hypothesis draw from other strategies.
|
||||
|
||||
:return: An example format_specifier.
|
||||
"""
|
||||
alphabet_strategy = st.characters(
|
||||
min_codepoint=ord("a"), max_codepoint=ord("z")
|
||||
)
|
||||
fill = draw(st.one_of(alphabet_strategy, st.none()))
|
||||
align = draw(st.sampled_from(list("<>=^")))
|
||||
fill_align = (fill + align or "") if fill else ""
|
||||
|
||||
type_ = draw(st.sampled_from("bcdeEfFgGnosxX%"))
|
||||
can_have_sign = type_ in "deEfFgGnoxX%"
|
||||
can_have_comma = type_ in "deEfFgG%"
|
||||
can_have_precision = type_ in "fFgG"
|
||||
can_have_pound = type_ in "boxX%"
|
||||
can_have_zero = type_ in "oxX"
|
||||
|
||||
sign = draw(st.sampled_from(list("+- ") + [""])) if can_have_sign else ""
|
||||
pound = draw(st.sampled_from(("#", ""))) if can_have_pound else ""
|
||||
zero = draw(st.sampled_from(("0", ""))) if can_have_zero else ""
|
||||
|
||||
int_strategy = st.integers(min_value=1, max_value=1000)
|
||||
|
||||
width = draw(st.one_of(int_strategy, st.none()))
|
||||
width = str(width) if width is not None else ""
|
||||
|
||||
comma = draw(st.sampled_from((",", ""))) if can_have_comma else ""
|
||||
if can_have_precision:
|
||||
precision = draw(st.one_of(int_strategy, st.none()))
|
||||
precision = "." + str(precision) if precision else ""
|
||||
else:
|
||||
precision = ""
|
||||
|
||||
return "".join((fill_align, sign, pound, zero, width, comma, precision, type_))
|
||||
|
||||
@st.composite
|
||||
def fstrings(draw):
|
||||
"""
|
||||
Generate a valid f-string.
|
||||
See https://www.python.org/dev/peps/pep-0498/#specification
|
||||
|
||||
:param draw: Let hypothesis draw from other strategies.
|
||||
|
||||
:return: A valid f-string.
|
||||
"""
|
||||
character_strategy = st.characters(
|
||||
blacklist_characters="\r\n'\\s{}", min_codepoint=1, max_codepoint=1000
|
||||
)
|
||||
is_raw = draw(st.booleans())
|
||||
integer_strategy = st.integers(min_value=0, max_value=3)
|
||||
expression_count = draw(integer_strategy)
|
||||
content = []
|
||||
for _ in range(expression_count):
|
||||
expression = draw(expressions())
|
||||
conversion = draw(st.sampled_from(("", "!s", "!r", "!a")))
|
||||
has_specifier = draw(st.booleans())
|
||||
specifier = ":" + draw(format_specifiers()) if has_specifier else ""
|
||||
content.append("{{{}{}}}".format(expression, conversion, specifier))
|
||||
content.append(draw(st.text(character_strategy)))
|
||||
content = "".join(content)
|
||||
return "f{}'{}'".format("r" if is_raw else "", content)
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION != 3.6, reason="need Python 3.6")
|
||||
@hypothesis.given(format_specifiers())
|
||||
def test_format_specifiers(format_specifier):
|
||||
"""Verify that format_specifiers generates valid specifiers"""
|
||||
try:
|
||||
exec('"{:' + format_specifier + '}".format(0)')
|
||||
except ValueError as e:
|
||||
if "Unknown format code" not in str(e):
|
||||
raise
|
||||
|
||||
def run_test(text):
|
||||
hypothesis.assume(len(text))
|
||||
hypothesis.assume("f'{" in text)
|
||||
expr = text + "\n"
|
||||
code = compile(expr, "<string>", "single")
|
||||
deparsed = code_deparse(code, sys.stdout, PYTHON_VERSION, compile_mode="single")
|
||||
recompiled = compile(deparsed.text, "<string>", "single")
|
||||
if recompiled != code:
|
||||
print(recompiled)
|
||||
print("================")
|
||||
print(code)
|
||||
print("----------------")
|
||||
assert (
|
||||
"dis(" + deparsed.text.strip("\n") + ")"
|
||||
== "dis(" + expr.strip("\n") + ")"
|
||||
)
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION != 3.6, reason="need Python 3.6")
|
||||
@hypothesis.given(fstrings())
|
||||
def test_uncompyle_fstring(fstring):
|
||||
"""Verify uncompyling fstring bytecode"""
|
||||
run_test(fstring)
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION != 3.6, reason="need Python 3.6+")
|
||||
@pytest.mark.parametrize("fstring", ["f'{abc}{abc!s}'", "f'{abc}0'"])
|
||||
def test_uncompyle_direct(fstring):
|
||||
"""useful for debugging"""
|
||||
run_test(fstring)
|
@@ -1,185 +0,0 @@
|
||||
import string
|
||||
from uncompyle6 import PYTHON_VERSION
|
||||
import pytest
|
||||
pytestmark = pytest.mark.skip(PYTHON_VERSION < 2.7,
|
||||
reason="need at least Python 2.7")
|
||||
|
||||
if PYTHON_VERSION > 2.6:
|
||||
from hypothesis import given, assume, example, settings, strategies as st
|
||||
from validate import validate_uncompyle
|
||||
from test_fstring import expressions
|
||||
|
||||
alpha = st.sampled_from(string.ascii_lowercase)
|
||||
numbers = st.sampled_from(string.digits)
|
||||
alphanum = st.sampled_from(string.ascii_lowercase + string.digits)
|
||||
|
||||
|
||||
@st.composite
|
||||
def function_calls(draw,
|
||||
min_keyword_args=0, max_keyword_args=5,
|
||||
min_positional_args=0, max_positional_args=5,
|
||||
min_star_args=0, max_star_args=1,
|
||||
min_double_star_args=0, max_double_star_args=1):
|
||||
"""
|
||||
Strategy factory for generating function calls.
|
||||
|
||||
:param draw: Callable which draws examples from other strategies.
|
||||
|
||||
:return: The function call text.
|
||||
"""
|
||||
st_positional_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_positional_args,
|
||||
max_size=max_positional_args
|
||||
)
|
||||
st_keyword_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_keyword_args,
|
||||
max_size=max_keyword_args
|
||||
)
|
||||
st_star_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_star_args,
|
||||
max_size=max_star_args
|
||||
)
|
||||
st_double_star_args = st.lists(
|
||||
alpha,
|
||||
min_size=min_double_star_args,
|
||||
max_size=max_double_star_args
|
||||
)
|
||||
|
||||
positional_args = draw(st_positional_args)
|
||||
keyword_args = draw(st_keyword_args)
|
||||
st_values = st.lists(
|
||||
expressions(),
|
||||
min_size=len(keyword_args),
|
||||
max_size=len(keyword_args)
|
||||
)
|
||||
keyword_args = [
|
||||
x + '=' + e
|
||||
for x, e in
|
||||
zip(keyword_args, draw(st_values))
|
||||
]
|
||||
star_args = ['*' + x for x in draw(st_star_args)]
|
||||
double_star_args = ['**' + x for x in draw(st_double_star_args)]
|
||||
|
||||
arguments = positional_args + keyword_args + star_args + double_star_args
|
||||
draw(st.randoms()).shuffle(arguments)
|
||||
arguments = ','.join(arguments)
|
||||
|
||||
function_call = 'fn({arguments})'.format(arguments=arguments)
|
||||
try:
|
||||
# TODO: Figure out the exact rules for ordering of positional, keyword,
|
||||
# star args, double star args and in which versions the various
|
||||
# types of arguments are supported so we don't need to check that the
|
||||
# expression compiles like this.
|
||||
compile(function_call, '<string>', 'single')
|
||||
except:
|
||||
assume(False)
|
||||
return function_call
|
||||
|
||||
|
||||
def test_function_no_args():
|
||||
validate_uncompyle("fn()")
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 2.7,
|
||||
reason="need at least Python 2.7")
|
||||
def isolated_function_calls(which):
|
||||
"""
|
||||
Returns a strategy for generating function calls, but isolated to
|
||||
particular types of arguments, for example only positional arguments.
|
||||
|
||||
This can help reason about debugging errors in specific types of function
|
||||
calls.
|
||||
|
||||
:param which: One of 'keyword', 'positional', 'star', 'double_star'
|
||||
|
||||
:return: Strategy for generating an function call isolated to specific
|
||||
argument types.
|
||||
"""
|
||||
kwargs = dict(
|
||||
max_keyword_args=0,
|
||||
max_positional_args=0,
|
||||
max_star_args=0,
|
||||
max_double_star_args=0,
|
||||
)
|
||||
kwargs['_'.join(('min', which, 'args'))] = 1
|
||||
kwargs['_'.join(('max', which, 'args'))] = 5 if 'star' not in which else 1
|
||||
return function_calls(**kwargs)
|
||||
|
||||
|
||||
with settings(max_examples=25):
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 2.7,
|
||||
reason="need at least Python 2.7")
|
||||
@given(isolated_function_calls('positional'))
|
||||
@example("fn(0)")
|
||||
def test_function_positional_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 2.7,
|
||||
reason="need at least Python 2.7")
|
||||
@given(isolated_function_calls('keyword'))
|
||||
@example("fn(a=0)")
|
||||
def test_function_call_keyword_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 2.7,
|
||||
reason="need at least Python 2.7")
|
||||
@given(isolated_function_calls('star'))
|
||||
@example("fn(*items)")
|
||||
def test_function_call_star_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
@pytest.mark.skipif(PYTHON_VERSION < 2.7,
|
||||
reason="need at least Python 2.7")
|
||||
@given(isolated_function_calls('double_star'))
|
||||
@example("fn(**{})")
|
||||
def test_function_call_double_star_only(expr):
|
||||
validate_uncompyle(expr)
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_CONST_KEY_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(w=0,m=0,**v)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(a=0,**g)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(*g,**j)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_MAP_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(*z,u=0)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_TUPLE_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(**a)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_MAP_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(b,b,b=0,*a)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(*c,v)")
|
||||
|
||||
|
||||
@pytest.mark.xfail()
|
||||
def test_BUILD_CONST_KEY_MAP_CALL_FUNCTION_EX():
|
||||
validate_uncompyle("fn(i=0,y=0,*p)")
|
||||
|
||||
|
||||
@pytest.mark.skip(reason='skipping property based test until all individual tests are passing')
|
||||
@given(function_calls())
|
||||
def test_function_call(function_call):
|
||||
validate_uncompyle(function_call)
|
@@ -35,7 +35,7 @@ def test_grammar():
|
||||
|
||||
expect_right_recursive = set([("designList", ("store", "DUP_TOP", "designList"))])
|
||||
|
||||
if PYTHON_VERSION <= 3.7:
|
||||
if PYTHON_VERSION <= 3.6:
|
||||
unused_rhs.add("call")
|
||||
|
||||
if PYTHON_VERSION > 2.6:
|
||||
@@ -67,20 +67,14 @@ def test_grammar():
|
||||
(("l_stmts", ("lastl_stmt", "come_froms", "l_stmts")))
|
||||
)
|
||||
pass
|
||||
elif 3.0 < PYTHON_VERSION < 3.3:
|
||||
expect_right_recursive.add(
|
||||
(("l_stmts", ("lastl_stmt", "COME_FROM", "l_stmts")))
|
||||
)
|
||||
pass
|
||||
pass
|
||||
pass
|
||||
else:
|
||||
expect_lhs.add("kwarg")
|
||||
|
||||
assert expect_lhs == set(lhs)
|
||||
|
||||
# FIXME
|
||||
if PYTHON_VERSION != 3.8:
|
||||
if PYTHON_VERSION < 3.8:
|
||||
assert expect_lhs == set(lhs)
|
||||
assert unused_rhs == set(rhs)
|
||||
|
||||
assert expect_right_recursive == right_recursive
|
||||
@@ -96,8 +90,10 @@ def test_grammar():
|
||||
]
|
||||
)
|
||||
reduced_dup_rhs = dict((k, dup_rhs[k]) for k in dup_rhs if k not in expect_dup_rhs)
|
||||
for k in reduced_dup_rhs:
|
||||
print(k, reduced_dup_rhs[k])
|
||||
if reduced_dup_rhs:
|
||||
print("\nPossible duplicate RHS that might be folded, into one of the LHS symbols")
|
||||
for k in reduced_dup_rhs:
|
||||
print(k, reduced_dup_rhs[k])
|
||||
# assert not reduced_dup_rhs, reduced_dup_rhs
|
||||
|
||||
s = get_scanner(PYTHON_VERSION, IS_PYPY)
|
||||
|
@@ -1,4 +1,3 @@
|
||||
flake8
|
||||
hypothesis<=3.0.0
|
||||
six
|
||||
pytest==3.2.5
|
||||
pytest==3.2.5 # for 2.7 < PYTHON_VERSION <= 3.2 use pytest 2.9.2; for 3.1 2.10
|
||||
|
10
setup.cfg
10
setup.cfg
@@ -2,10 +2,10 @@
|
||||
release = 1
|
||||
packager = rocky <rb@dustyfeet.com>
|
||||
doc_files = README
|
||||
# CHANGES.txt
|
||||
# USAGE.txt
|
||||
# doc/
|
||||
# examples/
|
||||
|
||||
[bdist_wheel]
|
||||
# universal=1
|
||||
|
||||
[egg_info]
|
||||
tag_build =
|
||||
tag_date = 0
|
||||
|
||||
|
1
test/.gitignore
vendored
1
test/.gitignore
vendored
@@ -1 +1,2 @@
|
||||
/.coverage
|
||||
/nohup.out
|
||||
|
@@ -23,7 +23,7 @@ COVER_DIR=../tmp/grammar-cover
|
||||
# Run short tests
|
||||
check-short:
|
||||
@$(PYTHON) -V && PYTHON_VERSION=`$(PYTHON) -V 2>&1 | cut -d ' ' -f 2 | cut -d'.' -f1,2`; \
|
||||
$(MAKE) check-bytecode-short
|
||||
$(MAKE) check-bytecode-$${PYTHON_VERSION}
|
||||
|
||||
# Run all tests
|
||||
check:
|
||||
|
BIN
test/bytecode_2.6_run/02_decorator.pyc
Normal file
BIN
test/bytecode_2.6_run/02_decorator.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.0_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.0_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
Binary file not shown.
BIN
test/bytecode_3.1_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.1_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.1_run/10_complex.pyc
Normal file
BIN
test/bytecode_3.1_run/10_complex.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.2_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.2_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
Binary file not shown.
BIN
test/bytecode_3.3_run/00_docstring.pyc
Normal file
BIN
test/bytecode_3.3_run/00_docstring.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.3_run/02_pos_args.pyc
Normal file
BIN
test/bytecode_3.3_run/02_pos_args.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.3_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.3_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.3_run/10_complex.pyc
Normal file
BIN
test/bytecode_3.3_run/10_complex.pyc
Normal file
Binary file not shown.
Binary file not shown.
BIN
test/bytecode_3.4_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.4_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
test/bytecode_3.5_run/02_decorator.pyc
Normal file
BIN
test/bytecode_3.5_run/02_decorator.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.5_run/02_pos_args.pyc
Normal file
BIN
test/bytecode_3.5_run/02_pos_args.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.5_run/04_importlist.pyc
Normal file
BIN
test/bytecode_3.5_run/04_importlist.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.5_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.5_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.5_run/10_complex.pyc
Normal file
BIN
test/bytecode_3.5_run/10_complex.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
test/bytecode_3.6_run/02_if_not_or.pyc
Normal file
BIN
test/bytecode_3.6_run/02_if_not_or.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.6_run/02_var_annotate.pyc
Normal file
BIN
test/bytecode_3.6_run/02_var_annotate.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.6_run/04_importlist.pyc
Normal file
BIN
test/bytecode_3.6_run/04_importlist.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.6_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.6_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.7/02_async_for_generator.pyc
Normal file
BIN
test/bytecode_3.7/02_async_for_generator.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.7/04_async.pyc
Normal file
BIN
test/bytecode_3.7/04_async.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
test/bytecode_3.7_run/02_if_not_or.pyc
Normal file
BIN
test/bytecode_3.7_run/02_if_not_or.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.7_run/02_var_annotate.pyc
Normal file
BIN
test/bytecode_3.7_run/02_var_annotate.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.7_run/04_importlist.pyc
Normal file
BIN
test/bytecode_3.7_run/04_importlist.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.7_run/04_lambda_star_default.pyc
Normal file
BIN
test/bytecode_3.7_run/04_lambda_star_default.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.7_run/06_listcomp.pyc
Normal file
BIN
test/bytecode_3.7_run/06_listcomp.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.7_run/10_complex.pyc
Normal file
BIN
test/bytecode_3.7_run/10_complex.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.8/03_pop_top.pyc
Normal file
BIN
test/bytecode_3.8/03_pop_top.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.8_run/02_var_annotate.pyc
Normal file
BIN
test/bytecode_3.8_run/02_var_annotate.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.8_run/04_and_del.pyc
Normal file
BIN
test/bytecode_3.8_run/04_and_del.pyc
Normal file
Binary file not shown.
BIN
test/bytecode_3.8_run/04_importlist.pyc
Normal file
BIN
test/bytecode_3.8_run/04_importlist.pyc
Normal file
Binary file not shown.
@@ -1,9 +1,34 @@
|
||||
# From python 2.5 make_decorators.py
|
||||
# Bug was in not recognizing @memoize which uses grammra rules
|
||||
# Bug was in not recognizing @memoize which uses grammar rules
|
||||
# using nonterminals mkfuncdeco and mkfuncdeco0
|
||||
|
||||
# This file is RUNNABLE!
|
||||
def memoize(func):
|
||||
pass
|
||||
|
||||
def test_memoize(self):
|
||||
@memoize
|
||||
def double(x):
|
||||
return x * 2
|
||||
|
||||
# Seen in 3.7 test/test_c_locale_coercion.py
|
||||
# Bug was handling multiple decorators in 3.5+
|
||||
# simply because we didn't carry over parser rules over from
|
||||
# earlier versions.
|
||||
|
||||
x = 1
|
||||
def decorator(func):
|
||||
def inc_x():
|
||||
global x
|
||||
x += 1
|
||||
func()
|
||||
return inc_x
|
||||
|
||||
@decorator
|
||||
@decorator
|
||||
def fn():
|
||||
return
|
||||
|
||||
assert x == 1
|
||||
fn()
|
||||
assert x == 3
|
||||
|
@@ -1,5 +1,7 @@
|
||||
# From 2.5.6 osxemxpath.py
|
||||
# Bug is in getting "and" and "del" correct
|
||||
|
||||
# This is RUNNABLE!
|
||||
def normpath(comps):
|
||||
i = 0
|
||||
while i < len(comps):
|
||||
|
@@ -22,3 +22,11 @@ def columnize(l):
|
||||
return [i for i in range(len(l))
|
||||
if not isinstance(l[i], str)]
|
||||
assert [0, 2] == columnize([1, 'a', 2])
|
||||
|
||||
# From 3.7 test_generators
|
||||
# Bug was in handling the way list_if is optimized in 3.7+;
|
||||
# We need list_if37 and compare_chained37.
|
||||
def init_board(c):
|
||||
return [io for io in c if 3 <= io < 5]
|
||||
|
||||
assert init_board(list(range(6))) == [3, 4]
|
||||
|
54
test/simple_source/bug31/10_complex.py
Normal file
54
test/simple_source/bug31/10_complex.py
Normal file
@@ -0,0 +1,54 @@
|
||||
# Greatly simplified from from 3.3 test_complex.py
|
||||
from math import atan2
|
||||
|
||||
# RUNNABLE!
|
||||
def assertCloseAbs(x, y, eps=1e-09):
|
||||
"""Return true iff floats x and y "are close\""""
|
||||
if abs(x) > abs(y):
|
||||
x, y = y, x
|
||||
if y == 0:
|
||||
return abs(x) < eps
|
||||
if x == 0:
|
||||
return abs(y) < eps
|
||||
assert abs((x - y) / y) < eps
|
||||
|
||||
def assertClose(x, y, eps=1e-09):
|
||||
"""Return true iff complexes x and y "are close\""""
|
||||
assertCloseAbs(x.real, y.real, eps)
|
||||
assertCloseAbs(x.imag, y.imag, eps)
|
||||
|
||||
def check_div(x, y):
|
||||
"""Compute complex z=x*y, and check that z/x==y and z/y==x."""
|
||||
z = x * y
|
||||
if x != 0:
|
||||
q = z / x
|
||||
assertClose(q, y)
|
||||
q = z.__truediv__(x)
|
||||
assertClose(q, y)
|
||||
if y != 0:
|
||||
q = z / y
|
||||
assertClose(q, x)
|
||||
q = z.__truediv__(y)
|
||||
assertClose(q, x)
|
||||
|
||||
def test_truediv():
|
||||
simple_real = [float(i) for i in range(-5, 6)]
|
||||
simple_complex = [complex(x, y) for x in simple_real for y in simple_real]
|
||||
for x in simple_complex:
|
||||
for y in simple_complex:
|
||||
check_div(x, y)
|
||||
|
||||
def test_plus_minus_0j():
|
||||
assert -0j == -0j == complex(0.0, 0.0)
|
||||
assert -0-0j == -0j == complex(0.0, 0.0)
|
||||
z1, z2 = (0j, -0j)
|
||||
assert atan2(z1.imag, -1.0) == atan2(0.0, -1.0)
|
||||
# assert atan2(z2.imag, -1.0), atan2(-0.0, -1.0)
|
||||
|
||||
# Check that we can handle -inf, and inf as a complex numbers.
|
||||
# And put it in a tuple and a list to make it harder.
|
||||
z1, z2 = (-1e1000j, 1e1000j)
|
||||
assert z1 in [-1e1000j, 1e1000j]
|
||||
assert z1 != z2
|
||||
test_truediv()
|
||||
test_plus_minus_0j()
|
18
test/simple_source/bug33/04_lambda_star_default.py
Normal file
18
test/simple_source/bug33/04_lambda_star_default.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# From 3.x test_audiop.py
|
||||
|
||||
# Bug is handling default value after * argument in a lambda.
|
||||
# That's a mouthful of desciption; I am not sure if the really
|
||||
# hacky fix to the code is even correct.
|
||||
|
||||
#
|
||||
# FIXME: try and test with more than one default argument.
|
||||
|
||||
# RUNNABLE
|
||||
def pack(width, data):
|
||||
return (width, data)
|
||||
|
||||
packs = {w: (lambda *data, width=w: pack(width, data)) for w in (1, 2, 4)}
|
||||
|
||||
assert packs[1]('a') == (1, ('a',))
|
||||
assert packs[2]('b') == (2, ('b',))
|
||||
assert packs[4]('c') == (4, ('c',))
|
@@ -1,7 +1,16 @@
|
||||
# From 3.7 test_cmath.py
|
||||
# Had bug in 3.x in not having semantic importlist rule
|
||||
def main(osp, Mfile, mainpyfile, dbg=None):
|
||||
try:
|
||||
from xdis import load_module, PYTHON_VERSION, IS_PYPY
|
||||
return PYTHON_VERSION, IS_PYPY, load_module
|
||||
except:
|
||||
pass
|
||||
# bug is treating "import as" as "from xx import" while
|
||||
# still being able to hand "from xx import" properly
|
||||
|
||||
# RUNNABLE!
|
||||
import os.path as osp
|
||||
from sys import path
|
||||
from os import sep, name
|
||||
import collections.abc
|
||||
|
||||
assert osp.basename("a") == "a"
|
||||
assert path
|
||||
assert sep
|
||||
assert name
|
||||
assert collections.abc
|
||||
|
@@ -1,5 +1,7 @@
|
||||
# Self-checking test.
|
||||
# Python 3 bug in not detecting the end bounds of if elif.
|
||||
|
||||
# RUNNABLE!
|
||||
def testit(b):
|
||||
if b == 1:
|
||||
a = 1
|
||||
|
@@ -1,6 +1,8 @@
|
||||
# Bug in 3.6 and above.
|
||||
#Not detecting 2nd return is outside of
|
||||
# if/then. Fix was to ensure COME_FROM
|
||||
|
||||
# RUNNABLE!
|
||||
def return_return_bug(foo):
|
||||
if foo == 'say_hello':
|
||||
return "hello"
|
||||
|
@@ -1,6 +1,7 @@
|
||||
# Self-checking test.
|
||||
# String interpolation tests
|
||||
|
||||
# RUNNABLE!
|
||||
var1 = 'x'
|
||||
var2 = 'y'
|
||||
abc = 'def'
|
||||
|
@@ -1,6 +1,7 @@
|
||||
# Bug in 3.6 was not taking "else" branch after compond "if"
|
||||
# In earlier versions we had else detection needed here.
|
||||
|
||||
# RUNNABLE!
|
||||
def f(a, b, c):
|
||||
if a and b:
|
||||
x = 1
|
||||
|
@@ -4,6 +4,7 @@
|
||||
# showparams(c, test="A", **extra_args)
|
||||
# below
|
||||
|
||||
# RUNNABLE!
|
||||
def showparams(c, test, **extra_args):
|
||||
return {'c': c, **extra_args, 'test': test}
|
||||
|
||||
@@ -45,3 +46,10 @@ assert f(2, **a) == {'c': 2, 'param1': 2, 'test': 'A'}
|
||||
assert f3(2, *c, **a) == {'c': 2, 'param1': 2, 'test': 2}
|
||||
assert f3(*d, **a) == {'c': 2, 'param1': 2, 'test': 3}
|
||||
|
||||
# From 3.7 test/test_collections.py
|
||||
# Bug was in getting **dict(..) right
|
||||
from collections import namedtuple
|
||||
|
||||
Point = namedtuple('Point', 'x y')
|
||||
p = Point(11, 22)
|
||||
assert p == Point(**dict(x=11, y=22))
|
||||
|
12
test/simple_source/bug36/02_var_annotate.py
Normal file
12
test/simple_source/bug36/02_var_annotate.py
Normal file
@@ -0,0 +1,12 @@
|
||||
# 3.6+ type annotations on variables
|
||||
from typing import List
|
||||
|
||||
# RUNNABLE!
|
||||
y = 2
|
||||
x: bool
|
||||
z: int = 5
|
||||
x = (z == 5)
|
||||
assert x
|
||||
assert y == 2
|
||||
v: List[int] = [1, 2]
|
||||
assert v[1] == y
|
42
test/simple_source/bug37/02_and_or.py
Normal file
42
test/simple_source/bug37/02_and_or.py
Normal file
@@ -0,0 +1,42 @@
|
||||
# from 3.7 decompyle3/pytest/validate.py
|
||||
# 3.7 changes changes "and" to use JUMP_IF_FALSE_OR_POP instead of
|
||||
# POP_JUMP_IF_FALSE
|
||||
|
||||
# RUNNABLE!
|
||||
def are_instructions_equal(a, b, c, d):
|
||||
return a and (b or c) and d
|
||||
|
||||
for a, b, c, d, expect in (
|
||||
(True, True, False, True, True),
|
||||
(True, False, True, True, True),
|
||||
(False, False, True, True, False),
|
||||
(True, False, True, False, False),
|
||||
):
|
||||
assert are_instructions_equal(a, b, c, d) == expect
|
||||
|
||||
|
||||
# FIXME: figure out how to fix properly, and test.
|
||||
# from 3.7 decompyle3/semantics/pysource.py
|
||||
|
||||
# Bug *is* miscompiling to
|
||||
# if a:
|
||||
# if b or c:
|
||||
# d = 1
|
||||
# else:
|
||||
# d = 2
|
||||
|
||||
def n_alias(a, b, c, d=3):
|
||||
if a and b or c:
|
||||
d = 1
|
||||
else:
|
||||
d = 2
|
||||
return d
|
||||
|
||||
for a, b, c, expect in (
|
||||
(True, True, False, 1),
|
||||
(True, False, True, 1),
|
||||
# (True, False, False, 2), # miscompiles
|
||||
# (False, False, True, 1), # miscompiles
|
||||
(False, False, False, 2),
|
||||
):
|
||||
assert n_alias(a, b, c) == expect, f"{a}, {b}, {c}, {expect}"
|
8
test/simple_source/bug37/02_async_for_generator.py
Normal file
8
test/simple_source/bug37/02_async_for_generator.py
Normal file
@@ -0,0 +1,8 @@
|
||||
# From 3.7 test_asyncgen.py
|
||||
# Bug is handling new "async for" lingo
|
||||
def make_arange(n):
|
||||
# This syntax is legal starting with Python 3.7
|
||||
return (i * 2 async for i in n)
|
||||
|
||||
async def run(m):
|
||||
return [i async for i in m]
|
43
test/simple_source/bug37/02_if_not_or.py
Normal file
43
test/simple_source/bug37/02_if_not_or.py
Normal file
@@ -0,0 +1,43 @@
|
||||
# from 3.7 inspect.py
|
||||
# Bug was "if not predicate or" inside "for".
|
||||
# Jump optimization turns a POP_JUMP_IF_TRUE into
|
||||
# a POP_JUMP_IF_FALSE and this has to be
|
||||
# dealt with at the "if" (or actually "testfalse") level.
|
||||
|
||||
# RUNNABLE!
|
||||
def getmembers(names, object, predicate):
|
||||
for key in names:
|
||||
if not predicate or object:
|
||||
object = 2
|
||||
object += 1
|
||||
return object
|
||||
|
||||
assert getmembers([1], 0, False) == 3
|
||||
assert getmembers([1], 1, True) == 3
|
||||
assert getmembers([1], 0, True) == 1
|
||||
assert getmembers([1], 1, False) == 3
|
||||
assert getmembers([], 1, False) == 1
|
||||
assert getmembers([], 2, True) == 2
|
||||
|
||||
def _shadowed_dict(klass, a, b, c):
|
||||
for entry in klass:
|
||||
if not (a and b):
|
||||
c = 1
|
||||
return c
|
||||
|
||||
assert _shadowed_dict([1], True, True, 3) == 3
|
||||
assert _shadowed_dict([1], True, False, 3) == 1
|
||||
assert _shadowed_dict([1], False, True, 3) == 1
|
||||
assert _shadowed_dict([1], False, False, 3) == 1
|
||||
assert _shadowed_dict([], False, False, 3) == 3
|
||||
|
||||
# Bug: the double "and" comes out as if .. if not and
|
||||
def _shadowed_dict2(klass, a, b, c, d):
|
||||
for entry in klass:
|
||||
if not (a and b and c):
|
||||
d = 1
|
||||
return d
|
||||
|
||||
# Not yet --
|
||||
# assert _shadowed_dict2([1], False, False, False, 3) == 1
|
||||
# assert _shadowed_dict2([1], True, True, True, 3) == 3
|
25
test/simple_source/bug37/03_jump_to_jump.py
Normal file
25
test/simple_source/bug37/03_jump_to_jump.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# From uncompyle6/verify.py
|
||||
# Bug was POP_JUMP offset to short so we have a POP_JUMP
|
||||
# to a JUMP_ABSOULTE and this messes up reduction rule checking.
|
||||
|
||||
def cmp_code_objects(member, a, tokens1, tokens2, verify, f):
|
||||
for member in members:
|
||||
while a:
|
||||
# Increase the bytecode length of the while statement
|
||||
x = 1; x = 2; x = 3; x = 4; x = 5; x = 6; x = 7; x = 8
|
||||
x = 1; x = 2; x = 3; x = 4; x = 5; x = 6; x = 7; x = 8
|
||||
x = 1; x = 2; x = 3; x = 4; x = 5; x = 6; x = 7; x = 8
|
||||
x = 1; x = 2; x = 3; x = 4; x = 5; x = 6; x = 7; x = 8
|
||||
x = 1; x = 2; x = 3; x = 4; x = 5; x = 6; x = 7; x = 8
|
||||
x = 1; x = 2; x = 3; x = 4; x = 5; x = 6; x = 7; x = 8
|
||||
x = 49; x = 50; x = 51; x = 52; x = 53;
|
||||
if tokens1:
|
||||
if tokens2:
|
||||
continue
|
||||
elif f:
|
||||
continue
|
||||
else:
|
||||
a = 2
|
||||
|
||||
i1 += 1
|
||||
x = 54 # comment this out and we're good
|
12
test/simple_source/bug37/04_async.py
Normal file
12
test/simple_source/bug37/04_async.py
Normal file
@@ -0,0 +1,12 @@
|
||||
# from 3.7 test_contextlib_async.py
|
||||
# Bugs were not adding "async" when a function is a decorator,
|
||||
# and a misaligment when using "async with as".
|
||||
@_async_test
|
||||
async def test_enter(self):
|
||||
self.assertIs(await manager.__aenter__(), manager)
|
||||
|
||||
async with manager as context:
|
||||
async with woohoo() as x:
|
||||
x = 1
|
||||
y = 2
|
||||
assert manager is context
|
@@ -1,4 +1,4 @@
|
||||
# Tests:
|
||||
n# Tests:
|
||||
# importstmt ::= LOAD_CONST LOAD_CONST import_as
|
||||
# import_as ::= IMPORT_NAME store
|
||||
|
||||
@@ -15,3 +15,11 @@ class BZ2File(io.BufferedIOBase):
|
||||
|
||||
class ABC(metaclass=BZ2File):
|
||||
pass
|
||||
|
||||
# From 3.3 test_abc
|
||||
|
||||
# Bug was class Descriptor("Descriptor"): instead of below
|
||||
def test_customdescriptors_with_abstractmethod():
|
||||
class Descriptor:
|
||||
def setter(self):
|
||||
return Descriptor(self._fget)
|
||||
|
@@ -4,3 +4,13 @@ from functools import total_ordering
|
||||
@total_ordering
|
||||
class Frame:
|
||||
pass
|
||||
|
||||
|
||||
# From 3.7 test/test_c_locale_coercion.py
|
||||
# Bug is multiple decorators
|
||||
|
||||
@test
|
||||
@unittest
|
||||
class LocaleCoercionTests():
|
||||
# Test implicit reconfiguration of the environment during CLI startup
|
||||
pass
|
||||
|
@@ -4,6 +4,7 @@ from os import path
|
||||
from os import *
|
||||
import time as time1, os as os1
|
||||
import http.client as httpclient
|
||||
from sys import stdin, stdout, stderr
|
||||
if len(__file__) == 0:
|
||||
# a.b.c should force consecutive LOAD_ATTRs
|
||||
import a.b.c as d
|
||||
|
@@ -26,7 +26,7 @@ FULLVERSION=$(pyenv local)
|
||||
PYVERSION=${FULLVERSION%.*}
|
||||
MINOR=${FULLVERSION##?.?.}
|
||||
|
||||
typeset -i STOP_ONERROR=1
|
||||
STOP_ONERROR=${STOP_ONERROR:-1}
|
||||
|
||||
typeset -A SKIP_TESTS
|
||||
case $PYVERSION in
|
||||
@@ -37,11 +37,8 @@ case $PYVERSION in
|
||||
[test_pep247.py]=1 # Long test - might work? Control flow?
|
||||
[test_pwd.py]=1 # Long test - might work? Control flow?
|
||||
[test_pyclbr.py]=1 # Investigate
|
||||
[test_pyexpat.py]=1 # Investigate
|
||||
[test_queue.py]=1 # Control flow?
|
||||
[test_re.py]=1 # try confused with try-else again
|
||||
[test_socketserver.py]=1 # -- test takes too long to run: 40 seconds
|
||||
[test_threading.py]=1 # Line numbers are expected to be different
|
||||
[test_threading.py]=1 # test takes too long to run: 11 seconds
|
||||
[test_thread.py]=1 # test takes too long to run: 36 seconds
|
||||
[test_trace.py]=1 # Long test - works
|
||||
[test_zipfile64.py]=1 # Runs ok but takes 204 seconds
|
||||
@@ -63,7 +60,7 @@ case $PYVERSION in
|
||||
[test_struct.py]=1 # "if and" confused for if .. assert and
|
||||
[test_sys.py]=1 # try confused with try-else again; in test_current_frames()
|
||||
[test_tarfile.py]=1 # try confused with try-else again; top-level import
|
||||
[test_threading.py]=1 # Line numbers are expected to be different
|
||||
[test_threading.py]=1 # test takes too long to run: 11 seconds
|
||||
[test_thread.py]=1 # test takes too long to run: 36 seconds
|
||||
[test_trace.py]=1 # Line numbers are expected to be different
|
||||
[test_zipfile64.py]=1 # Runs ok but takes 204 seconds
|
||||
@@ -75,7 +72,6 @@ case $PYVERSION in
|
||||
[test_codeccallbacks.py]=1 # Fails on its own
|
||||
[test_compile.py]=1 # Intermittent - sometimes works and sometimes doesn't
|
||||
[test_exceptions.py]=1
|
||||
[test_generators.py]=1 # Investigate
|
||||
[test_grp.py]=1 # Long test - might work Control flow?
|
||||
[test_pep352.py]=1 # Investigate
|
||||
[test_pprint.py]=1
|
||||
@@ -87,14 +83,6 @@ case $PYVERSION in
|
||||
[test_zlib.py]=1 # Takes too long to run (more than 3 minutes 39 seconds)
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/lib2to3/refactor.pyc
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/pyclbr.pyc
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/quopri.pyc -- look at ishex, is short
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/random.pyc
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/smtpd.pyc
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/sre_parse.pyc
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/tabnanny.pyc
|
||||
# .pyenv/versions/2.6.9/lib/python2.6/tarfile.pyc
|
||||
|
||||
|
||||
)
|
||||
if (( batch )) ; then
|
||||
# Fails in crontab environment?
|
||||
@@ -120,15 +108,14 @@ case $PYVERSION in
|
||||
|
||||
[test_capi.py]=1
|
||||
[test_curses.py]=1 # Possibly fails on its own but not detected
|
||||
[test test_cmd_line.py]=1 # Takes too long, maybe hangs, or looking for interactive input?
|
||||
[test_cmd_line.py]=1 # Takes too long, maybe hangs, or looking for interactive input?
|
||||
[test_compilex.py]=1 # Probably complex literals again. Investigate
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
[test_doctest.py]=1 # Fails on its own
|
||||
[test_exceptions.py]=1
|
||||
[test_format.py]=1 # control flow. uncompyle2 does not have problems here
|
||||
[test_generators.py]=1 # control flow. uncompyle2 has problem here too
|
||||
[test_grammar.py]=1 # Too many stmts. Handle large stmts
|
||||
[test_grp.py]=1 # test takes to long, works interactively though
|
||||
[test_hashlib.py]=1 # Investigate
|
||||
[test_io.py]=1 # Test takes too long to run
|
||||
[test_ioctl.py]=1 # Test takes too long to run
|
||||
[test_long.py]=1
|
||||
@@ -162,9 +149,92 @@ case $PYVERSION in
|
||||
SKIP_TESTS[test_base64.py]=1
|
||||
fi
|
||||
;;
|
||||
3.0)
|
||||
SKIP_TESTS=(
|
||||
[test_array.py]=1 # Handling of bytestring
|
||||
[test_concurrent_futures.py]=1 # too long to run over 46 seconds by itself
|
||||
[test_datetimetester.py]=1
|
||||
[test_decimal.py]=1
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
[test_fileio.py]=1
|
||||
)
|
||||
if (( batch )) ; then
|
||||
# Fails in crontab environment?
|
||||
# Figure out what's up here
|
||||
SKIP_TESTS[test_exception_variations.py]=1
|
||||
SKIP_TESTS[test_quopri.py]=1
|
||||
fi
|
||||
;;
|
||||
3.1)
|
||||
SKIP_TESTS=(
|
||||
[test_collections.py]=1
|
||||
[test_concurrent_futures.py]=1 # too long to run over 46 seconds by itself
|
||||
[test_datetimetester.py]=1
|
||||
[test_decimal.py]=1
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
[test_fileio.py]=1
|
||||
)
|
||||
if (( batch )) ; then
|
||||
# Fails in crontab environment?
|
||||
# Figure out what's up here
|
||||
SKIP_TESTS[test_exception_variations.py]=1
|
||||
SKIP_TESTS[test_quopri.py]=1
|
||||
fi
|
||||
;;
|
||||
3.2)
|
||||
SKIP_TESTS=(
|
||||
[test_ast.py]=1 # Look at: AssertionError: b'hi' != 'hi'
|
||||
[test_cmd_line.py]=1
|
||||
[test_collections.py]=1
|
||||
[test_concurrent_futures.py]=1 # too long to run over 46 seconds by itself
|
||||
[test_datetimetester.py]=1
|
||||
[test_decimal.py]=1
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
[test_quopri.py]=1 # TypeError: Can't convert 'bytes' object to str implicitly
|
||||
)
|
||||
if (( batch )) ; then
|
||||
# Fails in crontab environment?
|
||||
# Figure out what's up here
|
||||
SKIP_TESTS[test_exception_variations.py]=1
|
||||
SKIP_TESTS[test_quopri.py]=1
|
||||
fi
|
||||
;;
|
||||
|
||||
3.3)
|
||||
SKIP_TESTS=(
|
||||
[test_atexit.py]=1 #
|
||||
)
|
||||
if (( batch )) ; then
|
||||
# Fails in crontab environment?
|
||||
# Figure out what's up here
|
||||
SKIP_TESTS[test_exception_variations.py]=1
|
||||
SKIP_TESTS[test_quopri.py]=1
|
||||
fi
|
||||
;;
|
||||
|
||||
3.4)
|
||||
SKIP_TESTS=(
|
||||
[test_asynchat.py]=1 #
|
||||
[test_asyncore.py]=1 #
|
||||
[test_atexit.py]=1 #
|
||||
[test_bdb.py]=1 #
|
||||
[test_binascii]=1
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
)
|
||||
if (( batch )) ; then
|
||||
# Fails in crontab environment?
|
||||
# Figure out what's up here
|
||||
SKIP_TESTS[test_exception_variations.py]=1
|
||||
SKIP_TESTS[test_quopri.py]=1
|
||||
fi
|
||||
;;
|
||||
3.5)
|
||||
SKIP_TESTS=(
|
||||
[test_decorators.py]=1 # Control flow wrt "if elif"
|
||||
[test_ast.py]=1 # line 379, in test_literal_eval self.assertEqual(ast.literal_eval('b"hi"'), 'hi')
|
||||
[test_atexit.py]=1 #
|
||||
[test_builtin.py]=1 #
|
||||
[test_compare.py]=1
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
)
|
||||
if (( batch )) ; then
|
||||
# Fails in crontab environment?
|
||||
@@ -176,10 +246,63 @@ case $PYVERSION in
|
||||
|
||||
3.6)
|
||||
SKIP_TESTS=(
|
||||
[test_ast.py]=1 #
|
||||
[test_atexit.py]=1 #
|
||||
[test_bdb.py]=1 #
|
||||
[test_builtin.py]=1 #
|
||||
[test_compare.py]=1
|
||||
[test_compile.py]=1
|
||||
[test_contains.py]=1 # Code "while False: yield None" is optimized away in compilation
|
||||
[test_contextlib_async.py]=1 # Investigate
|
||||
[test_coroutines.py]=1 # Parse error
|
||||
[test_curses.py]=1 # Parse error
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
[test_quopri.py]=1 # AssertionError: b'123=four' != '123=four'
|
||||
)
|
||||
;;
|
||||
3.7)
|
||||
SKIP_TESTS=(
|
||||
[test_ast.py]=1 #
|
||||
[test_atexit.py]=1 #
|
||||
[test_baseexception.py]=1 #
|
||||
[test_bdb.py]=1 #
|
||||
[test_buffer.py]=1 # parse error
|
||||
[test_builtin.py]=1 # parser error
|
||||
[test_cmdline.py]=1 # Interactive?
|
||||
[test_collections.py]=1 # Fixed I think in decompyle3 - pull from there
|
||||
[test_compare.py]=1
|
||||
[test_compile.py]=1
|
||||
[test_configparser.py]=1
|
||||
[test_contains.py]=1 # Code "while False: yield None" is optimized away in compilation
|
||||
[test_contextlib_async.py]=1 # Investigate
|
||||
[test_context.py]=1
|
||||
[test_coroutines.py]=1 # Parse error
|
||||
[test_crypt.py]=1 # Parse error
|
||||
[test_curses.py]=1 # Parse error
|
||||
[test_dataclasses.py]=1 # parse error
|
||||
[test_datetime.py]=1 # Takes too long
|
||||
[test_dbm_gnu.py]=1 # Takes too long
|
||||
[test_decimal.py]=1 # Parse error
|
||||
[test_descr.py]=1 # Parse error
|
||||
[test_dictcomps.py]=1 # Bad semantics - Investigate
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
[test_enumerate.py]=1 #
|
||||
[test_enum.py]=1 #
|
||||
[test_faulthandler.py]=1 # takes too long
|
||||
[test_generators.py]=1 # improper decompile of assert i < n and (n-i) % 3 == 0
|
||||
# ...
|
||||
)
|
||||
;;
|
||||
3.8)
|
||||
SKIP_TESTS=(
|
||||
[test_contains.py]=1 # Code "while False: yield None" is optimized away in compilation
|
||||
[test_collections.py]=1 # Investigate
|
||||
[test_decorators.py]=1 # Control flow wrt "if elif"
|
||||
[test_exceptions.py]=1 # parse error
|
||||
[test_dis.py]=1 # We change line numbers - duh!
|
||||
[test_pow.py]=1 # Control flow wrt "continue"
|
||||
[test_quopri.py]=1 # Only fails on POWER
|
||||
# ...
|
||||
)
|
||||
;;
|
||||
*)
|
||||
@@ -202,11 +325,15 @@ TESTDIR=/tmp/test${PYVERSION}
|
||||
if [[ -e $TESTDIR ]] ; then
|
||||
rm -fr $TESTDIR
|
||||
fi
|
||||
|
||||
PYENV_ROOT=${PYENV_ROOT:-$HOME/.pyenv}
|
||||
pyenv_local=$(pyenv local)
|
||||
mkdir $TESTDIR || exit $?
|
||||
cp -r ${PYENV_ROOT}/versions/${PYVERSION}.${MINOR}/lib/python${PYVERSION}/test $TESTDIR
|
||||
cd $TESTDIR/test
|
||||
pyenv local $FULLVERSION
|
||||
export PYTHONPATH=$TESTDIR
|
||||
export PATH=${PYENV_ROOT}/shims:${PATH}
|
||||
|
||||
# Run tests
|
||||
typeset -i i=0
|
||||
@@ -218,7 +345,7 @@ if [[ -n $1 ]] ; then
|
||||
SKIP_TESTS=()
|
||||
fi
|
||||
else
|
||||
files=test_*.py
|
||||
files=$(echo test_*.py)
|
||||
fi
|
||||
|
||||
typeset -i ALL_FILES_STARTTIME=$(date +%s)
|
||||
|
@@ -135,8 +135,8 @@ def do_tests(
|
||||
pass
|
||||
|
||||
if len(files) > max_files:
|
||||
files = [file for file in files if not "site-packages" in file]
|
||||
files = [file for file in files if not "test" in file]
|
||||
files = [file for file in files if not "site-packages" in file and (file.endswith(".pyo") or file.endswith(".pyc"))]
|
||||
files = [file for file in files if not "test" in file and (file.endswith(".pyo") or file.endswith(".pyc"))]
|
||||
if len(files) > max_files:
|
||||
# print("Number of files %d - truncating to last 200" % len(files))
|
||||
print(
|
||||
|
@@ -22,17 +22,19 @@ from uncompyle6.disas import check_object_path
|
||||
from uncompyle6.semantics import pysource
|
||||
from uncompyle6.parser import ParserError
|
||||
from uncompyle6.version import VERSION
|
||||
|
||||
# from uncompyle6.linenumbers import line_number_mapping
|
||||
|
||||
from uncompyle6.semantics.pysource import code_deparse
|
||||
from uncompyle6.semantics.fragments import code_deparse as code_deparse_fragments
|
||||
from uncompyle6.semantics.fragments import code_deparse as code_deparse_fragments
|
||||
from uncompyle6.semantics.linemap import deparse_code_with_map
|
||||
|
||||
from xdis.load import load_module
|
||||
|
||||
|
||||
def _get_outstream(outfile):
|
||||
dir = os.path.dirname(outfile)
|
||||
failed_file = outfile + '_failed'
|
||||
failed_file = outfile + "_failed"
|
||||
if os.path.exists(failed_file):
|
||||
os.remove(failed_file)
|
||||
try:
|
||||
@@ -40,9 +42,10 @@ def _get_outstream(outfile):
|
||||
except OSError:
|
||||
pass
|
||||
if PYTHON_VERSION < 3.0:
|
||||
return open(outfile, mode='wb')
|
||||
return open(outfile, mode="wb")
|
||||
else:
|
||||
return open(outfile, mode='w', encoding='utf-8')
|
||||
return open(outfile, mode="w", encoding="utf-8")
|
||||
|
||||
|
||||
def decompile(
|
||||
bytecode_version,
|
||||
@@ -75,81 +78,99 @@ def decompile(
|
||||
real_out = out or sys.stdout
|
||||
|
||||
def write(s):
|
||||
s += '\n'
|
||||
s += "\n"
|
||||
real_out.write(s)
|
||||
|
||||
assert iscode(co)
|
||||
|
||||
co_pypy_str = 'PyPy ' if is_pypy else ''
|
||||
run_pypy_str = 'PyPy ' if IS_PYPY else ''
|
||||
sys_version_lines = sys.version.split('\n')
|
||||
co_pypy_str = "PyPy " if is_pypy else ""
|
||||
run_pypy_str = "PyPy " if IS_PYPY else ""
|
||||
sys_version_lines = sys.version.split("\n")
|
||||
if source_encoding:
|
||||
write('# -*- coding: %s -*-' % source_encoding)
|
||||
write('# uncompyle6 version %s\n'
|
||||
'# %sPython bytecode %s%s\n# Decompiled from: %sPython %s' %
|
||||
(VERSION, co_pypy_str, bytecode_version,
|
||||
" (%s)" % str(magic_int) if magic_int else "",
|
||||
run_pypy_str, '\n# '.join(sys_version_lines)))
|
||||
write("# -*- coding: %s -*-" % source_encoding)
|
||||
write(
|
||||
"# uncompyle6 version %s\n"
|
||||
"# %sPython bytecode %s%s\n# Decompiled from: %sPython %s"
|
||||
% (
|
||||
VERSION,
|
||||
co_pypy_str,
|
||||
bytecode_version,
|
||||
" (%s)" % str(magic_int) if magic_int else "",
|
||||
run_pypy_str,
|
||||
"\n# ".join(sys_version_lines),
|
||||
)
|
||||
)
|
||||
if co.co_filename:
|
||||
write('# Embedded file name: %s' % co.co_filename,)
|
||||
write("# Embedded file name: %s" % co.co_filename,)
|
||||
if timestamp:
|
||||
write('# Compiled at: %s' % datetime.datetime.fromtimestamp(timestamp))
|
||||
write("# Compiled at: %s" % datetime.datetime.fromtimestamp(timestamp))
|
||||
if source_size:
|
||||
write('# Size of source mod 2**32: %d bytes' % source_size)
|
||||
write("# Size of source mod 2**32: %d bytes" % source_size)
|
||||
|
||||
debug_opts = {
|
||||
'asm': showasm,
|
||||
'ast': showast,
|
||||
'grammar': showgrammar
|
||||
}
|
||||
debug_opts = {"asm": showasm, "ast": showast, "grammar": showgrammar}
|
||||
|
||||
try:
|
||||
if mapstream:
|
||||
if isinstance(mapstream, str):
|
||||
mapstream = _get_outstream(mapstream)
|
||||
|
||||
deparsed = deparse_code_with_map(bytecode_version, co, out, showasm, showast,
|
||||
showgrammar,
|
||||
code_objects = code_objects,
|
||||
is_pypy = is_pypy,
|
||||
)
|
||||
header_count = 3+len(sys_version_lines)
|
||||
linemap = [(line_no, deparsed.source_linemap[line_no]+header_count)
|
||||
for line_no in
|
||||
sorted(deparsed.source_linemap.keys())]
|
||||
deparsed = deparse_code_with_map(
|
||||
bytecode_version,
|
||||
co,
|
||||
out,
|
||||
showasm,
|
||||
showast,
|
||||
showgrammar,
|
||||
code_objects=code_objects,
|
||||
is_pypy=is_pypy,
|
||||
)
|
||||
header_count = 3 + len(sys_version_lines)
|
||||
linemap = [
|
||||
(line_no, deparsed.source_linemap[line_no] + header_count)
|
||||
for line_no in sorted(deparsed.source_linemap.keys())
|
||||
]
|
||||
mapstream.write("\n\n# %s\n" % linemap)
|
||||
else:
|
||||
if do_fragments:
|
||||
deparse_fn = code_deparse_fragments
|
||||
else:
|
||||
deparse_fn = code_deparse
|
||||
deparsed = deparse_fn(co, out, bytecode_version,
|
||||
debug_opts = debug_opts,
|
||||
is_pypy=is_pypy)
|
||||
deparsed = deparse_fn(
|
||||
co, out, bytecode_version, debug_opts=debug_opts, is_pypy=is_pypy
|
||||
)
|
||||
pass
|
||||
return deparsed
|
||||
except pysource.SourceWalkerError as e:
|
||||
# deparsing failed
|
||||
raise pysource.SourceWalkerError(str(e))
|
||||
|
||||
|
||||
def compile_file(source_path):
|
||||
if source_path.endswith('.py'):
|
||||
if source_path.endswith(".py"):
|
||||
basename = source_path[:-3]
|
||||
else:
|
||||
basename = source_path
|
||||
|
||||
if hasattr(sys, 'pypy_version_info'):
|
||||
if hasattr(sys, "pypy_version_info"):
|
||||
bytecode_path = "%s-pypy%s.pyc" % (basename, PYTHON_VERSION)
|
||||
else:
|
||||
bytecode_path = "%s-%s.pyc" % (basename, PYTHON_VERSION)
|
||||
|
||||
print("compiling %s to %s" % (source_path, bytecode_path))
|
||||
py_compile.compile(source_path, bytecode_path, 'exec')
|
||||
py_compile.compile(source_path, bytecode_path, "exec")
|
||||
return bytecode_path
|
||||
|
||||
|
||||
def decompile_file(filename, outstream=None, showasm=None, showast=False,
|
||||
showgrammar=False, source_encoding=None, mapstream=None, do_fragments=False):
|
||||
def decompile_file(
|
||||
filename,
|
||||
outstream=None,
|
||||
showasm=None,
|
||||
showast=False,
|
||||
showgrammar=False,
|
||||
source_encoding=None,
|
||||
mapstream=None,
|
||||
do_fragments=False,
|
||||
):
|
||||
"""
|
||||
decompile Python byte-code file (.pyc). Return objects to
|
||||
all of the deparsed objects found in `filename`.
|
||||
@@ -157,32 +178,68 @@ def decompile_file(filename, outstream=None, showasm=None, showast=False,
|
||||
|
||||
filename = check_object_path(filename)
|
||||
code_objects = {}
|
||||
(version, timestamp, magic_int, co, is_pypy,
|
||||
source_size) = load_module(filename, code_objects)
|
||||
(version, timestamp, magic_int, co, is_pypy, source_size) = load_module(
|
||||
filename, code_objects
|
||||
)
|
||||
|
||||
if isinstance(co, list):
|
||||
deparsed = []
|
||||
for con in co:
|
||||
deparsed.append(
|
||||
decompile(version, con, outstream, showasm, showast,
|
||||
timestamp, showgrammar, source_encoding, code_objects=code_objects,
|
||||
is_pypy=is_pypy, magic_int=magic_int),
|
||||
mapstream=mapstream)
|
||||
decompile(
|
||||
version,
|
||||
con,
|
||||
outstream,
|
||||
showasm,
|
||||
showast,
|
||||
timestamp,
|
||||
showgrammar,
|
||||
source_encoding,
|
||||
code_objects=code_objects,
|
||||
is_pypy=is_pypy,
|
||||
magic_int=magic_int,
|
||||
),
|
||||
mapstream=mapstream,
|
||||
)
|
||||
else:
|
||||
deparsed = [decompile(version, co, outstream, showasm, showast,
|
||||
timestamp, showgrammar, source_encoding,
|
||||
code_objects=code_objects, source_size=source_size,
|
||||
is_pypy=is_pypy, magic_int=magic_int,
|
||||
mapstream=mapstream, do_fragments=do_fragments)]
|
||||
deparsed = [
|
||||
decompile(
|
||||
version,
|
||||
co,
|
||||
outstream,
|
||||
showasm,
|
||||
showast,
|
||||
timestamp,
|
||||
showgrammar,
|
||||
source_encoding,
|
||||
code_objects=code_objects,
|
||||
source_size=source_size,
|
||||
is_pypy=is_pypy,
|
||||
magic_int=magic_int,
|
||||
mapstream=mapstream,
|
||||
do_fragments=do_fragments,
|
||||
)
|
||||
]
|
||||
co = None
|
||||
return deparsed
|
||||
|
||||
|
||||
# FIXME: combine into an options parameter
|
||||
def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
showasm=None, showast=False, do_verify=False,
|
||||
showgrammar=False, source_encoding=None, raise_on_error=False,
|
||||
do_linemaps=False, do_fragments=False):
|
||||
def main(
|
||||
in_base,
|
||||
out_base,
|
||||
compiled_files,
|
||||
source_files,
|
||||
outfile=None,
|
||||
showasm=None,
|
||||
showast=False,
|
||||
do_verify=False,
|
||||
showgrammar=False,
|
||||
source_encoding=None,
|
||||
raise_on_error=False,
|
||||
do_linemaps=False,
|
||||
do_fragments=False,
|
||||
):
|
||||
"""
|
||||
in_base base directory for input files
|
||||
out_base base directory for output files (ignored when
|
||||
@@ -205,49 +262,46 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
infile = os.path.join(in_base, filename)
|
||||
# print("XXX", infile)
|
||||
if not os.path.exists(infile):
|
||||
sys.stderr.write("File '%s' doesn't exist. Skipped\n"
|
||||
% infile)
|
||||
sys.stderr.write("File '%s' doesn't exist. Skipped\n" % infile)
|
||||
continue
|
||||
|
||||
if do_linemaps:
|
||||
linemap_stream = infile + '.pymap'
|
||||
linemap_stream = infile + ".pymap"
|
||||
pass
|
||||
|
||||
# print (infile, file=sys.stderr)
|
||||
|
||||
if outfile: # outfile was given as parameter
|
||||
if outfile: # outfile was given as parameter
|
||||
outstream = _get_outstream(outfile)
|
||||
elif out_base is None:
|
||||
outstream = sys.stdout
|
||||
if do_linemaps:
|
||||
linemap_stream = sys.stdout
|
||||
if do_verify:
|
||||
prefix = os.path.basename(filename) + '-'
|
||||
if prefix.endswith('.py'):
|
||||
prefix = prefix[:-len('.py')]
|
||||
prefix = os.path.basename(filename) + "-"
|
||||
if prefix.endswith(".py"):
|
||||
prefix = prefix[: -len(".py")]
|
||||
|
||||
# Unbuffer output if possible
|
||||
buffering = -1 if sys.stdout.isatty() else 0
|
||||
if PYTHON_VERSION >= 3.5:
|
||||
t = tempfile.NamedTemporaryFile(mode='w+b',
|
||||
buffering=buffering,
|
||||
suffix='.py',
|
||||
prefix=prefix)
|
||||
t = tempfile.NamedTemporaryFile(
|
||||
mode="w+b", buffering=buffering, suffix=".py", prefix=prefix
|
||||
)
|
||||
else:
|
||||
t = tempfile.NamedTemporaryFile(mode='w+b',
|
||||
suffix='.py',
|
||||
prefix=prefix)
|
||||
t = tempfile.NamedTemporaryFile(
|
||||
mode="w+b", suffix=".py", prefix=prefix
|
||||
)
|
||||
current_outfile = t.name
|
||||
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering)
|
||||
tee = subprocess.Popen(["tee", current_outfile],
|
||||
stdin=subprocess.PIPE)
|
||||
sys.stdout = os.fdopen(sys.stdout.fileno(), "w", buffering)
|
||||
tee = subprocess.Popen(["tee", current_outfile], stdin=subprocess.PIPE)
|
||||
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
|
||||
os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
|
||||
else:
|
||||
if filename.endswith('.pyc'):
|
||||
if filename.endswith(".pyc"):
|
||||
current_outfile = os.path.join(out_base, filename[0:-1])
|
||||
else:
|
||||
current_outfile = os.path.join(out_base, filename) + '_dis'
|
||||
current_outfile = os.path.join(out_base, filename) + "_dis"
|
||||
pass
|
||||
pass
|
||||
|
||||
@@ -257,15 +311,25 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
|
||||
# Try to uncompile the input file
|
||||
try:
|
||||
deparsed = decompile_file(infile, outstream, showasm, showast, showgrammar,
|
||||
source_encoding, linemap_stream, do_fragments)
|
||||
deparsed = decompile_file(
|
||||
infile,
|
||||
outstream,
|
||||
showasm,
|
||||
showast,
|
||||
showgrammar,
|
||||
source_encoding,
|
||||
linemap_stream,
|
||||
do_fragments,
|
||||
)
|
||||
if do_fragments:
|
||||
for d in deparsed:
|
||||
last_mod = None
|
||||
offsets = d.offsets
|
||||
for e in sorted([k for k in offsets.keys() if isinstance(k[1], int)]):
|
||||
for e in sorted(
|
||||
[k for k in offsets.keys() if isinstance(k[1], int)]
|
||||
):
|
||||
if e[0] != last_mod:
|
||||
line = '=' * len(e[0])
|
||||
line = "=" * len(e[0])
|
||||
outstream.write("%s\n%s\n%s\n" % (line, e[0], line))
|
||||
last_mod = e[0]
|
||||
info = offsets[e]
|
||||
@@ -290,9 +354,11 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
raise
|
||||
except RuntimeError as e:
|
||||
sys.stdout.write("\n%s\n" % str(e))
|
||||
if str(e).startswith('Unsupported Python'):
|
||||
if str(e).startswith("Unsupported Python"):
|
||||
sys.stdout.write("\n")
|
||||
sys.stderr.write("\n# Unsupported bytecode in file %s\n# %s\n" % (infile, e))
|
||||
sys.stderr.write(
|
||||
"\n# Unsupported bytecode in file %s\n# %s\n" % (infile, e)
|
||||
)
|
||||
else:
|
||||
if outfile:
|
||||
outstream.close()
|
||||
@@ -309,22 +375,22 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
# else:
|
||||
# sys.stderr.write("\n# %s" % sys.exc_info()[1])
|
||||
# sys.stderr.write("\n# Can't uncompile %s\n" % infile)
|
||||
else: # uncompile successful
|
||||
else: # uncompile successful
|
||||
if current_outfile:
|
||||
outstream.close()
|
||||
|
||||
if do_verify:
|
||||
try:
|
||||
msg = verify.compare_code_with_srcfile(infile,
|
||||
current_outfile,
|
||||
do_verify)
|
||||
msg = verify.compare_code_with_srcfile(
|
||||
infile, current_outfile, do_verify
|
||||
)
|
||||
if not current_outfile:
|
||||
if not msg:
|
||||
print('\n# okay decompiling %s' % infile)
|
||||
print("\n# okay decompiling %s" % infile)
|
||||
okay_files += 1
|
||||
else:
|
||||
verify_failed_files += 1
|
||||
print('\n# %s\n\t%s', infile, msg)
|
||||
print("\n# %s\n\t%s", infile, msg)
|
||||
pass
|
||||
else:
|
||||
okay_files += 1
|
||||
@@ -332,7 +398,7 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
except verify.VerifyCmpError as e:
|
||||
print(e)
|
||||
verify_failed_files += 1
|
||||
os.rename(current_outfile, current_outfile + '_unverified')
|
||||
os.rename(current_outfile, current_outfile + "_unverified")
|
||||
sys.stderr.write("### Error Verifying %s\n" % filename)
|
||||
sys.stderr.write(str(e) + "\n")
|
||||
if not outfile:
|
||||
@@ -345,18 +411,31 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
okay_files += 1
|
||||
pass
|
||||
elif do_verify:
|
||||
sys.stderr.write("\n### uncompile successful, but no file to compare against\n")
|
||||
sys.stderr.write(
|
||||
"\n### uncompile successful, but no file to compare against\n"
|
||||
)
|
||||
pass
|
||||
else:
|
||||
okay_files += 1
|
||||
if not current_outfile:
|
||||
mess = '\n# okay decompiling'
|
||||
mess = "\n# okay decompiling"
|
||||
# mem_usage = __memUsage()
|
||||
print(mess, infile)
|
||||
if current_outfile:
|
||||
sys.stdout.write("%s -- %s\r" %
|
||||
(infile, status_msg(do_verify, tot_files, okay_files, failed_files,
|
||||
verify_failed_files, do_verify)))
|
||||
sys.stdout.write(
|
||||
"%s -- %s\r"
|
||||
% (
|
||||
infile,
|
||||
status_msg(
|
||||
do_verify,
|
||||
tot_files,
|
||||
okay_files,
|
||||
failed_files,
|
||||
verify_failed_files,
|
||||
do_verify,
|
||||
),
|
||||
)
|
||||
)
|
||||
try:
|
||||
# FIXME: Something is weird with Pypy here
|
||||
sys.stdout.flush()
|
||||
@@ -375,24 +454,30 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
|
||||
# ---- main ----
|
||||
|
||||
if sys.platform.startswith('linux') and os.uname()[2][:2] in ['2.', '3.', '4.']:
|
||||
if sys.platform.startswith("linux") and os.uname()[2][:2] in ["2.", "3.", "4."]:
|
||||
|
||||
def __memUsage():
|
||||
mi = open('/proc/self/stat', 'r')
|
||||
mi = open("/proc/self/stat", "r")
|
||||
mu = mi.readline().split()[22]
|
||||
mi.close()
|
||||
return int(mu) / 1000000
|
||||
else:
|
||||
def __memUsage():
|
||||
return ''
|
||||
|
||||
def status_msg(do_verify, tot_files, okay_files, failed_files,
|
||||
verify_failed_files, weak_verify):
|
||||
if weak_verify == 'weak':
|
||||
verification_type = 'weak '
|
||||
elif weak_verify == 'verify-run':
|
||||
verification_type = 'run '
|
||||
|
||||
else:
|
||||
|
||||
def __memUsage():
|
||||
return ""
|
||||
|
||||
|
||||
def status_msg(
|
||||
do_verify, tot_files, okay_files, failed_files, verify_failed_files, weak_verify
|
||||
):
|
||||
if weak_verify == "weak":
|
||||
verification_type = "weak "
|
||||
elif weak_verify == "verify-run":
|
||||
verification_type = "run "
|
||||
else:
|
||||
verification_type = ''
|
||||
verification_type = ""
|
||||
if tot_files == 1:
|
||||
if failed_files:
|
||||
return "\n# decompile failed"
|
||||
@@ -402,7 +487,11 @@ def status_msg(do_verify, tot_files, okay_files, failed_files,
|
||||
return "\n# Successfully decompiled file"
|
||||
pass
|
||||
pass
|
||||
mess = "decompiled %i files: %i okay, %i failed" % (tot_files, okay_files, failed_files)
|
||||
mess = "decompiled %i files: %i okay, %i failed" % (
|
||||
tot_files,
|
||||
okay_files,
|
||||
failed_files,
|
||||
)
|
||||
if do_verify:
|
||||
mess += (", %i %sverification failed" % (verify_failed_files, verification_type))
|
||||
mess += ", %i %sverification failed" % (verify_failed_files, verification_type)
|
||||
return mess
|
||||
|
@@ -62,6 +62,8 @@ class PythonParser(GenericASTBuilder):
|
||||
'kvlist_n',
|
||||
# Python 3.6+
|
||||
'come_from_loops',
|
||||
# Python 3.7+
|
||||
'importlist37',
|
||||
]
|
||||
self.collect = frozenset(nt_list)
|
||||
|
||||
@@ -101,7 +103,6 @@ class PythonParser(GenericASTBuilder):
|
||||
many arguments it has. Often it is not used.
|
||||
"""
|
||||
if rule not in self.new_rules:
|
||||
# print("XXX ", rule) # debug
|
||||
self.new_rules.add(rule)
|
||||
self.addRule(rule, nop_func)
|
||||
customize[opname] = arg_count
|
||||
@@ -496,45 +497,47 @@ class PythonParser(GenericASTBuilder):
|
||||
"""
|
||||
|
||||
def p_expr(self, args):
|
||||
'''
|
||||
expr ::= _mklambda
|
||||
"""
|
||||
expr ::= LOAD_CODE
|
||||
expr ::= LOAD_FAST
|
||||
expr ::= LOAD_NAME
|
||||
expr ::= LOAD_CONST
|
||||
expr ::= LOAD_GLOBAL
|
||||
expr ::= LOAD_DEREF
|
||||
expr ::= binary_expr
|
||||
expr ::= list
|
||||
expr ::= LOAD_FAST
|
||||
expr ::= LOAD_GLOBAL
|
||||
expr ::= LOAD_NAME
|
||||
expr ::= _mklambda
|
||||
expr ::= and
|
||||
expr ::= bin_op
|
||||
expr ::= call
|
||||
expr ::= compare
|
||||
expr ::= dict
|
||||
expr ::= and
|
||||
expr ::= list
|
||||
expr ::= or
|
||||
expr ::= unary_expr
|
||||
expr ::= call
|
||||
expr ::= unary_not
|
||||
expr ::= subscript
|
||||
expr ::= subscript2
|
||||
expr ::= unary_op
|
||||
expr ::= unary_not
|
||||
expr ::= yield
|
||||
|
||||
binary_expr ::= expr expr binary_op
|
||||
binary_op ::= BINARY_ADD
|
||||
binary_op ::= BINARY_MULTIPLY
|
||||
binary_op ::= BINARY_AND
|
||||
binary_op ::= BINARY_OR
|
||||
binary_op ::= BINARY_XOR
|
||||
binary_op ::= BINARY_SUBTRACT
|
||||
binary_op ::= BINARY_TRUE_DIVIDE
|
||||
binary_op ::= BINARY_FLOOR_DIVIDE
|
||||
binary_op ::= BINARY_MODULO
|
||||
binary_op ::= BINARY_LSHIFT
|
||||
binary_op ::= BINARY_RSHIFT
|
||||
binary_op ::= BINARY_POWER
|
||||
# bin_op (formerly "binary_expr") is the Python AST BinOp
|
||||
bin_op ::= expr expr binary_operator
|
||||
binary_operator ::= BINARY_ADD
|
||||
binary_operator ::= BINARY_MULTIPLY
|
||||
binary_operator ::= BINARY_AND
|
||||
binary_operator ::= BINARY_OR
|
||||
binary_operator ::= BINARY_XOR
|
||||
binary_operator ::= BINARY_SUBTRACT
|
||||
binary_operator ::= BINARY_TRUE_DIVIDE
|
||||
binary_operator ::= BINARY_FLOOR_DIVIDE
|
||||
binary_operator ::= BINARY_MODULO
|
||||
binary_operator ::= BINARY_LSHIFT
|
||||
binary_operator ::= BINARY_RSHIFT
|
||||
binary_operator ::= BINARY_POWER
|
||||
|
||||
unary_expr ::= expr unary_op
|
||||
unary_op ::= UNARY_POSITIVE
|
||||
unary_op ::= UNARY_NEGATIVE
|
||||
unary_op ::= UNARY_INVERT
|
||||
# unary_op (formerly "unary_expr") is the Python AST BinOp
|
||||
unary_op ::= expr unary_operator
|
||||
unary_operator ::= UNARY_POSITIVE
|
||||
unary_operator ::= UNARY_NEGATIVE
|
||||
unary_operator ::= UNARY_INVERT
|
||||
|
||||
unary_not ::= expr UNARY_NOT
|
||||
|
||||
@@ -574,10 +577,10 @@ class PythonParser(GenericASTBuilder):
|
||||
|
||||
# Positional arguments in make_function
|
||||
pos_arg ::= expr
|
||||
'''
|
||||
"""
|
||||
|
||||
def p_store(self, args):
|
||||
'''
|
||||
"""
|
||||
# Note. The below is right-recursive:
|
||||
designList ::= store store
|
||||
designList ::= store DUP_TOP designList
|
||||
@@ -597,7 +600,7 @@ class PythonParser(GenericASTBuilder):
|
||||
store ::= store_subscript
|
||||
store_subscript ::= expr expr STORE_SUBSCR
|
||||
store ::= unpack
|
||||
'''
|
||||
"""
|
||||
|
||||
|
||||
def parse(p, tokens, customize):
|
||||
|
@@ -227,7 +227,7 @@ class Python2Parser(PythonParser):
|
||||
def p_op2(self, args):
|
||||
"""
|
||||
inplace_op ::= INPLACE_DIVIDE
|
||||
binary_op ::= BINARY_DIVIDE
|
||||
binary_operator ::= BINARY_DIVIDE
|
||||
"""
|
||||
|
||||
def customize_grammar_rules(self, tokens, customize):
|
||||
|
@@ -341,8 +341,7 @@ class Python26Parser(Python2Parser):
|
||||
WITH_CLEANUP END_FINALLY
|
||||
""")
|
||||
super(Python26Parser, self).customize_grammar_rules(tokens, customize)
|
||||
if self.version >= 2.6:
|
||||
self.check_reduce['and'] = 'AST'
|
||||
self.check_reduce['and'] = 'AST'
|
||||
self.check_reduce['assert_expr_and'] = 'AST'
|
||||
self.check_reduce['list_for'] = 'AST'
|
||||
self.check_reduce['try_except'] = 'tokens'
|
||||
@@ -380,6 +379,7 @@ class Python26Parser(Python2Parser):
|
||||
# or that it jumps to the same place as the end of "and"
|
||||
jmp_false = ast[1][0]
|
||||
jmp_target = jmp_false.offset + jmp_false.attr + 3
|
||||
|
||||
return not (jmp_target == tokens[test_index].offset or
|
||||
tokens[last].pattr == jmp_false.pattr)
|
||||
elif rule == (
|
||||
|
@@ -285,10 +285,10 @@ class Python27Parser(Python2Parser):
|
||||
or jump_target == next_offset(ast[-1].op, ast[-1].opc, ast[-1].offset))
|
||||
elif rule == ("iflaststmtl", ("testexpr", "c_stmts")):
|
||||
testexpr = ast[0]
|
||||
if testexpr[0] == "testfalse":
|
||||
testfalse = testexpr[0]
|
||||
if testfalse[1] == "jmp_false":
|
||||
jmp_false = testfalse[1]
|
||||
if testexpr[0] in ("testfalse", "testtrue"):
|
||||
test = testexpr[0]
|
||||
if len(test) > 1 and test[1].kind.startswith("jmp_"):
|
||||
jmp_target = test[1][0].attr
|
||||
if last == len(tokens):
|
||||
last -= 1
|
||||
while (isinstance(tokens[first].offset, str) and first < last):
|
||||
@@ -297,7 +297,7 @@ class Python27Parser(Python2Parser):
|
||||
return True
|
||||
while (first < last and isinstance(tokens[last].offset, str)):
|
||||
last -= 1
|
||||
return tokens[first].offset < jmp_false[0].attr < tokens[last].offset
|
||||
return tokens[first].off2int() < jmp_target < tokens[last].off2int()
|
||||
pass
|
||||
pass
|
||||
pass
|
||||
|
@@ -835,11 +835,8 @@ class Python3Parser(PythonParser):
|
||||
dict_comp ::= LOAD_DICTCOMP LOAD_STR MAKE_FUNCTION_0 expr
|
||||
GET_ITER CALL_FUNCTION_1
|
||||
classdefdeco1 ::= expr classdefdeco2 CALL_FUNCTION_1
|
||||
classdefdeco1 ::= expr classdefdeco1 CALL_FUNCTION_1
|
||||
"""
|
||||
if self.version < 3.5:
|
||||
rule += """
|
||||
classdefdeco1 ::= expr classdefdeco1 CALL_FUNCTION_1
|
||||
"""
|
||||
self.addRule(rule, nop_func)
|
||||
|
||||
self.custom_classfunc_rule(
|
||||
@@ -893,7 +890,7 @@ class Python3Parser(PythonParser):
|
||||
self.addRule(
|
||||
"""
|
||||
expr ::= get_iter
|
||||
attribute ::= expr GET_ITER
|
||||
get_iter ::= expr GET_ITER
|
||||
""",
|
||||
nop_func,
|
||||
)
|
||||
@@ -1589,6 +1586,12 @@ class Python3Parser(PythonParser):
|
||||
if not isinstance(come_froms, Token):
|
||||
return tokens[first].offset > come_froms[-1].attr
|
||||
return False
|
||||
elif lhs == "ifelsestmt" and rule[1][2] == "jump_forward_else":
|
||||
last = min(last, len(tokens)-1)
|
||||
if tokens[last].off2int() == -1:
|
||||
last -= 1
|
||||
jump_forward_else = ast[2]
|
||||
return tokens[first].off2int() <= jump_forward_else[0].attr < tokens[last].off2int()
|
||||
|
||||
return False
|
||||
|
||||
|
@@ -96,8 +96,8 @@ class Python35Parser(Python34Parser):
|
||||
else_suite COME_FROM_LOOP
|
||||
|
||||
|
||||
inplace_op ::= INPLACE_MATRIX_MULTIPLY
|
||||
binary_op ::= BINARY_MATRIX_MULTIPLY
|
||||
inplace_op ::= INPLACE_MATRIX_MULTIPLY
|
||||
binary_operator ::= BINARY_MATRIX_MULTIPLY
|
||||
|
||||
# Python 3.5+ does jump optimization
|
||||
# In <.3.5 the below is a JUMP_FORWARD to a JUMP_ABSOLUTE.
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2017-2019 Rocky Bernstein
|
||||
# Copyright (c) 2017-2020 Rocky Bernstein
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@@ -22,7 +22,6 @@ from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
|
||||
from uncompyle6.parsers.parse37base import Python37BaseParser
|
||||
|
||||
class Python37Parser(Python37BaseParser):
|
||||
|
||||
def __init__(self, debug_parser=PARSER_DEFAULT_DEBUG):
|
||||
super(Python37Parser, self).__init__(debug_parser)
|
||||
self.customized = {}
|
||||
@@ -91,8 +90,6 @@ class Python37Parser(Python37BaseParser):
|
||||
else_suitec ::= c_stmts
|
||||
else_suitec ::= returns
|
||||
|
||||
stmt ::= assert
|
||||
|
||||
stmt ::= classdef
|
||||
stmt ::= call_stmt
|
||||
|
||||
@@ -131,7 +128,6 @@ class Python37Parser(Python37BaseParser):
|
||||
|
||||
def p_expr(self, args):
|
||||
"""
|
||||
expr ::= _mklambda
|
||||
expr ::= LOAD_CODE
|
||||
expr ::= LOAD_CONST
|
||||
expr ::= LOAD_DEREF
|
||||
@@ -139,38 +135,42 @@ class Python37Parser(Python37BaseParser):
|
||||
expr ::= LOAD_GLOBAL
|
||||
expr ::= LOAD_NAME
|
||||
expr ::= LOAD_STR
|
||||
expr ::= binary_expr
|
||||
expr ::= list
|
||||
expr ::= _mklambda
|
||||
expr ::= and
|
||||
expr ::= bin_op
|
||||
expr ::= call
|
||||
expr ::= compare
|
||||
expr ::= dict
|
||||
expr ::= and
|
||||
expr ::= generator_exp
|
||||
expr ::= list
|
||||
expr ::= or
|
||||
expr ::= unary_expr
|
||||
expr ::= call
|
||||
expr ::= unary_not
|
||||
expr ::= subscript
|
||||
expr ::= subscript2
|
||||
expr ::= unary_not
|
||||
expr ::= unary_op
|
||||
expr ::= yield
|
||||
expr ::= generator_exp
|
||||
|
||||
binary_expr ::= expr expr binary_op
|
||||
binary_op ::= BINARY_ADD
|
||||
binary_op ::= BINARY_MULTIPLY
|
||||
binary_op ::= BINARY_AND
|
||||
binary_op ::= BINARY_OR
|
||||
binary_op ::= BINARY_XOR
|
||||
binary_op ::= BINARY_SUBTRACT
|
||||
binary_op ::= BINARY_TRUE_DIVIDE
|
||||
binary_op ::= BINARY_FLOOR_DIVIDE
|
||||
binary_op ::= BINARY_MODULO
|
||||
binary_op ::= BINARY_LSHIFT
|
||||
binary_op ::= BINARY_RSHIFT
|
||||
binary_op ::= BINARY_POWER
|
||||
# bin_op (formerly "binary_expr") is the Python AST BinOp
|
||||
bin_op ::= expr expr binary_operator
|
||||
|
||||
unary_expr ::= expr unary_op
|
||||
unary_op ::= UNARY_POSITIVE
|
||||
unary_op ::= UNARY_NEGATIVE
|
||||
unary_op ::= UNARY_INVERT
|
||||
binary_operator ::= BINARY_ADD
|
||||
binary_operator ::= BINARY_MULTIPLY
|
||||
binary_operator ::= BINARY_AND
|
||||
binary_operator ::= BINARY_OR
|
||||
binary_operator ::= BINARY_XOR
|
||||
binary_operator ::= BINARY_SUBTRACT
|
||||
binary_operator ::= BINARY_TRUE_DIVIDE
|
||||
binary_operator ::= BINARY_FLOOR_DIVIDE
|
||||
binary_operator ::= BINARY_MODULO
|
||||
binary_operator ::= BINARY_LSHIFT
|
||||
binary_operator ::= BINARY_RSHIFT
|
||||
binary_operator ::= BINARY_POWER
|
||||
|
||||
# unary_op (formerly "unary_expr") is the Python AST UnaryOp
|
||||
unary_op ::= expr unary_operator
|
||||
unary_operator ::= UNARY_POSITIVE
|
||||
unary_operator ::= UNARY_NEGATIVE
|
||||
unary_operator ::= UNARY_INVERT
|
||||
|
||||
unary_not ::= expr UNARY_NOT
|
||||
|
||||
@@ -327,6 +327,33 @@ class Python37Parser(Python37BaseParser):
|
||||
attributes ::= LOAD_ATTR+
|
||||
"""
|
||||
|
||||
def p_import37(self, args):
|
||||
"""
|
||||
stmt ::= import_as37
|
||||
import_as37 ::= LOAD_CONST LOAD_CONST importlist37 store POP_TOP
|
||||
|
||||
importlist37 ::= importlist37 ROT_TWO IMPORT_FROM
|
||||
importlist37 ::= importlist37 ROT_TWO POP_TOP IMPORT_FROM
|
||||
importlist37 ::= importattr37
|
||||
importattr37 ::= IMPORT_NAME_ATTR IMPORT_FROM
|
||||
|
||||
# The 3.7base scanner adds IMPORT_NAME_ATTR
|
||||
alias ::= IMPORT_NAME_ATTR attributes store
|
||||
alias ::= IMPORT_NAME_ATTR store
|
||||
import_from ::= LOAD_CONST LOAD_CONST importlist POP_TOP
|
||||
|
||||
expr ::= attribute37
|
||||
attribute37 ::= expr LOAD_METHOD
|
||||
|
||||
stmt ::= import_from37
|
||||
importlist37 ::= importlist37 alias37
|
||||
importlist37 ::= alias37
|
||||
alias37 ::= IMPORT_NAME store
|
||||
alias37 ::= IMPORT_FROM store
|
||||
import_from37 ::= LOAD_CONST LOAD_CONST IMPORT_NAME_ATTR importlist37 POP_TOP
|
||||
|
||||
"""
|
||||
|
||||
def p_list_comprehension(self, args):
|
||||
"""
|
||||
expr ::= list_comp
|
||||
@@ -471,7 +498,7 @@ class Python37Parser(Python37BaseParser):
|
||||
# Python 3.5+ async additions
|
||||
|
||||
inplace_op ::= INPLACE_MATRIX_MULTIPLY
|
||||
binary_op ::= BINARY_MATRIX_MULTIPLY
|
||||
binary_operator ::= BINARY_MATRIX_MULTIPLY
|
||||
|
||||
# Python 3.5+ does jump optimization
|
||||
# In <.3.5 the below is a JUMP_FORWARD to a JUMP_ABSOLUTE.
|
||||
@@ -500,104 +527,12 @@ class Python37Parser(Python37BaseParser):
|
||||
iflaststmt ::= testexpr c_stmts_opt JUMP_FORWARD
|
||||
"""
|
||||
|
||||
def p_36misc(self, args):
|
||||
def p_37async(self, args):
|
||||
"""
|
||||
sstmt ::= sstmt RETURN_LAST
|
||||
|
||||
# 3.6 redoes how return_closure works. FIXME: Isolate to LOAD_CLOSURE
|
||||
return_closure ::= LOAD_CLOSURE DUP_TOP STORE_NAME RETURN_VALUE RETURN_LAST
|
||||
|
||||
for_block ::= l_stmts_opt come_from_loops JUMP_BACK
|
||||
come_from_loops ::= COME_FROM_LOOP*
|
||||
|
||||
whilestmt ::= setup_loop testexpr l_stmts_opt
|
||||
JUMP_BACK come_froms POP_BLOCK COME_FROM_LOOP
|
||||
whilestmt ::= setup_loop testexpr l_stmts_opt
|
||||
come_froms JUMP_BACK come_froms POP_BLOCK COME_FROM_LOOP
|
||||
|
||||
# 3.6 due to jump optimization, we sometimes add RETURN_END_IF where
|
||||
# RETURN_VALUE is meant. Specifcally this can happen in
|
||||
# ifelsestmt -> ...else_suite _. suite_stmts... (last) stmt
|
||||
return ::= ret_expr RETURN_END_IF
|
||||
return ::= ret_expr RETURN_VALUE COME_FROM
|
||||
return_stmt_lambda ::= ret_expr RETURN_VALUE_LAMBDA COME_FROM
|
||||
|
||||
# A COME_FROM is dropped off because of JUMP-to-JUMP optimization
|
||||
and ::= expr jmp_false expr
|
||||
and ::= expr jmp_false expr jmp_false
|
||||
|
||||
jf_cf ::= JUMP_FORWARD COME_FROM
|
||||
cf_jf_else ::= come_froms JUMP_FORWARD ELSE
|
||||
|
||||
conditional ::= expr jmp_false expr jf_cf expr COME_FROM
|
||||
|
||||
async_for_stmt ::= setup_loop expr
|
||||
GET_AITER
|
||||
LOAD_CONST YIELD_FROM SETUP_EXCEPT GET_ANEXT LOAD_CONST
|
||||
YIELD_FROM
|
||||
store
|
||||
POP_BLOCK JUMP_FORWARD COME_FROM_EXCEPT DUP_TOP
|
||||
LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_FALSE
|
||||
POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_BLOCK
|
||||
JUMP_ABSOLUTE END_FINALLY COME_FROM
|
||||
for_block POP_BLOCK
|
||||
COME_FROM_LOOP
|
||||
|
||||
# Adds a COME_FROM_ASYNC_WITH over 3.5
|
||||
# FIXME: remove corresponding rule for 3.5?
|
||||
|
||||
except_suite ::= c_stmts_opt COME_FROM POP_EXCEPT jump_except COME_FROM
|
||||
|
||||
jb_cfs ::= come_from_opt JUMP_BACK come_froms
|
||||
ifelsestmtl ::= testexpr c_stmts_opt jb_cfs else_suitel
|
||||
ifelsestmtl ::= testexpr c_stmts_opt cf_jf_else else_suitel
|
||||
|
||||
# In 3.6+, A sequence of statements ending in a RETURN can cause
|
||||
# JUMP_FORWARD END_FINALLY to be omitted from try middle
|
||||
|
||||
except_return ::= POP_TOP POP_TOP POP_TOP returns
|
||||
except_handler ::= JUMP_FORWARD COME_FROM_EXCEPT except_return
|
||||
|
||||
# Try middle following a returns
|
||||
except_handler36 ::= COME_FROM_EXCEPT except_stmts END_FINALLY
|
||||
|
||||
stmt ::= try_except36
|
||||
try_except36 ::= SETUP_EXCEPT returns except_handler36
|
||||
opt_come_from_except
|
||||
try_except36 ::= SETUP_EXCEPT suite_stmts
|
||||
try_except36 ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
|
||||
except_handler36 come_from_opt
|
||||
|
||||
# 3.6 omits END_FINALLY sometimes
|
||||
except_handler36 ::= COME_FROM_EXCEPT except_stmts
|
||||
except_handler36 ::= JUMP_FORWARD COME_FROM_EXCEPT except_stmts
|
||||
except_handler ::= jmp_abs COME_FROM_EXCEPT except_stmts
|
||||
|
||||
stmt ::= tryfinally36
|
||||
tryfinally36 ::= SETUP_FINALLY returns
|
||||
COME_FROM_FINALLY suite_stmts
|
||||
tryfinally36 ::= SETUP_FINALLY returns
|
||||
COME_FROM_FINALLY suite_stmts_opt END_FINALLY
|
||||
except_suite_finalize ::= SETUP_FINALLY returns
|
||||
COME_FROM_FINALLY suite_stmts_opt END_FINALLY _jump
|
||||
|
||||
stmt ::= tryfinally_return_stmt
|
||||
tryfinally_return_stmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK LOAD_CONST
|
||||
COME_FROM_FINALLY
|
||||
|
||||
compare_chained2 ::= expr COMPARE_OP come_froms JUMP_FORWARD
|
||||
"""
|
||||
|
||||
def p_37misc(self, args):
|
||||
"""
|
||||
stmt ::= import37
|
||||
stmt ::= async_for_stmt37
|
||||
stmt ::= async_for_stmt
|
||||
stmt ::= async_forelse_stmt
|
||||
|
||||
# Where does the POP_TOP really belong?
|
||||
import37 ::= import POP_TOP
|
||||
|
||||
async_for_stmt ::= setup_loop expr
|
||||
GET_AITER
|
||||
SETUP_EXCEPT GET_ANEXT LOAD_CONST
|
||||
@@ -636,42 +571,36 @@ class Python37Parser(Python37BaseParser):
|
||||
COME_FROM
|
||||
POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_TOP POP_BLOCK
|
||||
else_suite COME_FROM_LOOP
|
||||
"""
|
||||
|
||||
attributes ::= IMPORT_FROM ROT_TWO POP_TOP IMPORT_FROM
|
||||
attributes ::= attributes ROT_TWO POP_TOP IMPORT_FROM
|
||||
|
||||
attribute37 ::= expr LOAD_METHOD
|
||||
expr ::= attribute37
|
||||
|
||||
# long except clauses in a loop can sometimes cause a JUMP_BACK to turn into a
|
||||
# JUMP_FORWARD to a JUMP_BACK. And when this happens there is an additional
|
||||
# ELSE added to the except_suite. With better flow control perhaps we can
|
||||
# sort this out better.
|
||||
except_suite ::= c_stmts_opt POP_EXCEPT jump_except ELSE
|
||||
|
||||
def p_37chained(self, args):
|
||||
"""
|
||||
testtrue ::= compare_chained37
|
||||
testfalse ::= compare_chained37_false
|
||||
|
||||
compare_chained ::= compare_chained37
|
||||
compare_chained ::= compare_chained37_false
|
||||
|
||||
compare_chained37 ::= expr compare_chained1a_37
|
||||
compare_chained37 ::= expr compare_chained1b_37
|
||||
compare_chained37 ::= expr compare_chained1c_37
|
||||
|
||||
compare_chained37_false ::= expr compare_chained1_false_37
|
||||
compare_chained37_false ::= expr compare_chained1b_false_37
|
||||
compare_chained37_false ::= expr compare_chained2_false_37
|
||||
|
||||
compare_chained1a_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained1a_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2a_37 COME_FROM POP_TOP COME_FROM
|
||||
compare_chained1b_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2b_37 POP_TOP JUMP_FORWARD COME_FROM
|
||||
compare_chained1b_false_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2b_false_37 POP_TOP _jump COME_FROM
|
||||
|
||||
compare_chained1c_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2a_37 POP_TOP
|
||||
|
||||
compare_chained1_false_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2c_37 POP_TOP JUMP_FORWARD COME_FROM
|
||||
compare_chained1_false_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2b_37 POP_TOP _jump COME_FROM
|
||||
compare_chained2b_false_37 POP_TOP _jump COME_FROM
|
||||
|
||||
compare_chained2_false_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2a_false_37 POP_TOP JUMP_BACK COME_FROM
|
||||
@@ -680,19 +609,26 @@ class Python37Parser(Python37BaseParser):
|
||||
compare_chained2a_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_TRUE JUMP_BACK
|
||||
compare_chained2a_false_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_FALSE jf_cfs
|
||||
|
||||
compare_chained2b_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_FALSE JUMP_FORWARD COME_FROM
|
||||
compare_chained2b_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_FALSE JUMP_FORWARD
|
||||
compare_chained2b_false_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_FALSE JUMP_FORWARD COME_FROM
|
||||
compare_chained2b_false_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_FALSE JUMP_FORWARD
|
||||
|
||||
compare_chained2c_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP come_from_opt POP_JUMP_IF_FALSE
|
||||
compare_chained2a_false_37 ELSE
|
||||
compare_chained2c_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP come_from_opt POP_JUMP_IF_FALSE
|
||||
compare_chained2a_false_37
|
||||
"""
|
||||
|
||||
def p_37conditionals(self, args):
|
||||
"""
|
||||
jf_cfs ::= JUMP_FORWARD _come_froms
|
||||
ifelsestmt ::= testexpr c_stmts_opt jf_cfs else_suite opt_come_from_except
|
||||
|
||||
jmp_false37 ::= POP_JUMP_IF_FALSE COME_FROM
|
||||
list_if ::= expr jmp_false37 list_iter
|
||||
list_iter ::= list_if37
|
||||
list_iter ::= list_if37_not
|
||||
list_if37 ::= compare_chained37_false list_iter
|
||||
list_if37_not ::= compare_chained37 list_iter
|
||||
|
||||
_ifstmts_jump ::= c_stmts_opt come_froms
|
||||
|
||||
@@ -759,6 +695,28 @@ class Python37Parser(Python37BaseParser):
|
||||
comp_iter ::= comp_body
|
||||
"""
|
||||
|
||||
def p_expr3(self, args):
|
||||
"""
|
||||
expr ::= conditionalnot
|
||||
conditionalnot ::= expr jmp_true expr jump_forward_else expr COME_FROM
|
||||
|
||||
# a JUMP_FORWARD to another JUMP_FORWARD can get turned into
|
||||
# a JUMP_ABSOLUTE with no COME_FROM
|
||||
conditional ::= expr jmp_false expr jump_absolute_else expr
|
||||
|
||||
# if_expr_true are for conditions which always evaluate true
|
||||
# There is dead or non-optional remnants of the condition code though,
|
||||
# and we use that to match on to reconstruct the source more accurately
|
||||
expr ::= if_expr_true
|
||||
if_expr_true ::= expr JUMP_FORWARD expr COME_FROM
|
||||
"""
|
||||
|
||||
def p_generator_exp3(self, args):
|
||||
"""
|
||||
load_genexpr ::= LOAD_GENEXPR
|
||||
load_genexpr ::= BUILD_TUPLE_1 LOAD_GENEXPR LOAD_STR
|
||||
"""
|
||||
|
||||
def p_grammar(self, args):
|
||||
"""
|
||||
sstmt ::= stmt
|
||||
@@ -798,16 +756,6 @@ class Python37Parser(Python37BaseParser):
|
||||
classdefdeco ::= classdefdeco1 store
|
||||
|
||||
expr ::= LOAD_ASSERT
|
||||
assert ::= assert_expr jmp_true LOAD_ASSERT RAISE_VARARGS_1 COME_FROM
|
||||
stmt ::= assert2
|
||||
assert2 ::= assert_expr jmp_true LOAD_ASSERT expr
|
||||
CALL_FUNCTION_1 RAISE_VARARGS_1 COME_FROM
|
||||
|
||||
assert_expr ::= expr
|
||||
assert_expr ::= assert_expr_or
|
||||
assert_expr ::= assert_expr_and
|
||||
assert_expr_or ::= assert_expr jmp_true expr
|
||||
assert_expr_and ::= assert_expr jmp_false expr
|
||||
|
||||
ifstmt ::= testexpr _ifstmts_jump
|
||||
|
||||
@@ -827,6 +775,7 @@ class Python37Parser(Python37BaseParser):
|
||||
iflaststmtl ::= testexpr c_stmts JUMP_BACK POP_BLOCK
|
||||
|
||||
# These are used to keep parse tree indices the same
|
||||
jump_forward_else ::= JUMP_FORWARD
|
||||
jump_forward_else ::= JUMP_FORWARD ELSE
|
||||
jump_forward_else ::= JUMP_FORWARD COME_FROM
|
||||
jump_absolute_else ::= JUMP_ABSOLUTE ELSE
|
||||
@@ -950,11 +899,28 @@ class Python37Parser(Python37BaseParser):
|
||||
ret_or ::= expr JUMP_IF_TRUE_OR_POP ret_expr_or_cond COME_FROM
|
||||
ret_cond ::= expr POP_JUMP_IF_FALSE expr RETURN_END_IF COME_FROM ret_expr_or_cond
|
||||
|
||||
or ::= expr JUMP_IF_TRUE_OR_POP expr COME_FROM
|
||||
or ::= expr JUMP_IF_TRUE expr COME_FROM
|
||||
and ::= expr JUMP_IF_FALSE_OR_POP expr COME_FROM
|
||||
jitop_come_from ::= JUMP_IF_TRUE_OR_POP COME_FROM
|
||||
jifop_come_from ::= JUMP_IF_FALSE_OR_POP COME_FROM
|
||||
or ::= and jitop_come_from expr COME_FROM
|
||||
or ::= expr JUMP_IF_TRUE_OR_POP expr COME_FROM
|
||||
or ::= expr JUMP_IF_TRUE expr COME_FROM
|
||||
|
||||
testfalse_not_or ::= expr jmp_false expr jmp_false COME_FROM
|
||||
testfalse_not_and ::= and jmp_true come_froms
|
||||
|
||||
testfalse_not_and ::= expr jmp_false expr jmp_true COME_FROM
|
||||
testfalse ::= testfalse_not_or
|
||||
testfalse ::= testfalse_not_and
|
||||
testfalse ::= or jmp_false COME_FROM
|
||||
or ::= expr jmp_true expr
|
||||
|
||||
and ::= expr JUMP_IF_FALSE_OR_POP expr come_from_opt
|
||||
and ::= expr jifop_come_from expr
|
||||
and ::= expr JUMP_IF_FALSE expr COME_FROM
|
||||
|
||||
pjit_come_from ::= POP_JUMP_IF_TRUE COME_FROM
|
||||
or ::= expr pjit_come_from expr
|
||||
|
||||
## FIXME: Is the below needed or is it covered above??
|
||||
and ::= expr jmp_false expr COME_FROM
|
||||
or ::= expr jmp_true expr COME_FROM
|
||||
@@ -970,6 +936,8 @@ class Python37Parser(Python37BaseParser):
|
||||
"""
|
||||
stmt ::= if_expr_lambda
|
||||
stmt ::= conditional_not_lambda
|
||||
stmt ::= ifstmtl
|
||||
|
||||
if_expr_lambda ::= expr jmp_false expr return_if_lambda
|
||||
return_stmt_lambda LAMBDA_MARKER
|
||||
conditional_not_lambda
|
||||
@@ -984,6 +952,10 @@ class Python37Parser(Python37BaseParser):
|
||||
|
||||
stmt ::= whileTruestmt
|
||||
ifelsestmt ::= testexpr c_stmts_opt JUMP_FORWARD else_suite _come_froms
|
||||
|
||||
_ifstmts_jumpl ::= c_stmts JUMP_BACK
|
||||
_ifstmts_jumpl ::= _ifstmts_jump
|
||||
ifstmtl ::= testexpr _ifstmts_jumpl
|
||||
"""
|
||||
|
||||
def p_loop_stmt3(self, args):
|
||||
@@ -1006,12 +978,20 @@ class Python37Parser(Python37BaseParser):
|
||||
whilestmt ::= setup_loop testexpr l_stmts_opt COME_FROM JUMP_BACK POP_BLOCK
|
||||
COME_FROM_LOOP
|
||||
|
||||
|
||||
whilestmt ::= setup_loop testexpr l_stmts_opt JUMP_BACK POP_BLOCK
|
||||
COME_FROM_LOOP
|
||||
|
||||
whilestmt ::= setup_loop testexpr returns POP_BLOCK
|
||||
COME_FROM_LOOP
|
||||
|
||||
# We can be missing a COME_FROM_LOOP if the "while" statement is nested inside an if/else
|
||||
# so after the POP_BLOCK we have a JUMP_FORWARD which forms the "else" portion of the "if"
|
||||
# This is undoubtedly some sort of JUMP optimization going on.
|
||||
|
||||
whilestmt ::= setup_loop testexpr l_stmts_opt JUMP_BACK come_froms
|
||||
POP_BLOCK
|
||||
|
||||
while1elsestmt ::= setup_loop l_stmts JUMP_BACK
|
||||
else_suitel
|
||||
|
||||
@@ -1019,11 +999,12 @@ class Python37Parser(Python37BaseParser):
|
||||
else_suitel COME_FROM_LOOP
|
||||
|
||||
whileTruestmt ::= setup_loop l_stmts_opt JUMP_BACK POP_BLOCK
|
||||
COME_FROM_LOOP
|
||||
_come_froms
|
||||
|
||||
# FIXME: Python 3.? starts adding branch optimization? Put this starting there.
|
||||
|
||||
while1stmt ::= setup_loop l_stmts COME_FROM_LOOP
|
||||
while1stmt ::= setup_loop l_stmts COME_FROM_LOOP JUMP_BACK POP_BLOCK COME_FROM_LOOP
|
||||
while1stmt ::= setup_loop l_stmts COME_FROM JUMP_BACK COME_FROM_LOOP
|
||||
|
||||
while1elsestmt ::= setup_loop l_stmts JUMP_BACK
|
||||
@@ -1034,26 +1015,105 @@ class Python37Parser(Python37BaseParser):
|
||||
COME_FROM_LOOP
|
||||
"""
|
||||
|
||||
def p_generator_exp3(self, args):
|
||||
def p_36misc(self, args):
|
||||
"""
|
||||
load_genexpr ::= LOAD_GENEXPR
|
||||
load_genexpr ::= BUILD_TUPLE_1 LOAD_GENEXPR LOAD_STR
|
||||
sstmt ::= sstmt RETURN_LAST
|
||||
|
||||
# 3.6 redoes how return_closure works. FIXME: Isolate to LOAD_CLOSURE
|
||||
return_closure ::= LOAD_CLOSURE DUP_TOP STORE_NAME RETURN_VALUE RETURN_LAST
|
||||
|
||||
for_block ::= l_stmts_opt come_from_loops JUMP_BACK
|
||||
come_from_loops ::= COME_FROM_LOOP*
|
||||
|
||||
whilestmt ::= setup_loop testexpr l_stmts_opt
|
||||
JUMP_BACK come_froms POP_BLOCK COME_FROM_LOOP
|
||||
whilestmt ::= setup_loop testexpr l_stmts_opt
|
||||
come_froms JUMP_BACK come_froms POP_BLOCK COME_FROM_LOOP
|
||||
|
||||
# 3.6 due to jump optimization, we sometimes add RETURN_END_IF where
|
||||
# RETURN_VALUE is meant. Specifcally this can happen in
|
||||
# ifelsestmt -> ...else_suite _. suite_stmts... (last) stmt
|
||||
return ::= ret_expr RETURN_END_IF
|
||||
return ::= ret_expr RETURN_VALUE COME_FROM
|
||||
return_stmt_lambda ::= ret_expr RETURN_VALUE_LAMBDA COME_FROM
|
||||
|
||||
# A COME_FROM is dropped off because of JUMP-to-JUMP optimization
|
||||
and ::= expr jmp_false expr
|
||||
and ::= expr jmp_false expr jmp_false
|
||||
|
||||
jf_cf ::= JUMP_FORWARD COME_FROM
|
||||
cf_jf_else ::= come_froms JUMP_FORWARD ELSE
|
||||
|
||||
conditional ::= expr jmp_false expr jf_cf expr COME_FROM
|
||||
|
||||
async_for_stmt ::= setup_loop expr
|
||||
GET_AITER
|
||||
LOAD_CONST YIELD_FROM SETUP_EXCEPT GET_ANEXT LOAD_CONST
|
||||
YIELD_FROM
|
||||
store
|
||||
POP_BLOCK JUMP_FORWARD COME_FROM_EXCEPT DUP_TOP
|
||||
LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_FALSE
|
||||
POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_BLOCK
|
||||
JUMP_ABSOLUTE END_FINALLY COME_FROM
|
||||
for_block POP_BLOCK
|
||||
COME_FROM_LOOP
|
||||
|
||||
# Adds a COME_FROM_ASYNC_WITH over 3.5
|
||||
# FIXME: remove corresponding rule for 3.5?
|
||||
|
||||
except_suite ::= c_stmts_opt COME_FROM POP_EXCEPT jump_except COME_FROM
|
||||
|
||||
jb_cfs ::= come_from_opt JUMP_BACK come_froms
|
||||
ifelsestmtl ::= testexpr c_stmts_opt jb_cfs else_suitel
|
||||
ifelsestmtl ::= testexpr c_stmts_opt cf_jf_else else_suitel
|
||||
|
||||
# In 3.6+, A sequence of statements ending in a RETURN can cause
|
||||
# JUMP_FORWARD END_FINALLY to be omitted from try middle
|
||||
|
||||
except_return ::= POP_TOP POP_TOP POP_TOP returns
|
||||
except_handler ::= JUMP_FORWARD COME_FROM_EXCEPT except_return
|
||||
|
||||
# Try middle following a returns
|
||||
except_handler36 ::= COME_FROM_EXCEPT except_stmts END_FINALLY
|
||||
|
||||
stmt ::= try_except36
|
||||
try_except36 ::= SETUP_EXCEPT returns except_handler36
|
||||
opt_come_from_except
|
||||
try_except36 ::= SETUP_EXCEPT suite_stmts
|
||||
try_except36 ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
|
||||
except_handler36 come_from_opt
|
||||
|
||||
# 3.6 omits END_FINALLY sometimes
|
||||
except_handler36 ::= COME_FROM_EXCEPT except_stmts
|
||||
except_handler36 ::= JUMP_FORWARD COME_FROM_EXCEPT except_stmts
|
||||
except_handler ::= jmp_abs COME_FROM_EXCEPT except_stmts
|
||||
|
||||
stmt ::= tryfinally36
|
||||
tryfinally36 ::= SETUP_FINALLY returns
|
||||
COME_FROM_FINALLY suite_stmts
|
||||
tryfinally36 ::= SETUP_FINALLY returns
|
||||
COME_FROM_FINALLY suite_stmts_opt END_FINALLY
|
||||
except_suite_finalize ::= SETUP_FINALLY returns
|
||||
COME_FROM_FINALLY suite_stmts_opt END_FINALLY _jump
|
||||
|
||||
stmt ::= tryfinally_return_stmt
|
||||
tryfinally_return_stmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK LOAD_CONST
|
||||
COME_FROM_FINALLY
|
||||
|
||||
compare_chained2 ::= expr COMPARE_OP come_froms JUMP_FORWARD
|
||||
"""
|
||||
|
||||
def p_expr3(self, args):
|
||||
def p_37misc(self, args):
|
||||
"""
|
||||
expr ::= conditionalnot
|
||||
conditionalnot ::= expr jmp_true expr jump_forward_else expr COME_FROM
|
||||
# long except clauses in a loop can sometimes cause a JUMP_BACK to turn into a
|
||||
# JUMP_FORWARD to a JUMP_BACK. And when this happens there is an additional
|
||||
# ELSE added to the except_suite. With better flow control perhaps we can
|
||||
# sort this out better.
|
||||
except_suite ::= c_stmts_opt POP_EXCEPT jump_except ELSE
|
||||
|
||||
# a JUMP_FORWARD to another JUMP_FORWARD can get turned into
|
||||
# a JUMP_ABSOLUTE with no COME_FROM
|
||||
conditional ::= expr jmp_false expr jump_absolute_else expr
|
||||
|
||||
# if_expr_true are for conditions which always evaluate true
|
||||
# There is dead or non-optional remnants of the condition code though,
|
||||
# and we use that to match on to reconstruct the source more accurately
|
||||
expr ::= if_expr_true
|
||||
if_expr_true ::= expr JUMP_FORWARD expr COME_FROM
|
||||
# FIXME: the below is to work around test_grammar expecting a "call" to be
|
||||
# on the LHS because it is also somewhere on in a rule.
|
||||
call ::= expr CALL_METHOD_0
|
||||
"""
|
||||
|
||||
def customize_grammar_rules(self, tokens, customize):
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2016-2017, 2019 Rocky Bernstein
|
||||
# Copyright (c) 2016-2017, 2019-2020 Rocky Bernstein
|
||||
"""
|
||||
Python 3.7 base code. We keep non-custom-generated grammar rules out of this file.
|
||||
"""
|
||||
@@ -424,6 +424,7 @@ class Python37BaseParser(PythonParser):
|
||||
dict_comp ::= LOAD_DICTCOMP LOAD_STR MAKE_FUNCTION_0 expr
|
||||
GET_ITER CALL_FUNCTION_1
|
||||
classdefdeco1 ::= expr classdefdeco2 CALL_FUNCTION_1
|
||||
classdefdeco1 ::= expr classdefdeco1 CALL_FUNCTION_1
|
||||
"""
|
||||
self.addRule(rule, nop_func)
|
||||
|
||||
@@ -492,23 +493,57 @@ class Python37BaseParser(PythonParser):
|
||||
self.addRule(
|
||||
"""
|
||||
expr ::= get_iter
|
||||
attribute ::= expr GET_ITER
|
||||
get_iter ::= expr GET_ITER
|
||||
""",
|
||||
nop_func,
|
||||
)
|
||||
custom_ops_processed.add(opname)
|
||||
elif opname == "GET_AITER":
|
||||
self.addRule(
|
||||
"""
|
||||
expr ::= generator_exp_async
|
||||
generator_exp_async ::= load_genexpr LOAD_STR MAKE_FUNCTION_0 expr
|
||||
GET_AITER CALL_FUNCTION_1
|
||||
|
||||
stmt ::= genexpr_func_async
|
||||
|
||||
func_async_prefix ::= SETUP_EXCEPT GET_ANEXT LOAD_CONST YIELD_FROM
|
||||
func_async_middle ::= POP_BLOCK JUMP_FORWARD COME_FROM_EXCEPT
|
||||
DUP_TOP LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_TRUE
|
||||
END_FINALLY COME_FROM
|
||||
genexpr_func_async ::= LOAD_FAST func_async_prefix
|
||||
store func_async_middle comp_iter
|
||||
JUMP_BACK COME_FROM
|
||||
POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_TOP
|
||||
|
||||
expr ::= listcomp_async
|
||||
listcomp_async ::= LOAD_LISTCOMP LOAD_STR MAKE_FUNCTION_0
|
||||
expr GET_AITER CALL_FUNCTION_1
|
||||
GET_AWAITABLE LOAD_CONST
|
||||
YIELD_FROM
|
||||
|
||||
expr ::= listcomp_async
|
||||
listcomp_async ::= BUILD_LIST_0 LOAD_FAST func_async_prefix
|
||||
store func_async_middle list_iter
|
||||
JUMP_BACK COME_FROM
|
||||
POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_TOP
|
||||
|
||||
""",
|
||||
nop_func,
|
||||
)
|
||||
custom_ops_processed.add(opname)
|
||||
elif opname == "JUMP_IF_NOT_DEBUG":
|
||||
v = token.attr
|
||||
self.addRule(
|
||||
"""
|
||||
stmt ::= assert_pypy
|
||||
stmt ::= assert2_pypy", nop_func)
|
||||
assert_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
|
||||
assert_pypy ::= JUMP_IF_NOT_DEBUG expr jmp_true
|
||||
LOAD_ASSERT RAISE_VARARGS_1 COME_FROM
|
||||
assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
|
||||
LOAD_ASSERT expr CALL_FUNCTION_1
|
||||
RAISE_VARARGS_1 COME_FROM
|
||||
assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
|
||||
assert2_pypy ::= JUMP_IF_NOT_DEBUG expr jmp_true
|
||||
LOAD_ASSERT expr CALL_FUNCTION_1
|
||||
RAISE_VARARGS_1 COME_FROM,
|
||||
""",
|
||||
@@ -546,6 +581,18 @@ class Python37BaseParser(PythonParser):
|
||||
elif opname == "LOAD_LISTCOMP":
|
||||
self.add_unique_rule("expr ::= listcomp", opname, token.attr, customize)
|
||||
custom_ops_processed.add(opname)
|
||||
elif opname == "LOAD_NAME":
|
||||
if token.attr == "__annotations__" and "SETUP_ANNOTATIONS" in self.seen_ops:
|
||||
token.kind = "LOAD_ANNOTATION"
|
||||
self.addRule(
|
||||
"""
|
||||
stmt ::= SETUP_ANNOTATIONS
|
||||
stmt ::= ann_assign
|
||||
ann_assign ::= expr LOAD_ANNOTATION LOAD_STR STORE_SUBSCR
|
||||
""",
|
||||
nop_func,
|
||||
)
|
||||
pass
|
||||
elif opname == "LOAD_SETCOMP":
|
||||
# Should this be generalized and put under MAKE_FUNCTION?
|
||||
if has_get_iter_call_function1:
|
||||
@@ -927,6 +974,7 @@ class Python37BaseParser(PythonParser):
|
||||
pass
|
||||
|
||||
self.check_reduce["and"] = "AST"
|
||||
self.check_reduce["annotate_tuple"] = "noAST"
|
||||
self.check_reduce["aug_assign1"] = "AST"
|
||||
self.check_reduce["aug_assign2"] = "AST"
|
||||
self.check_reduce["while1stmt"] = "noAST"
|
||||
@@ -936,7 +984,9 @@ class Python37BaseParser(PythonParser):
|
||||
self.check_reduce["iflaststmt"] = "AST"
|
||||
self.check_reduce["iflaststmtl"] = "AST"
|
||||
self.check_reduce["ifstmt"] = "AST"
|
||||
self.check_reduce["annotate_tuple"] = "noAST"
|
||||
self.check_reduce["ifstmtl"] = "AST"
|
||||
self.check_reduce["import_from37"] = "AST"
|
||||
self.check_reduce["or"] = "tokens"
|
||||
|
||||
# FIXME: remove parser errors caused by the below
|
||||
# self.check_reduce['while1elsestmt'] = 'noAST'
|
||||
@@ -1028,7 +1078,6 @@ class Python37BaseParser(PythonParser):
|
||||
|
||||
if lhs == "and" and ast:
|
||||
# FIXME: put in a routine somewhere
|
||||
# Compare with parse30.py of uncompyle6
|
||||
jmp = ast[1]
|
||||
if jmp.kind.startswith("jmp_"):
|
||||
if last == n:
|
||||
@@ -1043,7 +1092,14 @@ class Python37BaseParser(PythonParser):
|
||||
return jmp_target != jmp2_target
|
||||
elif rule == ("and", ("expr", "jmp_false", "expr")):
|
||||
if tokens[last] == "POP_JUMP_IF_FALSE":
|
||||
# Ok if jump_target doesn't jump to last instruction
|
||||
return jmp_target != tokens[last].attr
|
||||
elif tokens[last] in ("POP_JUMP_IF_TRUE", "JUMP_IF_TRUE_OR_POP"):
|
||||
# Ok if jump_target jumps to a COME_FROM after
|
||||
# the last instruction or jumps right after last instruction
|
||||
if last + 1 < n and tokens[last + 1] == "COME_FROM":
|
||||
return jmp_target != tokens[last + 1].off2int()
|
||||
return jmp_target + 2 != tokens[last].attr
|
||||
elif rule == ("and", ("expr", "jmp_false", "expr", "COME_FROM")):
|
||||
return ast[-1].attr != jmp_offset
|
||||
# elif rule == ("and", ("expr", "jmp_false", "expr", "COME_FROM")):
|
||||
@@ -1056,6 +1112,17 @@ class Python37BaseParser(PythonParser):
|
||||
return True
|
||||
elif lhs == "annotate_tuple":
|
||||
return not isinstance(tokens[first].attr, tuple)
|
||||
elif lhs == "or":
|
||||
# FIXME: This is a cheap test. Should we do something with an AST like we
|
||||
# do with "and"?
|
||||
# "or"s with constants like this will have "COME_FROM" at the end
|
||||
return tokens[last] in (
|
||||
"LOAD_ASSERT",
|
||||
"LOAD_STR",
|
||||
"LOAD_CODE",
|
||||
"LOAD_CONST",
|
||||
"RAISE_VARARGS_1",
|
||||
)
|
||||
elif lhs == "while1elsestmt":
|
||||
|
||||
if last == n:
|
||||
@@ -1094,7 +1161,7 @@ class Python37BaseParser(PythonParser):
|
||||
for i in range(cfl - 1, first, -1):
|
||||
if tokens[i] != "POP_BLOCK":
|
||||
break
|
||||
if tokens[i].kind not in ("JUMP_BACK", "RETURN_VALUE"):
|
||||
if tokens[i].kind not in ("JUMP_BACK", "RETURN_VALUE", "RAISE_VARARGS_1"):
|
||||
if not tokens[i].kind.startswith("COME_FROM"):
|
||||
return True
|
||||
|
||||
@@ -1107,9 +1174,8 @@ class Python37BaseParser(PythonParser):
|
||||
last -= 1
|
||||
offset = tokens[last].off2int()
|
||||
assert tokens[first] == "SETUP_LOOP"
|
||||
if offset != tokens[first].attr:
|
||||
return True
|
||||
return False
|
||||
# SETUP_LOOP location must jump either to the last token or the token after the last one
|
||||
return tokens[first].attr not in (offset, offset + 2)
|
||||
elif lhs == "_ifstmts_jump" and len(rule[1]) > 1 and ast:
|
||||
come_froms = ast[-1]
|
||||
# Make sure all of the "come froms" offset at the
|
||||
@@ -1143,6 +1209,10 @@ class Python37BaseParser(PythonParser):
|
||||
return False
|
||||
|
||||
if isinstance(come_froms, Token):
|
||||
if tokens[pop_jump_index].attr < tokens[pop_jump_index].offset and ast[0] != "pass":
|
||||
# This is a jump backwards to a loop. All bets are off here when there the
|
||||
# unless statement is "pass" which has no instructions associated with it.
|
||||
return False
|
||||
return (
|
||||
come_froms.attr is not None
|
||||
and tokens[pop_jump_index].offset > come_froms.attr
|
||||
@@ -1153,36 +1223,76 @@ class Python37BaseParser(PythonParser):
|
||||
else:
|
||||
return tokens[pop_jump_index].offset > come_froms[-1].attr
|
||||
|
||||
elif lhs == "ifstmt" and ast:
|
||||
elif lhs in ("ifstmt", "ifstmtl"):
|
||||
# FIXME: put in a routine somewhere
|
||||
testexpr = ast[0]
|
||||
|
||||
if (last + 1) < n and tokens[last + 1] == "COME_FROM_LOOP":
|
||||
# iflastsmtl jumped outside of loop. No good.
|
||||
return True
|
||||
n = len(tokens)
|
||||
if lhs == "ifstmtl":
|
||||
if last == n:
|
||||
last -= 1
|
||||
pass
|
||||
if tokens[last].attr and isinstance(tokens[last].attr, int):
|
||||
return tokens[first].offset < tokens[last].attr
|
||||
pass
|
||||
|
||||
if testexpr[0] in ("testtrue", "testfalse"):
|
||||
test = testexpr[0]
|
||||
if len(test) > 1 and test[1].kind.startswith("jmp_"):
|
||||
if last == n:
|
||||
last -= 1
|
||||
jmp_target = test[1][0].attr
|
||||
if tokens[first].off2int() <= jmp_target < tokens[last].off2int():
|
||||
return True
|
||||
# jmp_target less than tokens[first] is okay - is to a loop
|
||||
# jmp_target equal tokens[last] is also okay: normal non-optimized non-loop jump
|
||||
if jmp_target > tokens[last].off2int():
|
||||
# One more weird case to look out for
|
||||
# if c1:
|
||||
# if c2: # Jumps around the *outer* "else"
|
||||
# ...
|
||||
# else:
|
||||
if jmp_target == tokens[last - 1].attr:
|
||||
return False
|
||||
if last < n and tokens[last].kind.startswith("JUMP"):
|
||||
return False
|
||||
# Make sure jumps don't extend beyond the end of the if statement.
|
||||
l = last
|
||||
if l == n:
|
||||
l -= 1
|
||||
if isinstance(tokens[l].offset, str):
|
||||
last_offset = int(tokens[l].offset.split("_")[0], 10)
|
||||
else:
|
||||
last_offset = tokens[l].offset
|
||||
for i in range(first, l):
|
||||
t = tokens[i]
|
||||
if t.kind == "POP_JUMP_IF_FALSE":
|
||||
pjif_target = t.attr
|
||||
if pjif_target > last_offset:
|
||||
# In come cases, where we have long bytecode, a
|
||||
# "POP_JUMP_IF_FALSE" offset might be too
|
||||
# large for the instruction; so instead it
|
||||
# jumps to a JUMP_FORWARD. Allow that here.
|
||||
if tokens[l] == "JUMP_FORWARD":
|
||||
return tokens[l].attr != pjif_target
|
||||
return True
|
||||
pass
|
||||
pass
|
||||
pass
|
||||
|
||||
if ast:
|
||||
testexpr = ast[0]
|
||||
|
||||
if (last + 1) < n and tokens[last + 1] == "COME_FROM_LOOP":
|
||||
# iflastsmtl jumped outside of loop. No good.
|
||||
return True
|
||||
|
||||
if testexpr[0] in ("testtrue", "testfalse"):
|
||||
test = testexpr[0]
|
||||
if len(test) > 1 and test[1].kind.startswith("jmp_"):
|
||||
if last == n:
|
||||
last -= 1
|
||||
jmp_target = test[1][0].attr
|
||||
if (
|
||||
tokens[first].off2int()
|
||||
<= jmp_target
|
||||
< tokens[last].off2int()
|
||||
):
|
||||
return True
|
||||
# jmp_target less than tokens[first] is okay - is to a loop
|
||||
# jmp_target equal tokens[last] is also okay: normal non-optimized non-loop jump
|
||||
if jmp_target > tokens[last].off2int():
|
||||
# One more weird case to look out for
|
||||
# if c1:
|
||||
# if c2: # Jumps around the *outer* "else"
|
||||
# ...
|
||||
# else:
|
||||
if jmp_target == tokens[last - 1].attr:
|
||||
return False
|
||||
if last < n and tokens[last].kind.startswith("JUMP"):
|
||||
return False
|
||||
return True
|
||||
|
||||
pass
|
||||
pass
|
||||
return False
|
||||
elif lhs in ("iflaststmt", "iflaststmtl") and ast:
|
||||
@@ -1201,7 +1311,11 @@ class Python37BaseParser(PythonParser):
|
||||
# jmp_target less than tokens[first] is okay - is to a loop
|
||||
# jmp_target equal tokens[last] is also okay: normal non-optimized non-loop jump
|
||||
|
||||
if (last + 1) < n and tokens[last - 1] != "JUMP_BACK" and tokens[last + 1] == "COME_FROM_LOOP":
|
||||
if (
|
||||
(last + 1) < n
|
||||
and tokens[last - 1] != "JUMP_BACK"
|
||||
and tokens[last + 1] == "COME_FROM_LOOP"
|
||||
):
|
||||
# iflastsmtl is not at the end of a loop, but jumped outside of loop. No good.
|
||||
# FIXME: check that tokens[last] == "POP_BLOCK"? Or allow for it not to appear?
|
||||
return True
|
||||
@@ -1245,6 +1359,36 @@ class Python37BaseParser(PythonParser):
|
||||
"_come_froms",
|
||||
),
|
||||
),
|
||||
(
|
||||
"ifelsestmt",
|
||||
(
|
||||
"testexpr",
|
||||
"c_stmts_opt",
|
||||
"jump_forward_else",
|
||||
"else_suite",
|
||||
'\\e__come_froms'
|
||||
),
|
||||
),
|
||||
(
|
||||
"ifelsestmt",
|
||||
(
|
||||
"testexpr",
|
||||
"c_stmts_opt",
|
||||
"jf_cfs",
|
||||
"else_suite",
|
||||
'\\e_opt_come_from_except',
|
||||
),
|
||||
),
|
||||
(
|
||||
"ifelsestmt",
|
||||
(
|
||||
"testexpr",
|
||||
"c_stmts_opt",
|
||||
"come_froms",
|
||||
"else_suite",
|
||||
'come_froms',
|
||||
),
|
||||
),
|
||||
(
|
||||
"ifelsestmt",
|
||||
(
|
||||
@@ -1267,7 +1411,8 @@ class Python37BaseParser(PythonParser):
|
||||
if come_froms == "opt_come_from_except" and len(come_froms) > 0:
|
||||
come_froms = come_froms[0]
|
||||
if not isinstance(come_froms, Token):
|
||||
return tokens[first].offset > come_froms[-1].attr
|
||||
if len(come_froms):
|
||||
return tokens[first].offset > come_froms[-1].attr
|
||||
elif tokens[first].offset > come_froms.attr:
|
||||
return True
|
||||
|
||||
@@ -1285,20 +1430,46 @@ class Python37BaseParser(PythonParser):
|
||||
|
||||
# Check that the condition portion of the "if"
|
||||
# jumps to the "else" part.
|
||||
# Compare with parse30.py of uncompyle6
|
||||
if testexpr[0] in ("testtrue", "testfalse"):
|
||||
test = testexpr[0]
|
||||
|
||||
else_suite = ast[3]
|
||||
assert else_suite == "else_suite"
|
||||
|
||||
if len(test) > 1 and test[1].kind.startswith("jmp_"):
|
||||
if last == n:
|
||||
last -= 1
|
||||
jmp = test[1]
|
||||
jmp_target = jmp[0].attr
|
||||
|
||||
# FIXME: the jump inside "else" check below should be added.
|
||||
#
|
||||
# add this until we can find out what's wrong with
|
||||
# not being able to parse:
|
||||
# if a and b or c:
|
||||
# x = 1
|
||||
# else:
|
||||
# x = 2
|
||||
|
||||
# FIXME: add this
|
||||
# if jmp_target < else_suite.first_child().off2int():
|
||||
# return True
|
||||
|
||||
if tokens[first].off2int() > jmp_target:
|
||||
return True
|
||||
|
||||
return (jmp_target > tokens[last].off2int()) and tokens[
|
||||
last
|
||||
] != "JUMP_FORWARD"
|
||||
|
||||
return False
|
||||
elif lhs == "import_from37":
|
||||
importlist37 = ast[3]
|
||||
alias37 = importlist37[0]
|
||||
if importlist37 == "importlist37" and alias37 == "alias37":
|
||||
store = alias37[1]
|
||||
assert store == "store"
|
||||
return alias37[0].attr != store[0].attr
|
||||
return False
|
||||
|
||||
return False
|
||||
|
@@ -37,14 +37,15 @@ class Python38Parser(Python37Parser):
|
||||
stmt ::= forelsestmt38
|
||||
stmt ::= forelselaststmt38
|
||||
stmt ::= forelselaststmtl38
|
||||
stmt ::= tryfinally38
|
||||
stmt ::= tryfinally38stmt
|
||||
stmt ::= tryfinally38rstmt
|
||||
stmt ::= tryfinally38astmt
|
||||
stmt ::= try_elsestmtl38
|
||||
stmt ::= try_except_ret38
|
||||
stmt ::= try_except38
|
||||
stmt ::= whilestmt38
|
||||
stmt ::= whileTruestmt38
|
||||
stmt ::= call
|
||||
stmt ::= ifstmtl
|
||||
|
||||
break ::= POP_BLOCK BREAK_LOOP
|
||||
break ::= POP_BLOCK POP_TOP BREAK_LOOP
|
||||
@@ -80,25 +81,6 @@ class Python38Parser(Python37Parser):
|
||||
END_ASYNC_FOR
|
||||
else_suite
|
||||
|
||||
|
||||
async_with_stmt ::= expr BEFORE_ASYNC_WITH GET_AWAITABLE LOAD_CONST YIELD_FROM
|
||||
SETUP_ASYNC_WITH POP_TOP
|
||||
suite_stmts
|
||||
POP_TOP POP_BLOCK
|
||||
BEGIN_FINALLY COME_FROM_ASYNC_WITH
|
||||
WITH_CLEANUP_START
|
||||
GET_AWAITABLE LOAD_CONST YIELD_FROM
|
||||
WITH_CLEANUP_FINISH END_FINALLY
|
||||
|
||||
async_with_as_stmt ::= expr BEFORE_ASYNC_WITH GET_AWAITABLE LOAD_CONST YIELD_FROM
|
||||
SETUP_ASYNC_WITH store
|
||||
suite_stmts
|
||||
POP_TOP POP_BLOCK
|
||||
BEGIN_FINALLY COME_FROM_ASYNC_WITH
|
||||
WITH_CLEANUP_START
|
||||
GET_AWAITABLE LOAD_CONST YIELD_FROM
|
||||
WITH_CLEANUP_FINISH END_FINALLY
|
||||
|
||||
return ::= ret_expr ROT_TWO POP_TOP RETURN_VALUE
|
||||
|
||||
# 3.8 can push a looping JUMP_BACK into into a JUMP_ from a statement that jumps to it
|
||||
@@ -106,10 +88,6 @@ class Python38Parser(Python37Parser):
|
||||
ifpoplaststmtl ::= testexpr POP_TOP c_stmts_opt JUMP_BACK
|
||||
ifelsestmtl ::= testexpr c_stmts_opt jb_cfs else_suitel JUMP_BACK come_froms
|
||||
|
||||
_ifstmts_jumpl ::= c_stmts JUMP_BACK
|
||||
_ifstmts_jumpl ::= _ifstmts_jump
|
||||
ifstmtl ::= testexpr _ifstmts_jumpl
|
||||
|
||||
for38 ::= expr get_iter store for_block JUMP_BACK
|
||||
for38 ::= expr get_for_iter store for_block JUMP_BACK
|
||||
for38 ::= expr get_for_iter store for_block JUMP_BACK POP_BLOCK
|
||||
@@ -167,17 +145,22 @@ class Python38Parser(Python37Parser):
|
||||
tryfinallystmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
|
||||
BEGIN_FINALLY COME_FROM_FINALLY suite_stmts_opt
|
||||
END_FINALLY
|
||||
tryfinally38 ::= SETUP_FINALLY POP_BLOCK CALL_FINALLY
|
||||
tryfinally38rstmt ::= SETUP_FINALLY POP_BLOCK CALL_FINALLY
|
||||
returns
|
||||
COME_FROM_FINALLY END_FINALLY suite_stmts
|
||||
tryfinally38 ::= SETUP_FINALLY POP_BLOCK CALL_FINALLY
|
||||
tryfinally38rstmt ::= SETUP_FINALLY POP_BLOCK CALL_FINALLY
|
||||
returns
|
||||
COME_FROM_FINALLY POP_FINALLY returns
|
||||
END_FINALLY
|
||||
tryfinally_return_stmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
|
||||
BEGIN_FINALLY COME_FROM_FINALLY
|
||||
POP_FINALLY suite_stmts_opt END_FINALLY
|
||||
|
||||
tryfinally38stmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
|
||||
BEGIN_FINALLY COME_FROM_FINALLY
|
||||
POP_FINALLY suite_stmts_opt END_FINALLY
|
||||
tryfinally38stmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
|
||||
BEGIN_FINALLY COME_FROM_FINALLY
|
||||
POP_FINALLY suite_stmts_opt END_FINALLY
|
||||
tryfinally38astmt ::= LOAD_CONST SETUP_FINALLY suite_stmts_opt POP_BLOCK
|
||||
BEGIN_FINALLY COME_FROM_FINALLY
|
||||
POP_FINALLY POP_TOP suite_stmts_opt END_FINALLY POP_TOP
|
||||
"""
|
||||
|
||||
def __init__(self, debug_parser=PARSER_DEFAULT_DEBUG):
|
||||
@@ -256,10 +239,8 @@ class Python38Parser(Python37Parser):
|
||||
def customize_grammar_rules(self, tokens, customize):
|
||||
super(Python37Parser, self).customize_grammar_rules(tokens, customize)
|
||||
self.remove_rules_38()
|
||||
self.check_reduce["ifstmt"] = "tokens"
|
||||
self.check_reduce["whileTruestmt38"] = "tokens"
|
||||
self.check_reduce["whilestmt38"] = "tokens"
|
||||
self.check_reduce["ifstmtl"] = "tokens"
|
||||
|
||||
def reduce_is_invalid(self, rule, ast, tokens, first, last):
|
||||
invalid = super(Python38Parser,
|
||||
@@ -269,30 +250,7 @@ class Python38Parser(Python37Parser):
|
||||
if invalid:
|
||||
return invalid
|
||||
lhs = rule[0]
|
||||
if lhs == "ifstmt":
|
||||
# Make sure jumps don't extend beyond the end of the if statement.
|
||||
l = last
|
||||
if l == len(tokens):
|
||||
l -= 1
|
||||
if isinstance(tokens[l].offset, str):
|
||||
last_offset = int(tokens[l].offset.split("_")[0], 10)
|
||||
else:
|
||||
last_offset = tokens[l].offset
|
||||
for i in range(first, l):
|
||||
t = tokens[i]
|
||||
if t.kind == "POP_JUMP_IF_FALSE":
|
||||
if t.attr > last_offset:
|
||||
return True
|
||||
pass
|
||||
pass
|
||||
pass
|
||||
elif lhs == "ifstmtl":
|
||||
if last == len(tokens):
|
||||
last -= 1
|
||||
if (tokens[last].attr and isinstance(tokens[last].attr, int)):
|
||||
return tokens[first].offset < tokens[last].attr
|
||||
pass
|
||||
elif lhs in ("whileTruestmt38", "whilestmt38"):
|
||||
if lhs in ("whileTruestmt38", "whilestmt38"):
|
||||
jb_index = last - 1
|
||||
while jb_index > 0 and tokens[jb_index].kind.startswith("COME_FROM"):
|
||||
jb_index -= 1
|
||||
|
@@ -358,6 +358,10 @@ class Scanner37Base(Scanner):
|
||||
# other parts like n_LOAD_CONST in pysource.py for example.
|
||||
pattr = const
|
||||
pass
|
||||
elif opname == "IMPORT_NAME":
|
||||
if "." in inst.argval:
|
||||
opname = "IMPORT_NAME_ATTR"
|
||||
pass
|
||||
elif opname in ("MAKE_FUNCTION", "MAKE_CLOSURE"):
|
||||
flags = argval
|
||||
opname = "MAKE_FUNCTION_%d" % (flags)
|
||||
@@ -776,25 +780,6 @@ class Scanner37Base(Scanner):
|
||||
)
|
||||
elif op in self.pop_jump_tf:
|
||||
target = inst.argval
|
||||
prev_op = self.prev_op
|
||||
|
||||
# FIXME: hack upon hack, test_pysource.py fails with this
|
||||
# Until the grammar is corrected we do this fiction...
|
||||
pretarget = self.get_inst(prev_op[target])
|
||||
if (
|
||||
pretarget.opcode in self.pop_jump_if_pop
|
||||
and (target > offset)
|
||||
and pretarget.offset != offset
|
||||
):
|
||||
|
||||
if pretarget.argval != target:
|
||||
# FIXME: this is not accurate The commented out below
|
||||
# is what it should be. However grammar rules right now
|
||||
# assume the incorrect offsets.
|
||||
# self.fixed_jumps[offset] = target
|
||||
self.fixed_jumps[offset] = pretarget.offset
|
||||
return
|
||||
|
||||
self.fixed_jumps[offset] = target
|
||||
|
||||
elif self.version < 3.8 and op == self.opc.SETUP_EXCEPT:
|
||||
@@ -907,31 +892,6 @@ class Scanner37Base(Scanner):
|
||||
elif op in self.setup_opts_no_loop:
|
||||
count_SETUP_ += 1
|
||||
|
||||
def rem_or(self, start, end, instr, target=None, include_beyond_target=False):
|
||||
"""
|
||||
Find offsets of all requested <instr> between <start> and <end>,
|
||||
optionally <target>ing specified offset, and return list found
|
||||
<instr> offsets which are not within any POP_JUMP_IF_TRUE jumps.
|
||||
"""
|
||||
assert start >= 0 and end <= len(self.code) and start <= end
|
||||
|
||||
# Find all offsets of requested instructions
|
||||
instr_offsets = self.inst_matches(
|
||||
start, end, instr, target, include_beyond_target
|
||||
)
|
||||
# Get all POP_JUMP_IF_TRUE (or) offsets
|
||||
jump_true_op = self.opc.POP_JUMP_IF_TRUE
|
||||
pjit_offsets = self.inst_matches(start, end, jump_true_op)
|
||||
filtered = []
|
||||
for pjit_offset in pjit_offsets:
|
||||
pjit_tgt = self.get_target(pjit_offset) - 3
|
||||
for instr_offset in instr_offsets:
|
||||
if instr_offset <= pjit_offset or instr_offset >= pjit_tgt:
|
||||
filtered.append(instr_offset)
|
||||
instr_offsets = filtered
|
||||
filtered = []
|
||||
return instr_offsets
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from uncompyle6 import PYTHON_VERSION
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2017-2019 by Rocky Bernstein
|
||||
# Copyright (c) 2017-2020 by Rocky Bernstein
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@@ -42,6 +42,13 @@ else:
|
||||
# various templates we use odd values. Avoiding equal-precedent comparisons
|
||||
# avoids ambiguity what to do when the precedence is equal.
|
||||
|
||||
# The precidence of a key below applies the key, a node, and the its
|
||||
# *parent*. A node however sometimes sets the precidence for its
|
||||
# children. For example, "call" has precidence 2 so we don't get
|
||||
# additional the additional parenthesis of: ".. op (call())". However
|
||||
# for call's children, it parameters, we set the the precidence high,
|
||||
# say to 100, to make sure we avoid additional prenthesis in
|
||||
# call((.. op ..)).
|
||||
|
||||
PRECEDENCE = {
|
||||
'yield': 102,
|
||||
@@ -81,7 +88,7 @@ PRECEDENCE = {
|
||||
'BINARY_MULTIPLY': 8, # *
|
||||
'BINARY_TRUE_DIVIDE': 8, # Division /
|
||||
|
||||
'unary_expr': 6, # +x, -x, ~x
|
||||
'unary_op': 6, # +x, -x, ~x
|
||||
|
||||
'BINARY_POWER': 4, # Exponentiation, *
|
||||
|
||||
@@ -189,16 +196,20 @@ TABLE_DIRECT = {
|
||||
'INPLACE_AND': ( '&=' ,),
|
||||
'INPLACE_OR': ( '|=' ,),
|
||||
'INPLACE_XOR': ( '^=' ,),
|
||||
'binary_expr': ( '%c %c %c', 0,
|
||||
(-1, 'binary_op'),
|
||||
|
||||
# bin_op (formerly "binary_expr") is the Python AST BinOp
|
||||
'bin_op': ( '%c %c %c', 0,
|
||||
(-1, 'binary_operator'),
|
||||
( 1, 'expr' ) ),
|
||||
|
||||
'UNARY_POSITIVE': ( '+',),
|
||||
'UNARY_NEGATIVE': ( '-',),
|
||||
'UNARY_INVERT': ( '~'),
|
||||
'unary_expr': ( '%c%c',
|
||||
(1, 'unary_op'),
|
||||
(0, 'expr') ),
|
||||
|
||||
# unary_op (formerly "unary_expr") is the Python AST UnaryOp
|
||||
'unary_op': ( '%c%c',
|
||||
(1, 'unary_operator'),
|
||||
(0, 'expr') ),
|
||||
|
||||
'unary_not': ( 'not %c',
|
||||
(0, 'expr' ) ),
|
||||
@@ -219,6 +230,7 @@ TABLE_DIRECT = {
|
||||
(1, 100), (2, 100) ),
|
||||
|
||||
'IMPORT_FROM': ( '%{pattr}', ),
|
||||
'IMPORT_NAME_ATTR': ( '%{pattr}', ),
|
||||
'attribute': ( '%c.%[1]{pattr}',
|
||||
(0, 'expr')),
|
||||
'LOAD_STR': ( '%{pattr}', ),
|
||||
@@ -312,10 +324,18 @@ TABLE_DIRECT = {
|
||||
'compare_chained1': ( '%[3]{pattr.replace("-", " ")} %p %p', (0, 19), (-2, 19)),
|
||||
'compare_chained2': ( '%[1]{pattr.replace("-", " ")} %p', (0, 19)),
|
||||
# 'classdef': (), # handled by n_classdef()
|
||||
|
||||
# A custom rule in n_function def distinguishes whether to call this or
|
||||
# function_def_async
|
||||
'function_def': ( '\n\n%|def %c\n', -2), # -2 to handle closures
|
||||
|
||||
'function_def_deco': ( '\n\n%c', 0),
|
||||
'mkfuncdeco': ( '%|@%c\n%c', 0, 1),
|
||||
|
||||
# A custom rule in n_function def distinguishes whether to call this or
|
||||
# function_def_async
|
||||
'mkfuncdeco0': ( '%|def %c\n', 0),
|
||||
|
||||
'classdefdeco': ( '\n\n%c', 0),
|
||||
'classdefdeco1': ( '%|@%c\n%c', 0, 1),
|
||||
'kwarg': ( '%[0]{pattr}=%c', 1), # Change when Python 2 does LOAD_STR
|
||||
|
@@ -47,6 +47,7 @@ def customize_for_version(self, is_pypy, version):
|
||||
# Without PyPy
|
||||
#######################
|
||||
TABLE_DIRECT.update({
|
||||
# "assert" and "assert_expr" are added via transform rules.
|
||||
"assert": ("%|assert %c\n", (0, "assert_expr")),
|
||||
"assert2": ("%|assert %c, %c\n", (0, "assert_expr"), 3),
|
||||
|
||||
|
@@ -19,8 +19,13 @@
|
||||
from uncompyle6.semantics.consts import TABLE_DIRECT
|
||||
|
||||
from xdis.code import iscode
|
||||
from uncompyle6.semantics.helper import gen_function_parens_adjust
|
||||
from uncompyle6.semantics.make_function import make_function3_annotate
|
||||
from uncompyle6.scanner import Code
|
||||
from uncompyle6.semantics.helper import (
|
||||
find_code_node,
|
||||
gen_function_parens_adjust,
|
||||
)
|
||||
|
||||
from uncompyle6.semantics.make_function3 import make_function3_annotate
|
||||
from uncompyle6.semantics.customize35 import customize_for_version35
|
||||
from uncompyle6.semantics.customize36 import customize_for_version36
|
||||
from uncompyle6.semantics.customize37 import customize_for_version37
|
||||
@@ -48,13 +53,121 @@ def customize_for_version3(self, version):
|
||||
"raise_stmt2": ("%|raise %c from %c\n", 0, 1),
|
||||
"store_locals": ("%|# inspect.currentframe().f_locals = __locals__\n",),
|
||||
"withstmt": ("%|with %c:\n%+%c%-", 0, 3),
|
||||
"withasstmt": ("%|with %c as (%c):\n%+%c%-", 0, 2, 3),
|
||||
"withasstmt": ("%|with %c as %c:\n%+%c%-", 0, 2, 3),
|
||||
}
|
||||
)
|
||||
|
||||
assert version >= 3.0
|
||||
|
||||
def listcomp_closure3(node):
|
||||
"""List comprehensions in Python 3 when handled as a closure.
|
||||
See if we can combine code.
|
||||
"""
|
||||
p = self.prec
|
||||
self.prec = 27
|
||||
|
||||
code = Code(node[1].attr, self.scanner, self.currentclass)
|
||||
ast = self.build_ast(code._tokens, code._customize)
|
||||
self.customize(code._customize)
|
||||
|
||||
# skip over: sstmt, stmt, return, ret_expr
|
||||
# and other singleton derivations
|
||||
while len(ast) == 1 or (
|
||||
ast in ("sstmt", "return") and ast[-1] in ("RETURN_LAST", "RETURN_VALUE")
|
||||
):
|
||||
self.prec = 100
|
||||
ast = ast[0]
|
||||
|
||||
n = ast[1]
|
||||
|
||||
# collections is the name of the expression(s) we are iterating over
|
||||
collections = [node[-3]]
|
||||
list_ifs = []
|
||||
|
||||
if self.version == 3.0 and n != "list_iter":
|
||||
# FIXME 3.0 is a snowflake here. We need
|
||||
# special code for this. Not sure if this is totally
|
||||
# correct.
|
||||
stores = [ast[3]]
|
||||
assert ast[4] == "comp_iter"
|
||||
n = ast[4]
|
||||
# Find the list comprehension body. It is the inner-most
|
||||
# node that is not comp_.. .
|
||||
while n == "comp_iter":
|
||||
if n[0] == "comp_for":
|
||||
n = n[0]
|
||||
stores.append(n[2])
|
||||
n = n[3]
|
||||
elif n[0] in ("comp_if", "comp_if_not"):
|
||||
n = n[0]
|
||||
# FIXME: just a guess
|
||||
if n[0].kind == "expr":
|
||||
list_ifs.append(n)
|
||||
else:
|
||||
list_ifs.append([1])
|
||||
n = n[2]
|
||||
pass
|
||||
else:
|
||||
break
|
||||
pass
|
||||
|
||||
# Skip over n[0] which is something like: _[1]
|
||||
self.preorder(n[1])
|
||||
|
||||
else:
|
||||
assert n == "list_iter"
|
||||
stores = []
|
||||
# Find the list comprehension body. It is the inner-most
|
||||
# node that is not list_.. .
|
||||
while n == "list_iter":
|
||||
n = n[0] # recurse one step
|
||||
if n == "list_for":
|
||||
stores.append(n[2])
|
||||
n = n[3]
|
||||
if n[0] == "list_for":
|
||||
# Dog-paddle down largely singleton reductions
|
||||
# to find the collection (expr)
|
||||
c = n[0][0]
|
||||
if c == "expr":
|
||||
c = c[0]
|
||||
# FIXME: grammar is wonky here? Is this really an attribute?
|
||||
if c == "attribute":
|
||||
c = c[0]
|
||||
collections.append(c)
|
||||
pass
|
||||
elif n in ("list_if", "list_if_not"):
|
||||
# FIXME: just a guess
|
||||
if n[0].kind == "expr":
|
||||
list_ifs.append(n)
|
||||
else:
|
||||
list_ifs.append([1])
|
||||
n = n[2]
|
||||
pass
|
||||
elif n == "list_if37":
|
||||
list_ifs.append(n)
|
||||
n = n[-1]
|
||||
pass
|
||||
pass
|
||||
|
||||
assert n == "lc_body", ast
|
||||
|
||||
self.preorder(n[0])
|
||||
|
||||
# FIXME: add indentation around "for"'s and "in"'s
|
||||
for i, store in enumerate(stores):
|
||||
self.write(" for ")
|
||||
self.preorder(store)
|
||||
self.write(" in ")
|
||||
self.preorder(collections[i])
|
||||
if i < len(list_ifs):
|
||||
self.preorder(list_ifs[i])
|
||||
pass
|
||||
pass
|
||||
self.prec = p
|
||||
self.listcomp_closure3 = listcomp_closure3
|
||||
|
||||
def n_classdef3(node):
|
||||
|
||||
# class definition ('class X(A,B,C):')
|
||||
cclass = self.currentclass
|
||||
|
||||
@@ -125,10 +238,10 @@ def customize_for_version3(self, version):
|
||||
# Python 3.3 classes with closures work like this.
|
||||
# Note have to test before 3.2 case because
|
||||
# index -2 also has an attr.
|
||||
subclass_code = load_closure[-3].attr
|
||||
subclass_code = find_code_node(load_closure, -3).attr
|
||||
elif hasattr(load_closure[-2], "attr"):
|
||||
# Python 3.2 works like this
|
||||
subclass_code = load_closure[-2].attr
|
||||
subclass_code = find_code_node(load_closure, -2).attr
|
||||
else:
|
||||
raise "Internal Error n_classdef: cannot find class body"
|
||||
if hasattr(build_class[3], "__len__"):
|
||||
|
@@ -17,44 +17,47 @@
|
||||
|
||||
from xdis.code import iscode
|
||||
from xdis.util import COMPILER_FLAG_BIT
|
||||
from uncompyle6.semantics.consts import (
|
||||
INDENT_PER_LEVEL, TABLE_DIRECT)
|
||||
from uncompyle6.semantics.helper import (
|
||||
flatten_list, gen_function_parens_adjust)
|
||||
from uncompyle6.semantics.consts import INDENT_PER_LEVEL, TABLE_DIRECT
|
||||
from uncompyle6.semantics.helper import flatten_list, gen_function_parens_adjust
|
||||
|
||||
#######################
|
||||
# Python 3.5+ Changes #
|
||||
#######################
|
||||
def customize_for_version35(self, version):
|
||||
TABLE_DIRECT.update({
|
||||
'await_expr': ( 'await %c', 0),
|
||||
'await_stmt': ( '%|%c\n', 0),
|
||||
'async_for_stmt': (
|
||||
'%|async for %c in %c:\n%+%|%c%-\n\n', 9, 1, 25 ),
|
||||
'async_forelse_stmt': (
|
||||
'%|async for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n',
|
||||
9, 1, 25, (27, 'else_suite') ),
|
||||
'async_with_stmt': (
|
||||
'%|async with %c:\n%+%|%c%-',
|
||||
(0, 'expr'), 7 ),
|
||||
'async_with_as_stmt': (
|
||||
'%|async with %c as %c:\n%+%|%c%-',
|
||||
(0, 'expr'), (6, 'store'), 7),
|
||||
'unmap_dict': ( '{**%C}', (0, -1, ', **') ),
|
||||
# 'unmapexpr': ( '{**%c}', 0), # done by n_unmapexpr
|
||||
|
||||
})
|
||||
TABLE_DIRECT.update(
|
||||
{
|
||||
"await_expr": ("await %c", 0),
|
||||
"await_stmt": ("%|%c\n", 0),
|
||||
"async_for_stmt": ("%|async for %c in %c:\n%+%|%c%-\n\n", 9, 1, 25),
|
||||
"async_forelse_stmt": (
|
||||
"%|async for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n",
|
||||
9,
|
||||
1,
|
||||
25,
|
||||
(27, "else_suite"),
|
||||
),
|
||||
"async_with_stmt": ("%|async with %c:\n%+%c%-", (0, "expr"), 7),
|
||||
"async_with_as_stmt": (
|
||||
"%|async with %c as %c:\n%+%c%-",
|
||||
(0, "expr"),
|
||||
(6, "store"),
|
||||
7,
|
||||
),
|
||||
"unmap_dict": ("{**%C}", (0, -1, ", **")),
|
||||
# "unmapexpr": ( "{**%c}", 0), # done by n_unmapexpr
|
||||
}
|
||||
)
|
||||
|
||||
def async_call(node):
|
||||
self.f.write('async ')
|
||||
node.kind == 'call'
|
||||
self.f.write("async ")
|
||||
node.kind == "call"
|
||||
p = self.prec
|
||||
self.prec = 80
|
||||
self.template_engine(('%c(%P)', 0, (1, -4, ', ',
|
||||
100)), node)
|
||||
self.template_engine(("%c(%P)", 0, (1, -4, ", ", 100)), node)
|
||||
self.prec = p
|
||||
node.kind == 'async_call'
|
||||
node.kind == "async_call"
|
||||
self.prune()
|
||||
|
||||
self.n_async_call = async_call
|
||||
|
||||
def n_build_list_unpack(node):
|
||||
@@ -90,11 +93,13 @@ def customize_for_version35(self, version):
|
||||
if value.startswith("("):
|
||||
assert value.endswith(")")
|
||||
use_star = False
|
||||
value = value[1:-1].rstrip(" ") # Remove starting '(' and trailing ')' and additional spaces
|
||||
value = value[1:-1].rstrip(
|
||||
" "
|
||||
) # Remove starting "(" and trailing ")" and additional spaces
|
||||
if value == "":
|
||||
pass
|
||||
else:
|
||||
if value.endswith(","): # if args has only one item
|
||||
if value.endswith(","): # if args has only one item
|
||||
value = value[:-1]
|
||||
if line_number != self.line_number:
|
||||
sep += "\n" + self.indent + INDENT_PER_LEVEL[:-1]
|
||||
@@ -114,16 +119,19 @@ def customize_for_version35(self, version):
|
||||
self.prec = p
|
||||
self.prune()
|
||||
return
|
||||
|
||||
self.n_build_list_unpack = n_build_list_unpack
|
||||
|
||||
def n_call(node):
|
||||
p = self.prec
|
||||
self.prec = 100
|
||||
mapping = self._get_mapping(node)
|
||||
table = mapping[0]
|
||||
key = node
|
||||
for i in mapping[1:]:
|
||||
key = key[i]
|
||||
pass
|
||||
if key.kind.startswith('CALL_FUNCTION_VAR_KW'):
|
||||
if key.kind.startswith("CALL_FUNCTION_VAR_KW"):
|
||||
# Python 3.5 changes the stack position of
|
||||
# *args: kwargs come after *args whereas
|
||||
# in earlier Pythons, *args is at the end
|
||||
@@ -137,12 +145,12 @@ def customize_for_version35(self, version):
|
||||
kwarg_pos = entry[2][1]
|
||||
args_pos = kwarg_pos - 1
|
||||
# Put last node[args_pos] after subsequent kwargs
|
||||
while node[kwarg_pos] == 'kwarg' and kwarg_pos < len(node):
|
||||
while node[kwarg_pos] == "kwarg" and kwarg_pos < len(node):
|
||||
# swap node[args_pos] with node[kwargs_pos]
|
||||
node[kwarg_pos], node[args_pos] = node[args_pos], node[kwarg_pos]
|
||||
args_pos = kwarg_pos
|
||||
kwarg_pos += 1
|
||||
elif key.kind.startswith('CALL_FUNCTION_VAR'):
|
||||
elif key.kind.startswith("CALL_FUNCTION_VAR"):
|
||||
# CALL_FUNCTION_VAR's top element of the stack contains
|
||||
# the variable argument list, then comes
|
||||
# annotation args, then keyword args.
|
||||
@@ -153,59 +161,81 @@ def customize_for_version35(self, version):
|
||||
kwargs = (argc >> 8) & 0xFF
|
||||
# FIXME: handle annotation args
|
||||
if nargs > 0:
|
||||
template = ('%c(%C, ', 0, (1, nargs+1, ', '))
|
||||
template = ("%c(%P, ", 0, (1, nargs + 1, ", ", 100))
|
||||
else:
|
||||
template = ('%c(', 0)
|
||||
template = ("%c(", 0)
|
||||
self.template_engine(template, node)
|
||||
|
||||
args_node = node[-2]
|
||||
if args_node in ('pos_arg', 'expr'):
|
||||
args_node = node[-2]
|
||||
if args_node in ("pos_arg", "expr"):
|
||||
args_node = args_node[0]
|
||||
if args_node == 'build_list_unpack':
|
||||
template = ('*%P)', (0, len(args_node)-1, ', *', 100))
|
||||
if args_node == "build_list_unpack":
|
||||
template = ("*%P)", (0, len(args_node) - 1, ", *", 100))
|
||||
self.template_engine(template, args_node)
|
||||
else:
|
||||
if len(node) - nargs > 3:
|
||||
template = ('*%c, %C)', nargs+1, (nargs+kwargs+1, -1, ', '))
|
||||
template = ("*%c, %P)", nargs + 1, (nargs + kwargs + 1, -1, ", ", 100))
|
||||
else:
|
||||
template = ('*%c)', nargs+1)
|
||||
template = ("*%c)", nargs + 1)
|
||||
self.template_engine(template, node)
|
||||
self.prec = p
|
||||
self.prune()
|
||||
else:
|
||||
gen_function_parens_adjust(key, node)
|
||||
|
||||
self.prec = 100
|
||||
self.default(node)
|
||||
|
||||
self.n_call = n_call
|
||||
|
||||
def n_function_def(node):
|
||||
n0 = node[0]
|
||||
is_code = False
|
||||
for i in list(range(len(n0)-2, -1, -1)):
|
||||
code_node = n0[i]
|
||||
if hasattr(code_node, 'attr') and iscode(code_node.attr):
|
||||
is_code = True
|
||||
def is_async_fn(node):
|
||||
code_node = node[0][0]
|
||||
for n in node[0]:
|
||||
if hasattr(n, "attr") and iscode(n.attr):
|
||||
code_node = n
|
||||
break
|
||||
pass
|
||||
pass
|
||||
|
||||
if (is_code and
|
||||
(code_node.attr.co_flags & COMPILER_FLAG_BIT['COROUTINE'])):
|
||||
self.template_engine(('\n\n%|async def %c\n',
|
||||
-2), node)
|
||||
is_code = hasattr(code_node, "attr") and iscode(code_node.attr)
|
||||
return is_code and (
|
||||
code_node.attr.co_flags
|
||||
& (
|
||||
COMPILER_FLAG_BIT["COROUTINE"]
|
||||
| COMPILER_FLAG_BIT["ITERABLE_COROUTINE"]
|
||||
| COMPILER_FLAG_BIT["ASYNC_GENERATOR"]
|
||||
)
|
||||
)
|
||||
|
||||
def n_function_def(node):
|
||||
if is_async_fn(node):
|
||||
self.template_engine(("\n\n%|async def %c\n", -2), node)
|
||||
else:
|
||||
self.template_engine(('\n\n%|def %c\n', -2),
|
||||
node)
|
||||
self.default(node)
|
||||
self.prune()
|
||||
|
||||
self.n_function_def = n_function_def
|
||||
|
||||
def n_mkfuncdeco0(node):
|
||||
if is_async_fn(node):
|
||||
self.template_engine(("%|async def %c\n", 0), node)
|
||||
else:
|
||||
self.default(node)
|
||||
self.prune()
|
||||
|
||||
self.n_mkfuncdeco0 = n_mkfuncdeco0
|
||||
|
||||
def unmapexpr(node):
|
||||
last_n = node[0][-1]
|
||||
for n in node[0]:
|
||||
self.preorder(n)
|
||||
if n != last_n:
|
||||
self.f.write(', **')
|
||||
self.f.write(", **")
|
||||
pass
|
||||
pass
|
||||
self.prune()
|
||||
pass
|
||||
|
||||
self.n_unmapexpr = unmapexpr
|
||||
|
||||
# FIXME: start here
|
||||
@@ -222,67 +252,75 @@ def customize_for_version35(self, version):
|
||||
# then the first * has already been printed.
|
||||
# Until I have a better way to check for CALL_FUNCTION_VAR,
|
||||
# will assume that if the text ends in *.
|
||||
last_was_star = self.f.getvalue().endswith('*')
|
||||
last_was_star = self.f.getvalue().endswith("*")
|
||||
|
||||
if lastnodetype.startswith('BUILD_LIST'):
|
||||
self.write('['); endchar = ']'
|
||||
elif lastnodetype.startswith('BUILD_TUPLE'):
|
||||
if lastnodetype.startswith("BUILD_LIST"):
|
||||
self.write("[")
|
||||
endchar = "]"
|
||||
elif lastnodetype.startswith("BUILD_TUPLE"):
|
||||
# Tuples can appear places that can NOT
|
||||
# have parenthesis around them, like array
|
||||
# subscripts. We check for that by seeing
|
||||
# if a tuple item is some sort of slice.
|
||||
no_parens = False
|
||||
for n in node:
|
||||
if n == 'expr' and n[0].kind.startswith('build_slice'):
|
||||
if n == "expr" and n[0].kind.startswith("build_slice"):
|
||||
no_parens = True
|
||||
break
|
||||
pass
|
||||
if no_parens:
|
||||
endchar = ''
|
||||
endchar = ""
|
||||
else:
|
||||
self.write('('); endchar = ')'
|
||||
self.write("(")
|
||||
endchar = ")"
|
||||
pass
|
||||
|
||||
elif lastnodetype.startswith('BUILD_SET'):
|
||||
self.write('{'); endchar = '}'
|
||||
elif lastnodetype.startswith('BUILD_MAP_UNPACK'):
|
||||
self.write('{*'); endchar = '}'
|
||||
elif lastnodetype.startswith('ROT_TWO'):
|
||||
self.write('('); endchar = ')'
|
||||
elif lastnodetype.startswith("BUILD_SET"):
|
||||
self.write("{")
|
||||
endchar = "}"
|
||||
elif lastnodetype.startswith("BUILD_MAP_UNPACK"):
|
||||
self.write("{*")
|
||||
endchar = "}"
|
||||
elif lastnodetype.startswith("ROT_TWO"):
|
||||
self.write("(")
|
||||
endchar = ")"
|
||||
else:
|
||||
raise TypeError('Internal Error: n_build_list expects list, tuple, set, or unpack')
|
||||
raise TypeError(
|
||||
"Internal Error: n_build_list expects list, tuple, set, or unpack"
|
||||
)
|
||||
|
||||
flat_elems = flatten_list(node)
|
||||
|
||||
self.indent_more(INDENT_PER_LEVEL)
|
||||
sep = ''
|
||||
sep = ""
|
||||
for elem in flat_elems:
|
||||
if elem in ('ROT_THREE', 'EXTENDED_ARG'):
|
||||
if elem in ("ROT_THREE", "EXTENDED_ARG"):
|
||||
continue
|
||||
assert elem == 'expr'
|
||||
assert elem == "expr"
|
||||
line_number = self.line_number
|
||||
value = self.traverse(elem)
|
||||
if elem[0] == 'tuple':
|
||||
assert value[0] == '('
|
||||
assert value[-1] == ')'
|
||||
if elem[0] == "tuple":
|
||||
assert value[0] == "("
|
||||
assert value[-1] == ")"
|
||||
value = value[1:-1]
|
||||
if value[-1] == ',':
|
||||
if value[-1] == ",":
|
||||
# singleton tuple
|
||||
value = value[:-1]
|
||||
else:
|
||||
value = '*' + value
|
||||
value = "*" + value
|
||||
if line_number != self.line_number:
|
||||
sep += '\n' + self.indent + INDENT_PER_LEVEL[:-1]
|
||||
sep += "\n" + self.indent + INDENT_PER_LEVEL[:-1]
|
||||
else:
|
||||
if sep != '': sep += ' '
|
||||
if sep != "":
|
||||
sep += " "
|
||||
if not last_was_star:
|
||||
pass
|
||||
else:
|
||||
last_was_star = False
|
||||
self.write(sep, value)
|
||||
sep = ','
|
||||
if lastnode.attr == 1 and lastnodetype.startswith('BUILD_TUPLE'):
|
||||
self.write(',')
|
||||
sep = ","
|
||||
if lastnode.attr == 1 and lastnodetype.startswith("BUILD_TUPLE"):
|
||||
self.write(",")
|
||||
self.write(endchar)
|
||||
self.indent_less(INDENT_PER_LEVEL)
|
||||
|
||||
|
@@ -219,6 +219,7 @@ def customize_for_version36(self, version):
|
||||
# FIXME: decide if the below test be on kwargs == 'dict'
|
||||
if (call_function_ex.attr & 1 and
|
||||
(not isinstance(kwargs, Token) and kwargs != 'attribute')
|
||||
and kwargs != "call_kw36"
|
||||
and not kwargs[0].kind.startswith('kvlist')):
|
||||
self.call36_dict(kwargs)
|
||||
else:
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2019 by Rocky Bernstein
|
||||
# Copyright (c) 2019-2020 by Rocky Bernstein
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@@ -15,57 +15,153 @@
|
||||
"""Isolate Python 3.7 version-specific semantic actions here.
|
||||
"""
|
||||
|
||||
from uncompyle6.semantics.consts import PRECEDENCE, TABLE_DIRECT
|
||||
from uncompyle6.semantics.consts import (
|
||||
PRECEDENCE,
|
||||
TABLE_DIRECT,
|
||||
maxint,
|
||||
)
|
||||
|
||||
def customize_for_version37(self, version):
|
||||
########################
|
||||
# Python 3.7+ changes
|
||||
#######################
|
||||
|
||||
PRECEDENCE['attribute37'] = 2
|
||||
PRECEDENCE['if_exp_37a'] = 28
|
||||
PRECEDENCE['if_exp_37b'] = 28
|
||||
PRECEDENCE["attribute37"] = 2
|
||||
PRECEDENCE["call_ex"] = 1
|
||||
PRECEDENCE["call_ex_kw"] = 1
|
||||
PRECEDENCE["call_ex_kw2"] = 1
|
||||
PRECEDENCE["call_ex_kw3"] = 1
|
||||
PRECEDENCE["call_ex_kw4"] = 1
|
||||
PRECEDENCE["call_kw"] = 0
|
||||
PRECEDENCE["call_kw36"] = 1
|
||||
PRECEDENCE["formatted_value1"] = 100
|
||||
PRECEDENCE["if_exp_37a"] = 28
|
||||
PRECEDENCE["if_exp_37b"] = 28
|
||||
PRECEDENCE["unmap_dict"] = 0
|
||||
|
||||
TABLE_DIRECT.update({
|
||||
'and_not': ( '%c and not %c',
|
||||
(0, 'expr'), (2, 'expr') ),
|
||||
'async_forelse_stmt': (
|
||||
'%|async for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n',
|
||||
(7, 'store'), (1, 'expr'), (17, 'for_block'), (25, 'else_suite') ),
|
||||
'async_for_stmt': (
|
||||
'%|async for %c in %c:\n%+%c%-%-\n\n',
|
||||
(7, 'store'), (1, 'expr'), (17, 'for_block')),
|
||||
'async_for_stmt37': (
|
||||
'%|async for %c in %c:\n%+%c%-%-\n\n',
|
||||
(7, 'store'), (1, 'expr'), (16, 'for_block') ),
|
||||
'attribute37': ( '%c.%[1]{pattr}', 0 ),
|
||||
'compare_chained1a_37': (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19), (-4, 19)),
|
||||
'compare_chained1_false_37': (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19), (-4, 19)),
|
||||
'compare_chained2_false_37': (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19), (-5, 19)),
|
||||
'compare_chained1b_37': (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19), (-4, 19)),
|
||||
'compare_chained1c_37': (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19), (-2, 19)),
|
||||
'compare_chained2a_37': (
|
||||
'%[1]{pattr.replace("-", " ")} %p',
|
||||
(0, 19) ),
|
||||
'compare_chained2b_37': (
|
||||
'%[1]{pattr.replace("-", " ")} %p',
|
||||
(0, 19) ),
|
||||
'compare_chained2a_false_37': (
|
||||
'%[1]{pattr.replace("-", " ")} %p',
|
||||
(0, 19 ) ),
|
||||
'compare_chained2c_37': (
|
||||
'%[3]{pattr.replace("-", " ")} %p %p', (0, 19), (6, 19) ),
|
||||
'if_exp_37a': ( '%p if %p else %p', (1, 'expr', 27), (0, 27), (4, 'expr', 27) ),
|
||||
'if_exp_37b': ( '%p if %p else %p', (2, 'expr', 27), (0, 'expr', 27), (5, 'expr', 27) ),
|
||||
TABLE_DIRECT.update(
|
||||
{
|
||||
"and_not": ("%c and not %c", (0, "expr"), (2, "expr")),
|
||||
"ann_assign": (
|
||||
"%|%[2]{attr}: %c\n", 0,
|
||||
),
|
||||
"ann_assign_init": (
|
||||
"%|%[2]{attr}: %c = %c\n", 0, 1,
|
||||
),
|
||||
"async_for_stmt": (
|
||||
"%|async for %c in %c:\n%+%c%-\n\n",
|
||||
(7, "store"),
|
||||
(1, "expr"),
|
||||
(17, "for_block"),
|
||||
),
|
||||
"async_for_stmt36": (
|
||||
"%|async for %c in %c:\n%+%c%-%-\n\n",
|
||||
(9, "store"),
|
||||
(1, "expr"),
|
||||
(18, "for_block"),
|
||||
),
|
||||
"async_for_stmt37": (
|
||||
"%|async for %c in %c:\n%+%c%-%-\n\n",
|
||||
(7, "store"),
|
||||
(1, "expr"),
|
||||
(16, "for_block"),
|
||||
),
|
||||
"async_with_stmt": ("%|async with %c:\n%+%c%-", (0, "expr"), 7),
|
||||
"async_with_as_stmt": (
|
||||
"%|async with %c as %c:\n%+%c%-",
|
||||
(0, "expr"),
|
||||
(6, "store"),
|
||||
7,
|
||||
),
|
||||
"async_forelse_stmt": (
|
||||
"%|async for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n",
|
||||
(7, "store"),
|
||||
(1, "expr"),
|
||||
(17, "for_block"),
|
||||
(25, "else_suite"),
|
||||
),
|
||||
"attribute37": ("%c.%[1]{pattr}", 0),
|
||||
"attributes37": ("%[0]{pattr} import %c",
|
||||
(0, "IMPORT_NAME_ATTR"),
|
||||
(1, "IMPORT_FROM")),
|
||||
"await_expr": ("await %c", 0),
|
||||
"await_stmt": ("%|%c\n", 0),
|
||||
"call_ex": ("%c(%p)", (0, "expr"), (1, 100)),
|
||||
"compare_chained1a_37": (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19),
|
||||
(-4, 19),
|
||||
),
|
||||
"compare_chained1_false_37": (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19),
|
||||
(-4, 19),
|
||||
),
|
||||
"compare_chained2_false_37": (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19),
|
||||
(-5, 19),
|
||||
),
|
||||
"compare_chained1b_false_37": (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19),
|
||||
(-4, 19),
|
||||
),
|
||||
"compare_chained1c_37": (
|
||||
' %[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19),
|
||||
(-2, 19),
|
||||
),
|
||||
"compare_chained2a_37": ('%[1]{pattr.replace("-", " ")} %p', (0, 19)),
|
||||
"compare_chained2b_false_37": ('%[1]{pattr.replace("-", " ")} %p', (0, 19)),
|
||||
"compare_chained2a_false_37": ('%[1]{pattr.replace("-", " ")} %p', (0, 19)),
|
||||
"compare_chained2c_37": (
|
||||
'%[3]{pattr.replace("-", " ")} %p %p',
|
||||
(0, 19),
|
||||
(6, 19),
|
||||
),
|
||||
"except_return": ("%|except:\n%+%c%-", 3),
|
||||
"if_exp_37a": (
|
||||
"%p if %p else %p",
|
||||
(1, "expr", 27),
|
||||
(0, 27),
|
||||
(4, "expr", 27),
|
||||
),
|
||||
"if_exp_37b": (
|
||||
"%p if %p else %p",
|
||||
(2, "expr", 27),
|
||||
(0, "expr", 27),
|
||||
(5, "expr", 27),
|
||||
),
|
||||
"ifstmtl": ("%|if %c:\n%+%c%-", (0, "testexpr"), (1, "_ifstmts_jumpl")),
|
||||
'import_as37': ( '%|import %c as %c\n', 2, -2),
|
||||
'import_from37': ( '%|from %[2]{pattr} import %c\n',
|
||||
(3, 'importlist37') ),
|
||||
|
||||
})
|
||||
"importattr37": ("%c", (0, "IMPORT_NAME_ATTR")),
|
||||
"importlist37": ("%C", (0, maxint, ", ")),
|
||||
"list_if37": (" if %p%c", (0, 27), 1),
|
||||
"list_if37_not": (" if not %p%c", (0, 27), 1),
|
||||
"testfalse_not_or": ("not %c or %c", (0, "expr"), (2, "expr")),
|
||||
"testfalse_not_and": ("not (%c)", 0),
|
||||
"try_except36": ("%|try:\n%+%c%-%c\n\n", 1, -2),
|
||||
"tryfinally36": ("%|try:\n%+%c%-%|finally:\n%+%c%-\n\n", (1, "returns"), 3),
|
||||
"unmap_dict": ("{**%C}", (0, -1, ", **")),
|
||||
"unpack_list": ("*%c", (0, "list")),
|
||||
"yield_from": ("yield from %c", (0, "expr")),
|
||||
}
|
||||
)
|
||||
|
||||
def n_importlist37(node):
|
||||
if len(node) == 1:
|
||||
self.default(node)
|
||||
return
|
||||
n = len(node) - 1
|
||||
for i in range(n, -1, -1):
|
||||
if node[i] != "ROT_TWO":
|
||||
break
|
||||
self.template_engine(("%C", (0, i + 1, ', ')), node)
|
||||
self.prune()
|
||||
return
|
||||
|
||||
self.n_importlist37 = n_importlist37
|
||||
|
@@ -104,9 +104,17 @@ def customize_for_version38(self, version):
|
||||
'try_except_ret38': (
|
||||
'%|try:\n%+%|return %c%-\n%|except:\n%+%|%c%-\n\n',
|
||||
(1, 'expr'), (-1, 'except_ret38a') ),
|
||||
'tryfinally38': (
|
||||
'tryfinally38rstmt': (
|
||||
'%|try:\n%+%c%-%|finally:\n%+%c%-\n\n',
|
||||
(3, 'returns'), 6 ),
|
||||
'tryfinally38stmt': (
|
||||
'%|try:\n%+%c%-%|finally:\n%+%c%-\n\n',
|
||||
(1, "suite_stmts_opt"),
|
||||
(6, "suite_stmts_opt") ),
|
||||
'tryfinally38astmt': (
|
||||
'%|try:\n%+%c%-%|finally:\n%+%c%-\n\n',
|
||||
(2, "suite_stmts_opt"),
|
||||
(8, "suite_stmts_opt") ),
|
||||
"named_expr": ( # AKA "walrus operator"
|
||||
"%c := %c", (2, "store"), (0, "expr")
|
||||
)
|
||||
|
@@ -410,7 +410,7 @@ class FragmentsWalker(pysource.SourceWalker, object):
|
||||
def n_expr(self, node):
|
||||
start = len(self.f.getvalue())
|
||||
p = self.prec
|
||||
if node[0].kind.startswith("binary_expr"):
|
||||
if node[0].kind.startswith("bin_op"):
|
||||
n = node[0][-1][0]
|
||||
else:
|
||||
n = node[0]
|
||||
@@ -444,13 +444,14 @@ class FragmentsWalker(pysource.SourceWalker, object):
|
||||
super(FragmentsWalker, self).n_ret_expr(node)
|
||||
self.set_pos_info(node, start, len(self.f.getvalue()))
|
||||
|
||||
def n_binary_expr(self, node):
|
||||
def n_bin_op(self, node):
|
||||
"""bin_op (formerly "binary_expr") is the Python AST BinOp"""
|
||||
start = len(self.f.getvalue())
|
||||
for n in node:
|
||||
n.parent = node
|
||||
self.last_finish = len(self.f.getvalue())
|
||||
try:
|
||||
super(FragmentsWalker, self).n_binary_expr(node)
|
||||
super(FragmentsWalker, self).n_bin_op(node)
|
||||
except GenericASTTraversalPruningException:
|
||||
pass
|
||||
self.set_pos_info(node, start, len(self.f.getvalue()))
|
||||
|
@@ -1,5 +1,6 @@
|
||||
import sys
|
||||
|
||||
from xdis.code import iscode
|
||||
from uncompyle6.parsers.treenode import SyntaxTree
|
||||
|
||||
from uncompyle6 import PYTHON3
|
||||
@@ -16,6 +17,23 @@ read_global_ops = frozenset(('STORE_GLOBAL', 'DELETE_GLOBAL'))
|
||||
# NOTE: we also need to check that the variable name is a free variable, not a cell variable.
|
||||
nonglobal_ops = frozenset(('STORE_DEREF', 'DELETE_DEREF'))
|
||||
|
||||
def escape_string(s, quotes=('"', "'", '"""', "'''")):
|
||||
quote = None
|
||||
for q in quotes:
|
||||
if s.find(q) == -1:
|
||||
quote = q
|
||||
break
|
||||
pass
|
||||
if quote is None:
|
||||
quote = '"""'
|
||||
s = s.replace('"""', '\\"""')
|
||||
|
||||
for (orig, replace) in (('\t', '\\t'),
|
||||
('\n', '\\n'),
|
||||
('\r', '\\r')):
|
||||
s = s.replace(orig, replace)
|
||||
return "%s%s%s" % (quote, s, quote)
|
||||
|
||||
# FIXME: this and find_globals could be paramaterized with one of the
|
||||
# above global ops
|
||||
def find_all_globals(node, globs):
|
||||
@@ -27,6 +45,30 @@ def find_all_globals(node, globs):
|
||||
globs.add(n.pattr)
|
||||
return globs
|
||||
|
||||
# def find_globals(node, globs, global_ops=mkfunc_globals):
|
||||
# """Find globals in this statement."""
|
||||
# for n in node:
|
||||
# # print("XXX", n.kind, global_ops)
|
||||
# if isinstance(n, SyntaxTree):
|
||||
# # FIXME: do I need a caser for n.kind="mkfunc"?
|
||||
# if n.kind in ("if_expr_lambda", "return_lambda"):
|
||||
# globs = find_globals(n, globs, mklambda_globals)
|
||||
# else:
|
||||
# globs = find_globals(n, globs, global_ops)
|
||||
# elif n.kind in frozenset(global_ops):
|
||||
# globs.add(n.pattr)
|
||||
# return globs
|
||||
|
||||
def find_code_node(node, start):
|
||||
for i in range(-start, len(node) + 1):
|
||||
if node[-i].kind == "LOAD_CODE":
|
||||
code_node = node[-i]
|
||||
assert iscode(code_node.attr)
|
||||
return code_node
|
||||
pass
|
||||
assert False, "did not find code node starting at %d in %s" % (start, node)
|
||||
|
||||
|
||||
def find_globals_and_nonlocals(node, globs, nonlocals, code, version):
|
||||
"""search a node of parse tree to find variable names that need a
|
||||
either 'global' or 'nonlocal' statements added."""
|
||||
@@ -44,20 +86,6 @@ def find_globals_and_nonlocals(node, globs, nonlocals, code, version):
|
||||
nonlocals.add(n.pattr)
|
||||
return globs, nonlocals
|
||||
|
||||
# def find_globals(node, globs, global_ops=mkfunc_globals):
|
||||
# """Find globals in this statement."""
|
||||
# for n in node:
|
||||
# # print("XXX", n.kind, global_ops)
|
||||
# if isinstance(n, SyntaxTree):
|
||||
# # FIXME: do I need a caser for n.kind="mkfunc"?
|
||||
# if n.kind in ("if_expr_lambda", "return_lambda"):
|
||||
# globs = find_globals(n, globs, mklambda_globals)
|
||||
# else:
|
||||
# globs = find_globals(n, globs, global_ops)
|
||||
# elif n.kind in frozenset(global_ops):
|
||||
# globs.add(n.pattr)
|
||||
# return globs
|
||||
|
||||
def find_none(node):
|
||||
for n in node:
|
||||
if isinstance(n, SyntaxTree):
|
||||
@@ -68,35 +96,47 @@ def find_none(node):
|
||||
return True
|
||||
return False
|
||||
|
||||
def escape_string(str, quotes=('"', "'", '"""', "'''")):
|
||||
quote = None
|
||||
for q in quotes:
|
||||
if str.find(q) == -1:
|
||||
quote = q
|
||||
break
|
||||
def flatten_list(node):
|
||||
"""
|
||||
List of expressions may be nested in groups of 32 and 1024
|
||||
items. flatten that out and return the list
|
||||
"""
|
||||
flat_elems = []
|
||||
for elem in node:
|
||||
if elem == 'expr1024':
|
||||
for subelem in elem:
|
||||
assert subelem == 'expr32'
|
||||
for subsubelem in subelem:
|
||||
flat_elems.append(subsubelem)
|
||||
elif elem == 'expr32':
|
||||
for subelem in elem:
|
||||
assert subelem == 'expr'
|
||||
flat_elems.append(subelem)
|
||||
else:
|
||||
flat_elems.append(elem)
|
||||
pass
|
||||
pass
|
||||
if quote is None:
|
||||
quote = '"""'
|
||||
str = str.replace('"""', '\\"""')
|
||||
return flat_elems
|
||||
|
||||
for (orig, replace) in (('\t', '\\t'),
|
||||
('\n', '\\n'),
|
||||
('\r', '\\r')):
|
||||
str = str.replace(orig, replace)
|
||||
return "%s%s%s" % (quote, str, quote)
|
||||
# Note: this is only used in Python > 3.0
|
||||
# Should move this somewhere more specific?
|
||||
def gen_function_parens_adjust(mapping_key, node):
|
||||
"""If we can avoid the outer parenthesis
|
||||
of a generator function, set the node key to
|
||||
'call_generator' and the caller will do the default
|
||||
action on that. Otherwise we do nothing.
|
||||
"""
|
||||
if mapping_key.kind != 'CALL_FUNCTION_1':
|
||||
return
|
||||
|
||||
def strip_quotes(str):
|
||||
if str.startswith("'''") and str.endswith("'''"):
|
||||
str = str[3:-3]
|
||||
elif str.startswith('"""') and str.endswith('"""'):
|
||||
str = str[3:-3]
|
||||
elif str.startswith("'") and str.endswith("'"):
|
||||
str = str[1:-1]
|
||||
elif str.startswith('"') and str.endswith('"'):
|
||||
str = str[1:-1]
|
||||
args_node = node[-2]
|
||||
if args_node == 'pos_arg':
|
||||
assert args_node[0] == 'expr'
|
||||
n = args_node[0][0]
|
||||
if n == 'generator_exp':
|
||||
node.kind = 'call_generator'
|
||||
pass
|
||||
return str
|
||||
|
||||
return
|
||||
|
||||
def print_docstring(self, indent, docstring):
|
||||
quote = '"""'
|
||||
@@ -173,48 +213,18 @@ def print_docstring(self, indent, docstring):
|
||||
self.println(lines[-1], quote)
|
||||
return True
|
||||
|
||||
|
||||
def flatten_list(node):
|
||||
"""
|
||||
List of expressions may be nested in groups of 32 and 1024
|
||||
items. flatten that out and return the list
|
||||
"""
|
||||
flat_elems = []
|
||||
for elem in node:
|
||||
if elem == 'expr1024':
|
||||
for subelem in elem:
|
||||
assert subelem == 'expr32'
|
||||
for subsubelem in subelem:
|
||||
flat_elems.append(subsubelem)
|
||||
elif elem == 'expr32':
|
||||
for subelem in elem:
|
||||
assert subelem == 'expr'
|
||||
flat_elems.append(subelem)
|
||||
else:
|
||||
flat_elems.append(elem)
|
||||
pass
|
||||
def strip_quotes(s):
|
||||
if s.startswith("'''") and s.endswith("'''"):
|
||||
s = s[3:-3]
|
||||
elif s.startswith('"""') and s.endswith('"""'):
|
||||
s = s[3:-3]
|
||||
elif s.startswith("'") and s.endswith("'"):
|
||||
s = s[1:-1]
|
||||
elif s.startswith('"') and s.endswith('"'):
|
||||
s = s[1:-1]
|
||||
pass
|
||||
return flat_elems
|
||||
return s
|
||||
|
||||
# Note: this is only used in Python > 3.0
|
||||
# Should move this somewhere more specific?
|
||||
def gen_function_parens_adjust(mapping_key, node):
|
||||
"""If we can avoid the outer parenthesis
|
||||
of a generator function, set the node key to
|
||||
'call_generator' and the caller will do the default
|
||||
action on that. Otherwise we do nothing.
|
||||
"""
|
||||
if mapping_key.kind != 'CALL_FUNCTION_1':
|
||||
return
|
||||
|
||||
args_node = node[-2]
|
||||
if args_node == 'pos_arg':
|
||||
assert args_node[0] == 'expr'
|
||||
n = args_node[0][0]
|
||||
if n == 'generator_exp':
|
||||
node.kind = 'call_generator'
|
||||
pass
|
||||
return
|
||||
|
||||
|
||||
# if __name__ == '__main__':
|
||||
|
207
uncompyle6/semantics/make_function2.py
Normal file
207
uncompyle6/semantics/make_function2.py
Normal file
@@ -0,0 +1,207 @@
|
||||
# Copyright (c) 2015-2019 by Rocky Bernstein
|
||||
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
All the crazy things we have to do to handle Python functions in Python before 3.0.
|
||||
The saga of changes continues in 3.0 and above and in other files.
|
||||
"""
|
||||
from xdis.code import iscode, code_has_star_arg, code_has_star_star_arg
|
||||
from uncompyle6.scanner import Code
|
||||
from uncompyle6.parsers.treenode import SyntaxTree
|
||||
from uncompyle6 import PYTHON3
|
||||
from uncompyle6.semantics.parser_error import ParserError
|
||||
from uncompyle6.parser import ParserError as ParserError2
|
||||
from uncompyle6.semantics.helper import (
|
||||
print_docstring,
|
||||
find_all_globals,
|
||||
find_globals_and_nonlocals,
|
||||
find_none,
|
||||
)
|
||||
|
||||
if PYTHON3:
|
||||
from itertools import zip_longest
|
||||
else:
|
||||
from itertools import izip_longest as zip_longest
|
||||
|
||||
from uncompyle6.show import maybe_show_tree_param_default
|
||||
|
||||
def make_function2(self, node, is_lambda, nested=1, code_node=None):
    """
    Dump function definition, doc string, and function body.
    This code is specialized for Python 2.

    node: the parse-tree node for the function/lambda being emitted;
          its last child is the MAKE_FUNCTION_.../MAKE_CLOSURE_ token.
    is_lambda: True when emitting a lambda expression rather than "def".
    nested: unused here; kept for interface compatibility with the
            Python-3 variants of this routine.
    code_node: node whose .attr holds the code object to decompile.
    """

    def build_param(ast, name, default):
        """Render one formal parameter as source text:
            - handle default values
            - handle format tuple parameters
        """
        # if formal parameter is a tuple, the paramater name
        # starts with a dot (eg. '.1', '.2')
        if name.startswith("."):
            # replace the name with the tuple-string
            name = self.get_tuple_parameter(ast, name)
            pass

        if default:
            value = self.traverse(default, indent="")
            maybe_show_tree_param_default(self.showast, name, value)
            result = "%s=%s" % (name, value)
            if result[-2:] == "= ":  # default was 'LOAD_CONST None'
                result += "None"
            return result
        else:
            return name

    # MAKE_FUNCTION_... or MAKE_CLOSURE_...
    assert node[-1].kind.startswith("MAKE_")

    args_node = node[-1]
    if isinstance(args_node.attr, tuple):
        # positional args are after kwargs
        defparams = node[1 : args_node.attr[0] + 1]
        pos_args, kw_args, annotate_argc = args_node.attr
    else:
        # attr is a plain count of default parameters
        defparams = node[: args_node.attr]
        kw_args = 0
        pass

    # NOTE(review): lambda_index is always None here, so the branch
    # below is dead; it appears to be kept in parallel with the
    # Python-3 version of this routine — confirm before removing.
    lambda_index = None

    if lambda_index and is_lambda and iscode(node[lambda_index].attr):
        assert node[lambda_index].kind == "LOAD_LAMBDA"
        code = node[lambda_index].attr
    else:
        code = code_node.attr

    assert iscode(code)
    code = Code(code, self.scanner, self.currentclass)

    # add defaults values to parameter names
    argc = code.co_argcount
    paramnames = list(code.co_varnames[:argc])

    # defaults are for last n parameters, thus reverse
    paramnames.reverse()
    defparams.reverse()

    try:
        ast = self.build_ast(
            code._tokens,
            code._customize,
            is_lambda=is_lambda,
            noneInNames=("None" in code.co_names),
        )
    except (ParserError, ParserError2) as p:
        # Emit the parse error into the output; only abort decompilation
        # of this function when errors are not tolerated.
        self.write(str(p))
        if not self.tolerate_errors:
            self.ERROR = p
        return

    kw_pairs = 0
    indent = self.indent

    # build parameters; zip_longest pads missing defaults with None
    params = [
        build_param(ast, name, default)
        for name, default in zip_longest(paramnames, defparams, fillvalue=None)
    ]
    params.reverse()  # back to correct order

    if code_has_star_arg(code):
        params.append("*%s" % code.co_varnames[argc])
        argc += 1

    # dump parameter list (with default values)
    if is_lambda:
        self.write("lambda ", ", ".join(params))
        # If the last statement is None (which is the
        # same thing as "return None" in a lambda) and the
        # next to last statement is a "yield". Then we want to
        # drop the (return) None since that was just put there
        # to have something to after the yield finishes.
        # FIXME: this is a bit hoaky and not general
        if (
            len(ast) > 1
            and self.traverse(ast[-1]) == "None"
            and self.traverse(ast[-2]).strip().startswith("yield")
        ):
            del ast[-1]
            # Now pick out the expr part of the last statement
            ast_expr = ast[-1]
            while ast_expr.kind != "expr":
                ast_expr = ast_expr[0]
            ast[-1] = ast_expr
            pass
    else:
        self.write("(", ", ".join(params))

    if kw_args > 0:
        # flag bit 4 (CO_VARARGS) decides whether a bare "*" marker
        # is needed before keyword-only arguments
        if not (4 & code.co_flags):
            if argc > 0:
                self.write(", *, ")
            else:
                self.write("*, ")
            pass
        else:
            self.write(", ")

        # emit the first non-positional child (the keyword-only args)
        for n in node:
            if n == "pos_arg":
                continue
            else:
                self.preorder(n)
            break
        pass

    if code_has_star_star_arg(code):
        if argc > 0:
            self.write(", ")
        self.write("**%s" % code.co_varnames[argc + kw_pairs])

    if is_lambda:
        self.write(": ")
    else:
        self.println("):")

    if (
        len(code.co_consts) > 0 and code.co_consts[0] is not None and not is_lambda
    ):  # ugly
        # docstring exists, dump it
        print_docstring(self, indent, code.co_consts[0])

    code._tokens = None  # save memory
    if not is_lambda:
        assert ast == "stmts"

    all_globals = find_all_globals(ast, set())

    globals, nonlocals = find_globals_and_nonlocals(
        ast, set(), set(), code, self.version
    )

    # Python 2 doesn't support the "nonlocal" statement
    assert self.version >= 3.0 or not nonlocals

    # declare names that must be "global" inside this function
    for g in sorted((all_globals & self.mod_globs) | globals):
        self.println(self.indent, "global ", g)
    self.mod_globs -= all_globals
    has_none = "None" in code.co_names
    rn = has_none and not find_none(ast)
    self.gen_source(
        ast, code.co_name, code._customize, is_lambda=is_lambda, returnNone=rn
    )
    code._tokens = None
    code._customize = None  # save memory
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user