python-uncompyle6 (mirror of https://github.com/rocky/python-uncompyle6.git, synced 2025-08-03 08:49:51 +08:00)
Compare commits: 187 commits, release-py ... release-py
Commit table: 187 entries; only abbreviated SHA-1 hashes were recorded, and the Author and Date columns are empty in this view.
@@ -115,7 +115,7 @@ mechanisms and addressed problems and extensions by some other means.
Specifically, in `uncompyle`, decompilation of python bytecode 2.5 &
2.6 is done by transforming the byte code into a pseudo-2.7 Python
bytecode and is based on code from Eloi Vanderbeken. A bit of this
could have bene easily added by modifying grammar rules.
could have been easily added by modifying grammar rules.

This project, `uncompyle6`, abandons that approach for various
reasons. Having a grammar per Python version is much cleaner and it
NEWS.md (82 changed lines)
@@ -1,3 +1,60 @@
3.3.5 2019-07-03 Pre Independence Day
=====================================

Again, most of the work in this is release is thanks to x0ret.

- Handle annotation args in Python 3.x
- Fix vararg and function signatures in 3.x
- Some 3.x < 3.6 while(1)/if fixes - others remain
- Start reinstating else if -> elif
- LOAD_CONST -> LOAD_CODE where appropriate
- option `weak-verify` is now `syntax-verify`
- code cleanups, start using "blacken" to reformat text


3.3.4 2019-06-19 Fleetwood at 65
================================

Most of the work in this is release is thanks to x0ret.

- Major work was done by x0ret to correct function signatures and include annotation types
- Handle Python 3.6 STORE_ANNOTATION [#58](https://github.com/rocky/python-uncompyle6/issues/58)
- Friendlier assembly output
- `LOAD_CONST` replaced by `LOAD_STR` where appropriate to simplify parsing and improve clarity
- remove unneeded parenthesis in a generator expression when it is the single argument to the function [#247](https://github.com/rocky/python-uncompyle6/issues/246)
- Bug in noting an async function [#246](https://github.com/rocky/python-uncompyle6/issues/246)
- Handle unicode docstrings and fix docstring bugs [#241](https://github.com/rocky/python-uncompyle6/issues/241)
- Add short option -T as an alternate for --tree+
- Some grammar cleanup

3.3.3 2019-05-19 Henry and Lewis
================================

As before, decomplation bugs fixed. The focus has primarily been on
Python 3.7. But with this release, releases will be put on hold,as a
better control-flow detection is worked on . This has been needed for a
while, and is long overdue. It will probably also take a while to get
done as good as what we have now.

However this work will be done in a new project
[decompyle3](https://github.com/rocky/python-decompile3). In contrast
to _uncompyle6_ the code will be written assuming a modern Python 3,
e.g. 3.7. It is originally intended to decompile Python version 3.7
and greater.

* A number of Python 3.7+ chained comparisons were fixed
* Revise Python 3.6ish format string handling
* Go over operator precedence, e.g. for AST `IfExp`

Reported Bug Fixes
------------------

* [#239: 3.7 handling of 4-level attribute import](https://github.com/rocky/python-uncompyle6/issues/239),
* [#229: Inconsistent if block in python3.6](https://github.com/rocky/python-uncompyle6/issues/229),
* [#227: Args not appearing in decompiled src when kwargs is specified explicitly (call_ex_kw)](https://github.com/rocky/python-uncompyle6/issues/227)
2.7 confusion around "and" versus comprehension "if"
* [#225: 2.7 confusion around "and" vs comprehension "if"](https://github.com/rocky/python-uncompyle6/issues/225)

3.3.2 2019-05-03 Better Friday
==============================

@@ -12,7 +69,6 @@ get addressed in future releases

Pypy 3.6 support was started. Pypy 3.x detection fixed (via xdis)


3.3.1 2019-04-19 Good Friday
==========================

@@ -20,14 +76,14 @@ Lots of decomplation bugs, especially in the 3.x series fixed. Don't worry thoug

* Add annotation return values in 3.6+
* Fix 3.6+ lambda parameter handling decompilation
* Fix 3.7+ chained comparision decompilation
* Fix 3.7+ chained comparison decompilation
* split out semantic-action customization into more separate files
* Add 3.8 try/else
* Fix 2.7 generator decompilation
* Fix some parser failures fixes in 3.4+ using test_pyenvlib
* Add more run tests

3.3.0 2019-43-14 Holy Week
3.3.0 2019-04-14 Holy Week
==========================

* First cut at Python 3.8 (many bug remain)
@@ -41,23 +97,25 @@ Mostly more of the same: bug fixes and pull requests.
Bug Fixes
-----------

* [#155: Python 3.x bytecode confusing "try/else" with "try" in a loop](https://github.com/rocky/python-uncompyle6/issues/155),
* [#200: Python 3 bug in not detecting end bounds of an "if" ... "elif"](https://github.com/rocky/python-uncompyle6/issues/200),
* [#208: Comma placement in 3.6 and 3.7 **kwargs](https://github.com/rocky/python-uncompyle6/issues/208),
* [#209: Fix "if" return boundary in 3.6+](https://github.com/rocky/python-uncompyle6/issues/209),
* [#221: Wrong grammar for nested ifelsestmt (in Python 3.7 at least)](https://github.com/rocky/python-uncompyle6/issues/221)
* [#215: 2.7 can have two JUMP_BACKs at the end of a while loop](https://github.com/rocky/python-uncompyle6/issues/215)
* [#209: Fix "if" return boundary in 3.6+](https://github.com/rocky/python-uncompyle6/issues/209),
* [#208: Comma placement in 3.6 and 3.7 **kwargs](https://github.com/rocky/python-uncompyle6/issues/208),
* [#200: Python 3 bug in not detecting end bounds of an "if" ... "elif"](https://github.com/rocky/python-uncompyle6/issues/200),
* [#155: Python 3.x bytecode confusing "try/else" with "try" in a loop](https://github.com/rocky/python-uncompyle6/issues/155),


Pull Requests
----------------

* [#202: Better "assert" statement detemination in Python 2.7](https://github.com/rocky/python-uncompyle6/pull/211)
* [#202: Better "assert" statement determination in Python 2.7](https://github.com/rocky/python-uncompyle6/pull/211)
* [#204: Python 3.7 testing](https://github.com/rocky/python-uncompyle6/pull/204)
* [#205: Run more f-string tests on Python 3.7](https://github.com/rocky/python-uncompyle6/pull/205)
* [#211: support utf-8 chars in Python 3 sourcecode](https://github.com/rocky/python-uncompyle6/pull/202)


3.2.5 2018-12-30 Clearout sale
3.2.5 2018-12-30 Clear-out sale
======================================

- 3.7.2 Remove deprecation warning on regexp string that isn't raw
@@ -122,14 +180,14 @@ Jesus on Friday's New York Times puzzle: "I'm stuck on 2A"
- reduce 3.5, 3.6 control-flow bugs
- reduce ambiguity in rules that lead to long (exponential?) parses
- limit/isolate some 2.6/2.7,3.x grammar rules
- more runtime testing of decompiled code
- more removal of parenthesis around calls via setting precidence
- more run-time testing of decompiled code
- more removal of parenthesis around calls via setting precedence

3.1.0 2018-03-21 Equinox
==============================

- Add code_deparse_with_offset() fragment function.
- Correct paramenter call fragment deparse_code()
- Correct parameter call fragment deparse_code()
- Lots of 3.6, 3.x, and 2.7 bug fixes
About 5% of 3.6 fail parsing now. But
semantics still needs much to be desired.
README.rst (69 changed lines)
@@ -93,8 +93,8 @@ This uses setup.py, so it follows the standard Python routine:
A GNU makefile is also provided so :code:`make install` (possibly as root or
sudo) will do the steps above.

Testing
-------
Running Tests
-------------

::

@@ -122,16 +122,32 @@ For usage help:

   $ uncompyle6 -h

If you want strong verification of the correctness of the
decompilation process, add the `--verify` option. But there are
situations where this will indicate a failure, although the generated
program is semantically equivalent. Using option `--weak-verify` will
tell you if there is something definitely wrong. Generally, large
swaths of code are decompiled correctly, if not the entire program.
Verification
------------

You can also cross compare the results with pycdc_ . Since they work
differently, bugs here often aren't in that, and vice versa.
In older versions of Python it was possible to verify bytecode by
decompiling bytecode, and then compiling using the Python interpreter
for that bytecode version. Having done this the bytecode produced
could be compared with the original bytecode. However as Python's code
generation got better, this no longer was feasible.

If you want Python syntax verification of the correctness of the
decompilation process, add the `--syntax-verify` option. However since
Python syntax changes, you should use this option if the bytecode is
the right bytecode for the Python interpreter that will be checking
the syntax.

You can also cross compare the results with another python decompiler
like pycdc_ . Since they work differently, bugs here often aren't in
that, and vice versa.

There is an interesting class of these programs that is readily
available give stronger verification: those programs that when run
test themselves. Our test suite includes these.

And Python comes with another a set of programs like this: its test
suite for the standard library. We have some code in `test/stdlib` to
facilitate this kind of checking too.
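
As a rough illustration of the round-trip idea behind `--syntax-verify`, here
is a minimal sketch (assuming Python 3) built on the `code_deparse()` API that
this repository's pytest helpers use; it only confirms that the decompiled
text compiles again, it does not prove bytecode equivalence::

    from io import StringIO

    from uncompyle6 import PYTHON_VERSION, code_deparse

    def roundtrip_syntax_check(source, mode="exec"):
        # Compile the original source to a code object (a stand-in for a .pyc body).
        original_code = compile(source, "<string>", mode)
        # Decompile the code object back to Python source text.
        deparsed = code_deparse(original_code, out=StringIO(),
                                version=PYTHON_VERSION, compile_mode=mode)
        # "Syntax verify": the decompiled text must at least compile again
        # under the interpreter doing the checking.
        compile(deparsed.text, "<decompiled>", mode)
        return deparsed.text

    print(roundtrip_syntax_check("x = 1 if True else 2\n"))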

Known Bugs/Restrictions
-----------------------
@@ -146,27 +162,6 @@ All of the Python decompilers that I have looked at have problems
decompiling Python's control flow. In some cases we can detect an
erroneous decompilation and report that.

In older versions of Python it was possible to verify bytecode by
decompiling bytecode, and then compiling using the Python interpreter
for that bytecode version. Having done this the bytecode produced
could be compared with the original bytecode. However as Python's code
generation got better, this is no longer feasible.

There verification that we use that doesn't check bytecode for
equivalence but does check to see if the resulting decompiled source
is a valid Python program by running the Python interpreter. Because
the Python language has changed so much, for best results you should
use the same Python version in checking as was used in creating the
bytecode.

There are however an interesting class of these programs that is
readily available give stronger verification: those programs that
when run check some computation, or even better themselves.

And already Python has a set of programs like this: the test suite
for the standard library that comes with Python. We have some
code in `test/stdlib` to facilitate this kind of checking.

Python support is strongest in Python 2 for 2.7 and drops off as you
get further away from that. Support is also probably pretty good for
python 2.3-2.4 since a lot of the goodness of early the version of the
@@ -194,8 +189,12 @@ Between Python 3.5, 3.6 and 3.7 there have been major changes to the

Currently not all Python magic numbers are supported. Specifically in
some versions of Python, notably Python 3.6, the magic number has
changes several times within a version. We support only the released
magic. There are also customized Python interpreters, notably Dropbox,
changes several times within a version.

**We support only released versions, not candidate versions.** Note however
that the magic of a released version is usually the same as the *last* candidate version prior to release.

There are also customized Python interpreters, notably Dropbox,
which use their own magic and encrypt bytcode. With the exception of
the Dropbox's old Python 2.5 interpreter this kind of thing is not
handled.
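
For reference, the magic number is just the first four bytes of a ``.pyc``
file. A small sketch, using only the standard library and a hypothetical
file name, for comparing a file's magic against the running interpreter's
released magic::

    import importlib.util

    def pyc_magic(path):
        # The first 4 bytes of a .pyc file identify the bytecode version.
        with open(path, "rb") as f:
            return f.read(4)

    # "example.cpython-36.pyc" is a made-up name used only for illustration.
    if pyc_magic("example.cpython-36.pyc") != importlib.util.MAGIC_NUMBER:
        print("bytecode comes from a different (or unreleased) interpreter")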
@@ -218,7 +217,7 @@ See Also

* https://github.com/zrax/pycdc : purports to support all versions of Python. It is written in C++ and is most accurate for Python versions around 2.7 and 3.3 when the code was more actively developed. Accuracy for more recent versions of Python 3 and early versions of Python are especially lacking. See its `issue tracker <https://github.com/zrax/pycdc/issues>`_ for details. Currently lightly maintained.
* https://code.google.com/archive/p/unpyc3/ : supports Python 3.2 only. The above projects use a different decompiling technique than what is used here. Currently unmaintained.
* https://github.com/figment/unpyc3/ : fork of above, but supports Python 3.3 only. Includes some fixes like supporting function annotations. Currently unmaintained.
* https://github.com/wibiti/uncompyle2 : supports Python 2.7 only, but does that fairly well. There are situtations where `uncompyle6` results are incorrect while `uncompyle2` results are not, but more often uncompyle6 is correct when uncompyle2 is not. Because `uncompyle6` adheres to accuracy over idiomatic Python, `uncompyle2` can produce more natural-looking code when it is correct. Currently `uncompyle2` is lightly maintained. See its issue `tracker <https://github.com/wibiti/uncompyle2/issues>`_ for more details
* https://github.com/wibiti/uncompyle2 : supports Python 2.7 only, but does that fairly well. There are situations where `uncompyle6` results are incorrect while `uncompyle2` results are not, but more often uncompyle6 is correct when uncompyle2 is not. Because `uncompyle6` adheres to accuracy over idiomatic Python, `uncompyle2` can produce more natural-looking code when it is correct. Currently `uncompyle2` is lightly maintained. See its issue `tracker <https://github.com/wibiti/uncompyle2/issues>`_ for more details
* `How to report a bug <https://github.com/rocky/python-uncompyle6/blob/master/HOW-TO-REPORT-A-BUG.md>`_
* The HISTORY_ file.
* https://github.com/rocky/python-xdis : Cross Python version disassembler
@@ -226,7 +225,7 @@ See Also
* https://github.com/rocky/python-uncompyle6/wiki : Wiki Documents which describe the code and aspects of it in more detail


.. _trepan: https://pypi.python.org/pypi/trepan2
.. _trepan: https://pypi.python.org/pypi/trepan2g
.. _compiler: https://pypi.python.org/pypi/spark_parser
.. _HISTORY: https://github.com/rocky/python-uncompyle6/blob/master/HISTORY.md
.. _debuggers: https://pypi.python.org/pypi/trepan3k
@@ -58,7 +58,7 @@ entry_points = {
    ]}
ftp_url = None
install_requires = ['spark-parser >= 1.8.7, < 1.9.0',
                    'xdis >= 4.0.1, < 4.1.0']
                    'xdis >= 4.0.2, < 4.1.0']

license = 'GPL3'
mailing_list = 'python-debugger@googlegroups.com'
@@ -1,5 +1,5 @@
#!/bin/bash
PYTHON_VERSION=3.6.5
PYTHON_VERSION=3.6.8

# FIXME put some of the below in a common routine
function finish {
@@ -61,7 +61,7 @@ build_script:

test_script:
  # Run the project tests
  - "%CMD_IN_ENV% python test/test_pyenvlib.py --native --weak-verify"
  - "%CMD_IN_ENV% python test/test_pyenvlib.py --native --syntax-verify"

after_test:
  # If tests are successful, create binary packages for the project.
@@ -1,78 +0,0 @@
import sys
from uncompyle6 import PYTHON3
if PYTHON3:
    from io import StringIO
    minint = -sys.maxsize-1
    maxint = sys.maxsize
else:
    from StringIO import StringIO
    minint = -sys.maxint-1
    maxint = sys.maxint
from uncompyle6.semantics.helper import print_docstring


class PrintFake:
    def __init__(self):
        self.pending_newlines = 0
        self.f = StringIO()

    def write(self, *data):
        if (len(data) == 0) or (len(data) == 1 and data[0] == ''):
            return
        out = ''.join((str(j) for j in data))
        n = 0
        for i in out:
            if i == '\n':
                n += 1
                if n == len(out):
                    self.pending_newlines = max(self.pending_newlines, n)
                    return
            elif n:
                self.pending_newlines = max(self.pending_newlines, n)
                out = out[n:]
                break
            else:
                break

        if self.pending_newlines > 0:
            self.f.write('\n'*self.pending_newlines)
            self.pending_newlines = 0

        for i in out[::-1]:
            if i == '\n':
                self.pending_newlines += 1
            else:
                break

        if self.pending_newlines:
            out = out[:-self.pending_newlines]
        self.f.write(out)
    def println(self, *data):
        if data and not(len(data) == 1 and data[0] == ''):
            self.write(*data)
        self.pending_newlines = max(self.pending_newlines, 1)
        return
    pass

def test_docstring():

    for doc, expect in (
        ("Now is the time",
         ' """Now is the time"""'),
        ("""
Now is the time
""",
         ''' """
Now is the time
"""''')

        # (r'''func placeholder - ' and with ("""\nstring\n """)''',
        # """ r'''func placeholder - ' and with (\"\"\"\nstring\n\"\"\")'''"""),
        # (r"""func placeholder - ' and with ('''\nstring\n''') and \"\"\"\nstring\n\"\"\" """,
        # """ r\"\"\"func placeholder - ' and with ('''\nstring\n''') and \"\"\"\nstring\n\"\"\" \"\"\"""")
        ):

        o = PrintFake()
        # print(doc)
        # print(expect)
        print_docstring(o, ' ', doc)
        assert expect == o.f.getvalue()
@@ -9,6 +9,7 @@ def test_grammar():
    remain_tokens = set(tokens) - opcode_set
    remain_tokens = set([re.sub(r'_\d+$','', t) for t in remain_tokens])
    remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
    remain_tokens = set([re.sub('LOAD_CODE$','', t) for t in remain_tokens])
    remain_tokens = set(remain_tokens) - opcode_set
    assert remain_tokens == set([]), \
        "Remaining tokens %s\n====\n%s" % (remain_tokens, p.dump_grammar())
@@ -88,7 +89,7 @@ def test_grammar():
        COME_FROM_EXCEPT_CLAUSE
        COME_FROM_LOOP COME_FROM_WITH
        COME_FROM_FINALLY ELSE
        LOAD_GENEXPR LOAD_ASSERT LOAD_SETCOMP LOAD_DICTCOMP
        LOAD_GENEXPR LOAD_ASSERT LOAD_SETCOMP LOAD_DICTCOMP LOAD_STR LOAD_CODE
        LAMBDA_MARKER
        RETURN_END_IF RETURN_END_IF_LAMBDA RETURN_VALUE_LAMBDA RETURN_LAST
        """.split())
@@ -1,3 +1,4 @@
from uncompyle6 import PYTHON_VERSION
from uncompyle6.scanners.tok import Token

def test_token():
@@ -16,7 +17,7 @@ def test_token():
    # Make sure formatting of: LOAD_CONST False. We assume False is the 0th index
    # of co_consts.
    t = Token('LOAD_CONST', offset=1, attr=False, pattr=False, has_arg=True)
    expect = ' 1 LOAD_CONST 0 False'
    expect = ' 1 LOAD_CONST False'
    assert t.format() == expect

if __name__ == '__main__':
pytest/testdata/if-2.7.right (vendored, 2 changed lines)
@@ -8,5 +8,5 @@
9 STORE_NAME 2 'b'
12 JUMP_FORWARD 0 'to 15'
15_0 COME_FROM 12 '12'
15 LOAD_CONST 0 None
15 LOAD_CONST None
18 RETURN_VALUE
pytest/testdata/ifelse-2.7.right (vendored, 6 changed lines)
@@ -4,12 +4,12 @@
3 0 LOAD_NAME 0 'True'
3 POP_JUMP_IF_FALSE 15 'to 15'

4 6 LOAD_CONST 0 1
4 6 LOAD_CONST 1
9 STORE_NAME 1 'b'
12 JUMP_FORWARD 6 'to 21'

6 15 LOAD_CONST 1 2
6 15 LOAD_CONST 2
18 STORE_NAME 2 'd'
21_0 COME_FROM 12 '12'
21 LOAD_CONST 2 None
21 LOAD_CONST None
24 RETURN_VALUE
@@ -1,25 +1,28 @@
# future
from __future__ import print_function
# std
import os
import difflib
import subprocess
import tempfile
from StringIO import StringIO

import functools
# uncompyle6 / xdis
from uncompyle6 import PYTHON_VERSION, IS_PYPY, deparse_code
from uncompyle6 import PYTHON_VERSION, PYTHON3, IS_PYPY, code_deparse
# TODO : I think we can get xdis to support the dis api (python 3 version) by doing something like this there
from xdis.bytecode import Bytecode
from xdis.main import get_opcode
opc = get_opcode(PYTHON_VERSION, IS_PYPY)
from StringIO import StringIO
Bytecode = functools.partial(Bytecode, opc=opc)
import six

if PYTHON3:
    from io import StringIO
else:
    from StringIO import StringIO

def _dis_to_text(co):
    return Bytecode(co).dis()

try:
    import functools
    Bytecode = functools.partial(Bytecode, opc=opc)
    def _dis_to_text(co):
        return Bytecode(co).dis()
except:
    pass

def print_diff(original, uncompyled):
    """
@@ -42,11 +45,8 @@ def print_diff(original, uncompyled):
    print('\nTo display diff highlighting run:\n pip install BeautifulSoup4')
    diff = difflib.HtmlDiff().make_table(*args)

    f = tempfile.NamedTemporaryFile(delete=False)
    try:
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(str(diff).encode('utf-8'))
    finally:
        f.close()

    try:
        print()
@@ -63,7 +63,8 @@ def print_diff(original, uncompyled):
        print('\nFor side by side diff install elinks')
        diff = difflib.Differ().compare(original_lines, uncompyled_lines)
        print('\n'.join(diff))
        os.unlink(f.name)
    finally:
        os.unlink(f.name)


def are_instructions_equal(i1, i2):
@@ -124,11 +125,10 @@ def validate_uncompyle(text, mode='exec'):
    original_dis = _dis_to_text(original_code)
    original_text = text

    deparsed = deparse_code(PYTHON_VERSION, original_code,
                            compile_mode=mode,
                            out=StringIO(),
                            is_pypy=IS_PYPY)
    deparsed = code_deparse(original_code,
                            out=six.StringIO(),
                            version=PYTHON_VERSION,
                            compile_mode=mode)
    uncompyled_text = deparsed.text
    uncompyled_code = compile(uncompyled_text, '<string>', 'exec')
|
@@ -34,47 +34,47 @@ check-2.4 check-2.5 check-2.6 check-2.7: check-bytecode-2 check-bytecode-3 check
|
||||
#: Run working tests from Python 3.0
|
||||
check-3.0: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.0-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.0 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.0 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.1
|
||||
check-3.1: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.1-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.1 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.1 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.2
|
||||
check-3.2: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.2-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.2 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.2 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.3
|
||||
check-3.3: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.4
|
||||
check-3.4: check-bytecode check-3.4-ok check-2.7-ok
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.5
|
||||
check-3.5: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.6
|
||||
check-3.6: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.7
|
||||
check-3.7: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.7-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.7 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.7 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run working tests from Python 3.8
|
||||
check-3.8: check-bytecode
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.8-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.8 --weak-verify $(COMPILE)
|
||||
# #: Run working tests from Python 3.8
|
||||
# check-3.8: check-bytecode
|
||||
# $(PYTHON) test_pythonlib.py --bytecode-3.8-run --verify-run
|
||||
# $(PYTHON) test_pythonlib.py --bytecode-3.8 --syntax-verify $(COMPILE)
|
||||
|
||||
# FIXME
|
||||
#: this is called when running under pypy3.5-5.8.0 or pypy2-5.6.0
|
||||
@@ -98,7 +98,7 @@ check-bytecode-3:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.0 \
|
||||
--bytecode-3.1 --bytecode-3.2 --bytecode-3.3 \
|
||||
--bytecode-3.4 --bytecode-3.5 --bytecode-3.6 \
|
||||
--bytecode-3.7 --bytecode-3.8 \
|
||||
--bytecode-3.7 \
|
||||
--bytecode-pypy3.2
|
||||
|
||||
#: Check deparsing on selected bytecode 3.x
|
||||
@@ -177,7 +177,7 @@ grammar-coverage-2.6:
|
||||
grammar-coverage-2.7:
|
||||
-rm $(COVER_DIR)/spark-grammar-2.7.cover || true
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.7.cover $(PYTHON) test_pythonlib.py --bytecode-2.7
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.7.cover $(PYTHON) test_pyenvlib.py --2.7.14 --max=600
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.7.cover $(PYTHON) test_pyenvlib.py --2.7.16 --max=600
|
||||
|
||||
#: Get grammar coverage for Python 3.0
|
||||
grammar-coverage-3.0:
|
||||
@@ -220,65 +220,71 @@ grammar-coverage-3.5:
|
||||
grammar-coverage-3.6:
|
||||
rm $(COVER_DIR)/spark-grammar-3.6.cover || /bin/true
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.6.cover $(PYTHON) test_pythonlib.py --bytecode-3.6
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.6.cover $(PYTHON) test_pyenvlib.py --3.6.4 --max=280
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.6.cover $(PYTHON) test_pyenvlib.py --3.6.8 --max=280
|
||||
|
||||
#: Get grammar coverage for Python 3.7
|
||||
grammar-coverage-3.7:
|
||||
rm $(COVER_DIR)/spark-grammar-3.7.cover || /bin/true
|
||||
SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.7.cover $(PYTHON) test_pyenvlib.py --3.7.3 --max=500
|
||||
|
||||
#: Check deparsing Python 2.6
|
||||
check-bytecode-2.6:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.6-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.6 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.6 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 2.7
|
||||
check-bytecode-2.7:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.7-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.7 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-2.7 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.0
|
||||
check-bytecode-3.0:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.0-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.0 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.0 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.1
|
||||
check-bytecode-3.1:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.1-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.1 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.1 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.2
|
||||
check-bytecode-3.2:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.2-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.2 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.2 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.3
|
||||
check-bytecode-3.3:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.3 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.4
|
||||
check-bytecode-3.4:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.4 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.5
|
||||
check-bytecode-3.5:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.5 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.6
|
||||
check-bytecode-3.6:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.6 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.7
|
||||
check-bytecode-3.7:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.7 --weak-verify
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.7-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.7 --syntax-verify
|
||||
|
||||
#: Check deparsing Python 3.8
|
||||
check-bytecode-3.8:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.8-run --verify-run
|
||||
$(PYTHON) test_pythonlib.py --bytecode-3.8 --weak-verify
|
||||
# #: Check deparsing Python 3.8
|
||||
# check-bytecode-3.8:
|
||||
# $(PYTHON) test_pythonlib.py --bytecode-3.8-run --verify-run
|
||||
# $(PYTHON) test_pythonlib.py --bytecode-3.8 --syntax-verify
|
||||
|
||||
#: short tests for bytecodes only for this version of Python
|
||||
check-native-short:
|
||||
$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION) --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION) --syntax-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION)-run --verify-run $(COMPILE)
|
||||
|
||||
#: Run longer Python 2.6's lib files known to be okay
|
||||
@@ -287,19 +293,19 @@ check-2.4-ok:
|
||||
|
||||
#: Run longer Python 2.6's lib files known to be okay
|
||||
check-2.6-ok:
|
||||
$(PYTHON) test_pythonlib.py --ok-2.6 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --ok-2.6 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run longer Python 2.7's lib files known to be okay
|
||||
check-2.7-ok:
|
||||
$(PYTHON) test_pythonlib.py --ok-2.7 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --ok-2.7 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run longer Python 3.2's lib files known to be okay
|
||||
check-3.2-ok:
|
||||
$(PYTHON) test_pythonlib.py --ok-3.2 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --ok-3.2 --syntax-verify $(COMPILE)
|
||||
|
||||
#: Run longer Python 3.4's lib files known to be okay
|
||||
check-3.4-ok:
|
||||
$(PYTHON) test_pythonlib.py --ok-3.4 --weak-verify $(COMPILE)
|
||||
$(PYTHON) test_pythonlib.py --ok-3.4 --syntax-verify $(COMPILE)
|
||||
|
||||
#: PyPy of some sort. E.g. [PyPy 5.0.1 with GCC 4.8.4]
|
||||
# Skip for now
|
||||
|
Binary test bytecode files added (new files; contents shown only as "Binary file not shown."):
  test/bytecode_2.4_run/00_docstring.pyc
  test/bytecode_2.6_run/04_ifelse_parens.pyc
  test/bytecode_2.7_run/00_docstring.pyc
  test/bytecode_2.7_run/04_ifelse_parens.pyc
  test/bytecode_3.0_run/04_def_annotate.pyc
  test/bytecode_3.1_run/04_def_annotate.pyc
  test/bytecode_3.2_run/04_def_annotate.pyc
  test/bytecode_3.3/09_yield_from.pyc
  test/bytecode_3.3_run/04_def_annotate.pyc
  test/bytecode_3.3_run/08_while_if.pyc
  test/bytecode_3.4/09_yield_from.pyc
  test/bytecode_3.4_run/04_def_annotate.pyc
  test/bytecode_3.4_run/08_while_if.pyc
  test/bytecode_3.5/02_async.pyc
  test/bytecode_3.5_run/04_def_annotate.pyc
  test/bytecode_3.6/05_ann_mopdule2.pyc
  test/bytecode_3.6_run/00_docstring.pyc
  test/bytecode_3.6_run/02_call_ex_kw.pyc
  test/bytecode_3.6_run/04_def_annotate.pyc
  test/bytecode_3.7/01_chained_compare.pyc
  test/bytecode_3.7/02_async.pyc
  test/bytecode_3.7_run/00_docstring.pyc
  test/bytecode_3.7_run/01_and_not_else.pyc
  test/bytecode_3.7_run/02_call_ex_kw.pyc
  test/bytecode_3.7_run/04_def_annotate.pyc
Several other changed binary files appear only as "Binary file not shown." without a path.
@@ -1,7 +1,7 @@
#!/bin/bash
# Remake Python grammar statistics

typeset -A ALL_VERS=([2.4]=2.4.6 [2.5]=2.5.6 [2.6]=2.6.9 [2.7]=2.7.14 [3.2]=3.2.6 [3.3]=3.3.6 [3.4]=3.4.8 [3.5]=3.5.5 [3.6]=3.6.4)
typeset -A ALL_VERS=([2.4]=2.4.6 [2.5]=2.5.6 [2.6]=2.6.9 [2.7]=2.7.16 [3.2]=3.2.6 [3.3]=3.3.6 [3.4]=3.4.8 [3.5]=3.5.6 [3.6]=3.6.8, [3.7]=3.7.3)

if (( $# == 0 )); then
    echo 1>&2 "usage: $0 two-digit-version"
@@ -42,7 +42,7 @@ for VERSION in $PYVERSION ; do
    echo Python Version $(pyenv local) > $LOGFILE
    echo "" >> $LOGFILE
    typeset -i ALL_FILES_STARTTIME=$(date +%s)
    python ./test_pyenvlib.py --max ${MAX_TESTS} --weak-verify --$VERSION >>$LOGFILE 2>&1
    python ./test_pyenvlib.py --max ${MAX_TESTS} --syntax-verify --$VERSION >>$LOGFILE 2>&1
    rc=$?

    echo Python Version $(pyenv local) >> $LOGFILE
@@ -22,7 +22,7 @@ assert i[0]('a') == True
assert i[0]('A') == False

# Issue #170. Bug is needing an "conditional_not_lambda" grammar rule
# in addition the the "conditional_lambda" rule
# in addition the the "if_expr_lambda" rule
j = lambda a: False if not a else True
assert j(True) == True
assert j(False) == False
@@ -2,3 +2,10 @@
# This is RUNNABLE!
assert [False, True, True, True, True] == [False if not a else True for a in range(5)]
assert [True, False, False, False, False] == [False if a else True for a in range(5)]

# From bug #225
m = ['hi', 'he', 'ih', 'who', 'ho']
ms = {}
for f in (f for f in m if f.startswith('h')):
    ms[f] = 5
assert ms == {'hi': 5, 'he': 5, 'ho': 5}
@@ -8,7 +8,7 @@ list(x for x in range(10) if x % 2 if x % 3)

# expresion which evaluates True unconditionally,
# but leave dead code or junk around that we have to match on.
# Tests "conditional_true" rule
# Tests "if_expr_true" rule
5 if 1 else 2

0 or max(5, 3) if 0 else 3
test/simple_source/bug26/04_ifelse_parens.py (new file, 9 lines)
@@ -0,0 +1,9 @@
# From 3.7.3 dataclasses.py
# Bug was handling precedence. Need parenthesis before IfExp.
#
# RUNNABLE!
def _hash_add(fields):
    flds = [f for f in fields if (4 if f is None else f)]
    return flds

assert _hash_add([None, True, False, 3]) == [None, True, 3]
@@ -1,6 +1,6 @@
# Bug found in 2.7 test_itertools.py
# Bug was erroneously using reduction to unconditional_true
# A proper fix would be to use unconditional_true only when we
# Bug was erroneously using reduction to if_expr_true
# A proper fix would be to use if_expr_true only when we
# can determine there is or was dead code.
from itertools import izip_longest
for args in [['abc', range(6)]]:
@@ -1,13 +1,61 @@
# Python 3 annotations
# Python 3 positional, kwonly, varargs, and annotations. Ick.

def foo(a, b: 'annotating b', c: int) -> float:
    print(a + b + c)
# RUNNABLE!
def test1(args_1, c: int, w=4, *varargs: int, **kwargs: 'annotating kwargs') -> tuple:
    return (args_1, c, w, kwargs)

def test2(args_1, args_2, c: int, w=4, *varargs: int, **kwargs: 'annotating kwargs'):
    return (args_1, args_2, c, w, varargs, kwargs)

def test3(c: int, w=4, *varargs: int, **kwargs: 'annotating kwargs') -> float:
    return 5.4

def test4(a: float, c: int, *varargs: int, **kwargs: 'annotating kwargs') -> float:
    return 5.4

def test5(a: float, c: int = 5, *varargs: int, **kwargs: 'annotating kwargs') -> float:
    return 5.4

def test6(a: float, c: int, test=None):
    return (a, c, test)

def test7(*varargs: int, **kwargs):
    return (varargs, kwargs)

def test8(x=55, *varargs: int, **kwargs) -> list:
    return (x, varargs, kwargs)

def test9(arg_1=55, *varargs: int, y=5, **kwargs):
    return x, varargs, int, y, kwargs

def test10(args_1, b: 'annotating b', c: int) -> float:
    return 5.4

def test11(*, name):
    return args, name

def test12(a, *args, name):
    return a, args
    pass

def test13(*args, name):
    return args, name

def test14(*args, name: int=1, qname):
    return args, name, qname

def test15(*args, name='S', fname, qname=4):
    return args, name, fname, qname

# From 3.4 /asyncio/streams.py open_connection
_DEFAULT_LIMIT = 5
def test16(host=None, port=None, *,
           loop=None, limit=_DEFAULT_LIMIT, **kwds):
    return host, port, loop, limit, kwds

# Python 3.1 _pyio.py uses the -> "IOBase" annotation
def open(file, mode = "r", buffering = None,
         encoding = None, errors = None,
         newline = None, closefd = True) -> "IOBase":
    return text
def o(f, mode = "r", buffering = None) -> "IOBase":
    return (f, mode, buffering)

def foo1(x: 'an argument that defaults to 5' = 5):
    print(x)
@@ -18,13 +66,87 @@ def div(a: dict(type=float, help='the dividend'),
    """Divide a by b"""
    return a / b

class TestSignatureObject(unittest.TestCase):
class TestSignatureObject1():
    def test_signature_on_wkwonly(self):
        def test(*, a:float, b:str) -> int:
        def test(*, a:float, b:str, c:str = 'test', **kwargs: int) -> int:
            pass

class SupportsInt(_Protocol):
class TestSignatureObject2():
    def test_signature_on_wkwonly(self):
        def test(*, c='test', a:float, b:str="S", **kwargs: int) -> int:
            pass

class TestSignatureObject3():
    def test_signature_on_wkwonly(self):
        def test(*, c='test', a:float, kwargs:str="S", **b: int) -> int:
            pass

class TestSignatureObject4():
    def test_signature_on_wkwonly(self):
        def test(x=55, *args, c:str='test', a:float, kwargs:str="S", **b: int) -> int:
            pass

class TestSignatureObject5():
    def test_signature_on_wkwonly(self):
        def test(x=55, *args: int, c='test', a:float, kwargs:str="S", **b: int) -> int:
            pass

class TestSignatureObject5():
    def test_signature_on_wkwonly(self):
        def test(x:int=55, *args: (int, str), c='test', a:float, kwargs:str="S", **b: int) -> int:
            pass

class TestSignatureObject7():
    def test_signature_on_wkwonly(self):
        def test(c='test', kwargs:str="S", **b: int) -> int:
            pass

class TestSignatureObject8():
    def test_signature_on_wkwonly(self):
        def test(**b: int) -> int:
            pass

class TestSignatureObject9():
    def test_signature_on_wkwonly(self):
        def test(a, **b: int) -> int:
            pass

class SupportsInt():

    @abstractmethod
    def __int__(self) -> int:
        pass

def ann1(args_1, b: 'annotating b', c: int, *varargs: str) -> float:
    assert ann1.__annotations__['b'] == 'annotating b'
    assert ann1.__annotations__['c'] == int
    assert ann1.__annotations__['varargs'] == str
    assert ann1.__annotations__['return'] == float

def ann2(args_1, b: int = 5, **kwargs: float) -> float:
    assert ann2.__annotations__['b'] == int
    assert ann2.__annotations__['kwargs'] == float
    assert ann2.__annotations__['return'] == float
    assert b == 5

class TestSignatureObject():
    def test_signature_on_wkwonly(self):
        def test(x:int=55, *args: (int, str), c='test', a:float, kwargs:str="S", **b: int) -> int:
            pass

assert test1(1, 5) == (1, 5, 4, {})
assert test1(1, 5, 6, foo='bar') == (1, 5, 6, {'foo': 'bar'})
assert test2(2, 3, 4) == (2, 3, 4, 4, (), {})
assert test3(10, foo='bar') == 5.4
assert test4(9.5, 7, 6, 4, bar='baz') == 5.4
### FIXME: fill in...
assert test6(1.2, 3) == (1.2, 3, None)
assert test6(2.3, 4, 5) == (2.3, 4, 5)

ann1(1, 'test', 5)
ann2(1)

### FIXME: fill in...

assert test12(1, 2, 3, name='hi') == (1, (2, 3)), "a, *args, name"
assert test13(1, 2, 3, name='hi') == ((1, 2, 3), 'hi'), "*args, name"
assert test16('localhost', loop=2, limit=3, a='b') == ('localhost', None, 2, 3, {'a': 'b'})
test/simple_source/bug34/08_while_if.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# Testing "while 1" versus "while" handling with if/elif/else's

def while_test(a, b, c):
    while a != 2:
        if b:
            a += 1
        elif c:
            c = 0
        else:
            break
    return a, b, c


def while1_test(a, b, c):
    while 1:
        if a != 2:
            if b:
                a = 3
                b = 0
            elif c:
                c = 0
            else:
                a += b + c
                break
    return a, b, c


assert while_test(2, 0, 0) == (2, 0, 0), "no while loops"
assert while_test(0, 1, 0) == (2, 1, 0), "two while loops of b branch"
assert while_test(0, 0, 0) == (0, 0, 0), "0 while loops, else branch"

# FIXME: put this in a timer, and try with a=2
assert while1_test(4, 1, 1) == (3, 0, 0), "three while1 loops"
assert while1_test(4, 0, 0) == (4, 0, 0), " one while1 loop"
test/simple_source/bug35/02_async.py (new file, 17 lines)
@@ -0,0 +1,17 @@
# From 3.7.3 asyncio/base_events.py
# We had (still have) screwy logic. Python 3.5 code node detection was off too.

async def create_connection(self):
    infos = await self._ensure_resolved()

    laddr_infos = await self._ensure_resolved()
    for family in infos:
        for laddr in laddr_infos:
            family = 1
        else:
            continue
        await self.sock_connect()
    else:
        raise OSError('Multiple exceptions: {}' for exc in family)

    return
@@ -1,5 +1,5 @@
# Adapted from Python 3.6 trace.py
# Bug was in handling BUID_TUPLE_UNPACK created via
# Bug was in handling BUILD_TUPLE_UNPACK created via
# *opts.arguments
import argparse
parser = argparse.ArgumentParser()
@@ -7,4 +7,4 @@ parser.add_argument('filename', nargs='?')
parser.add_argument('arguments', nargs=argparse.REMAINDER)
opts = parser.parse_args(["foo", "a", "b"])
argv = opts.filename, *opts.arguments
assert argv == ('foo', 'a', 'b')
assert argv == ('foo', 'a', 'b'), "Reconstruct tuple using '*' and BUILD_TUPLE_UNPACK"
@@ -4,8 +4,8 @@
var1 = 'x'
var2 = 'y'
abc = 'def'
assert (f'interpolate {var1} strings {var2!r} {var2!s} py36' ==
        "interpolate x strings 'y' y py36")
assert (f"interpolate {var1} strings {var2!r} {var2!s} 'py36" ==
        "interpolate x strings 'y' y 'py36")
assert 'def0' == f'{abc}0'
assert 'defdef' == f'{abc}{abc!s}'

@@ -38,4 +38,31 @@ filename = '.'
source = 'foo'
source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
          + source + "\ndel __file__")
print(source)

# Note how { and } are *not* escaped here
f = 'one'
name = 'two'
assert(f"{f}{'{{name}}'} {f}{'{name}'}") == 'one{{name}} one{name}'

# From 3.7.3 dataclasses.py
log_rounds = 5
assert "05$" == f'{log_rounds:02d}$'


def testit(a, b, l):
    # print(l)
    return l

# The call below shows the need for BUILD_STRING to count expr arguments.
# Also note that we use {{ }} to escape braces in contrast to the example
# above.
def _repr_fn(fields):
    return testit('__repr__',
                  ('self',),
                  ['return xx + f"(' +
                   ', '.join([f"{f}={{self.{f}!r}}"
                              for f in fields]) +
                   ')"'])

fields = ['a', 'b', 'c']
assert _repr_fn(fields) == ['return xx + f"(a={self.a!r}, b={self.b!r}, c={self.c!r})"']
test/simple_source/bug36/02_call_ex_kw.py (new file, 47 lines)
@@ -0,0 +1,47 @@
# From #227
# Bug was not handling call_ex_kw correctly
# This appears in
# showparams(c, test="A", **extra_args)
# below

def showparams(c, test, **extra_args):
    return {'c': c, **extra_args, 'test': test}

def f(c, **extra_args):
    return showparams(c, test="A", **extra_args)

def f1(c, d, **extra_args):
    return showparams(c, test="B", **extra_args)

def f2(**extra_args):
    return showparams(1, test="C", **extra_args)

def f3(c, *args, **extra_args):
    return showparams(c, *args, **extra_args)

assert f(1, a=2, b=3) == {'c': 1, 'a': 2, 'b': 3, 'test': 'A'}

a = {'param1': 2}
assert f1('2', '{\'test\': "4"}', test2='a', **a) \
    == {'c': '2', 'test2': 'a', 'param1': 2, 'test': 'B'}
assert f1(2, '"3"', test2='a', **a) \
    == {'c': 2, 'test2': 'a', 'param1': 2, 'test': 'B'}
assert f1(False, '"3"', test2='a', **a) \
    == {'c': False, 'test2': 'a', 'param1': 2, 'test': 'B'}
assert f(2, test2='A', **a) \
    == {'c': 2, 'test2': 'A', 'param1': 2, 'test': 'A'}
assert f(str(2) + str(1), test2='a', **a) \
    == {'c': '21', 'test2': 'a', 'param1': 2, 'test': 'A'}
assert f1((a.get('a'), a.get('b')), a, test3='A', **a) \
    == {'c': (None, None), 'test3': 'A', 'param1': 2, 'test': 'B'}

b = {'b1': 1, 'b2': 2}
assert f2(**a, **b) == \
    {'c': 1, 'param1': 2, 'b1': 1, 'b2': 2, 'test': 'C'}

c = (2,)
d = (2, 3)
assert f(2, **a) == {'c': 2, 'param1': 2, 'test': 'A'}
assert f3(2, *c, **a) == {'c': 2, 'param1': 2, 'test': 2}
assert f3(*d, **a) == {'c': 2, 'param1': 2, 'test': 3}
test/simple_source/bug36/05_ann_mopdule2.py (new file, 37 lines)
@@ -0,0 +1,37 @@
# This is from Python 3.6's test directory.
"""
Some correct syntax for variable annotation here.
More examples are in test_grammar and test_parser.
"""

from typing import no_type_check, ClassVar

i: int = 1
j: int
x: float = i/10

def f():
    class C: ...
    return C()

f().new_attr: object = object()

class C:
    def __init__(self, x: int) -> None:
        self.x = x

c = C(5)
c.new_attr: int = 10

__annotations__ = {}


@no_type_check
class NTC:
    def meth(self, param: complex) -> None:
        ...

class CV:
    var: ClassVar['CV']

CV.var = CV()
test/simple_source/bug37/01_and_not_else.py (new file, 16 lines)
@@ -0,0 +1,16 @@
# From 3.7.3 base64.py
# Bug was handling "and not" in an
# if/else in the presence of better Python bytecode generatation

# RUNNABLE!
def foo(foldnuls, word):
    x = 5 if foldnuls and not word else 6
    return x

for expect, foldnuls, word in (
        (6, True, True),
        (5, True, False),
        (6, False, True),
        (6, False, False)
):
    assert foo(foldnuls, word) == expect
@@ -11,9 +11,16 @@ def chained_compare_b(a, obj):
    if -0x80000000 <= obj <= 0x7fffffff:
        return 5

def chained_compare_c(a, d):
    for i in len(d):
        if a == d[i] != 2:
            return 5

chained_compare_a(3)
try:
    chained_compare_a(8)
except ValueError:
    pass
chained_compare_b(True, 0x0)

chained_compare_c(3, [3])
@@ -8,4 +8,7 @@ def x(s):
        if not k.startswith('_')
    }

assert x((('_foo', None),)) == {}
# Yes, the print() is funny. This is
# to test though a 2-arg assert where
# the 2nd argument is not a string.
assert x((('_foo', None),)) == {}, print("See issue #162")
@@ -11,6 +11,9 @@
def _walk_dir(dir, dfile, ddir=None):
    yield from _walk_dir(dir, ddir=dfile)

def ybug(g):
    yield from g

# From 3.5.1 _wakrefset.py
#
# 3.5:
@@ -1,10 +1,55 @@
# -*- coding: utf-8 -*-
# uncompyle2 bug was not escaping """ properly
r'''func placeholder - with ("""\nstring\n""")'''
def foo():
    r'''func placeholder - ' and with ("""\nstring\n""")'''

def bar():
    # RUNNABLE!
    r'''func placeholder - with ("""\nstring\n""")'''

def dq0():
    assert __doc__ == r'''func placeholder - with ("""\nstring\n""")'''

def dq1():
    """assert that dedent() has no effect on 'text'"""
    assert dq1.__doc__ == """assert that dedent() has no effect on 'text'"""

def dq2():
    '''assert that dedent() has no effect on 'text\''''
    assert dq1.__doc__ == '''assert that dedent() has no effect on 'text\''''

def dq3():
    """assert that dedent() has no effect on 'text\""""
    assert dq3.__doc__ == """assert that dedent() has no effect on 'text\""""

def dq4():
    """assert that dedent() has no effect on 'text'"""
    assert dq4.__doc__ == """assert that dedent() has no effect on 'text'"""

def dq5():
    r'''func placeholder - ' and with ("""\nstring\n""")'''
    assert dq5.__doc__ == r'''func placeholder - ' and with ("""\nstring\n""")'''

def dq6():
    r"""func placeholder - ' and with ('''\nstring\n''') and \"\"\"\nstring\n\"\"\" """
    assert dq6.__doc__ == r"""func placeholder - ' and with ('''\nstring\n''') and \"\"\"\nstring\n\"\"\" """

def dq7():
    u""" <----- SEE 'u' HERE
    >>> mylen(u"áéíóú")
    5
    """
    assert dq7.__doc__ == u""" <----- SEE 'u' HERE
    >>> mylen(u"áéíóú")
    5
    """

def dq8():
    u""" <----- SEE 'u' HERE
    >>> mylen(u"تست")
    5
    """
    assert dq8.__doc__ == u""" <----- SEE 'u' HERE
    >>> mylen(u"تست")
    5
    """

def baz():
    """
@@ -20,3 +65,28 @@ def baz():
>>> t.rundict(m1.__dict__, 'rundict_test_pvt') # None are skipped.
TestResults(failed=0, attempted=8)
"""
    assert baz.__doc__ == \
"""
... '''>>> assert 1 == 1
... '''
... \"""
>>> exec test_data in m1.__dict__
>>> exec test_data in m2.__dict__
>>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H})

Tests that objects outside m1 are excluded:
\"""
>>> t.rundict(m1.__dict__, 'rundict_test_pvt') # None are skipped.
TestResults(failed=0, attempted=8)
"""

dq0()
dq1()
dq2()
dq3()
dq4()
dq5()
dq6()
dq7()
dq8()
baz()
@@ -7,3 +7,4 @@ import http.client as httpclient
if len(__file__) == 0:
    # a.b.c should force consecutive LOAD_ATTRs
    import a.b.c as d
    import stuff0.stuff1.stuff2.stuff3 as stuff3
@@ -136,7 +136,7 @@ if __name__ == '__main__':
    test_options_keys = list(test_options.keys())
    test_options_keys.sort()
    opts, args = getopt.getopt(sys.argv[1:], '',
                               ['start-with=', 'verify', 'verify-run', 'weak-verify',
                               ['start-with=', 'verify', 'verify-run', 'syntax-verify',
                                'max=', 'coverage', 'all', ] \
                               + test_options_keys )
    vers = ''
@@ -144,7 +144,7 @@ if __name__ == '__main__':
    for opt, val in opts:
        if opt == '--verify':
            do_verify = 'strong'
        elif opt == '--weak-verify':
        elif opt == '--syntax-verify':
            do_verify = 'weak'
        elif opt == '--verify-run':
            do_verify = 'verify-run'
@@ -193,7 +193,7 @@ if __name__ == '__main__':
    test_options_keys.sort()
    opts, args = getopt.getopt(sys.argv[1:], '',
                               ['start-with=', 'verify', 'verify-run',
                                'weak-verify', 'all',
                                'syntax-verify', 'all',
                                'compile', 'coverage',
                                'no-rm'] \
                               + test_options_keys )
@@ -210,7 +210,7 @@ if __name__ == '__main__':
    for opt, val in opts:
        if opt == '--verify':
            test_opts['do_verify'] = 'strong'
        elif opt == '--weak-verify':
        elif opt == '--syntax-verify':
            test_opts['do_verify'] = 'weak'
        elif opt == '--verify-run':
            test_opts['do_verify'] = 'verify-run'
@@ -51,14 +51,8 @@ import uncompyle6.semantics.fragments
# Export some functions
from uncompyle6.main import decompile_file

# For compatibility
uncompyle_file = decompile_file

# Convenience functions so you can say:
# from uncompyle6 import (code_deparse, deparse_code2str)

code_deparse = uncompyle6.semantics.pysource.code_deparse
deparse_code2str = uncompyle6.semantics.pysource.deparse_code2str

# This is deprecated:
deparse_code = uncompyle6.semantics.pysource.deparse_code
code_deparse = uncompyle6.semantics.pysource.code_deparse
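The convenience exports above make in-process decompilation possible. A minimal hedged sketch follows; the exact defaults of deparse_code2str may differ between releases, so treat the one-argument call form as an assumption rather than documented API.

# Minimal sketch, assuming deparse_code2str(code_object) works with its defaults.
from uncompyle6 import deparse_code2str

def add(a, b):
    return a + b

# Reconstruct source text for the function's code object and print it.
print(deparse_code2str(add.__code__))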
@@ -37,9 +37,11 @@ Options:
  --fragments         use fragments deparser
  --verify            compare generated source with input byte-code
  --verify-run        compile generated source, run it and check exit code
  --weak-verify       compile generated source
  --syntax-verify     compile generated source
  --linemaps          generate line number correspondences between byte-code
                      and generated source output
  --encoding <encoding>
                      use <encoding> in generated source according to pep-0263
  --help              show this message

Debugging Options:
@@ -80,14 +82,15 @@ def main_bin():
    timestampfmt = "# %Y.%m.%d %H:%M:%S %Z"

    try:
        opts, pyc_paths = getopt.getopt(sys.argv[1:], 'hac:gtdrVo:p:',
        opts, pyc_paths = getopt.getopt(sys.argv[1:], 'hac:gtTdrVo:p:',
                                        'help asm compile= grammar linemaps recurse '
                                        'timestamp tree tree+ '
                                        'fragments verify verify-run version '
                                        'weak-verify '
                                        'showgrammar'.split(' '))
    except getopt.GetoptError(e):
        sys.stderr.write('%s: %s\n' % (os.path.basename(sys.argv[0]), e))
                                        'syntax-verify '
                                        'showgrammar encoding='.split(' '))
    except getopt.GetoptError, e:
        sys.stderr.write('%s: %s\n' %
                         (os.path.basename(sys.argv[0]), e))
        sys.exit(-1)

    options = {}
@@ -100,7 +103,7 @@ def main_bin():
|
||||
sys.exit(0)
|
||||
elif opt == '--verify':
|
||||
options['do_verify'] = 'strong'
|
||||
elif opt == '--weak-verify':
|
||||
elif opt == '--syntax-verify':
|
||||
options['do_verify'] = 'weak'
|
||||
elif opt == '--fragments':
|
||||
options['do_fragments'] = True
|
||||
@@ -114,7 +117,7 @@ def main_bin():
|
||||
elif opt in ('--tree', '-t'):
|
||||
options['showast'] = True
|
||||
options['do_verify'] = None
|
||||
elif opt in ('--tree+',):
|
||||
elif opt in ('--tree+', '-T'):
|
||||
options['showast'] = 'Full'
|
||||
options['do_verify'] = None
|
||||
elif opt in ('--grammar', '-g'):
|
||||
@@ -129,6 +132,8 @@ def main_bin():
|
||||
numproc = int(val)
|
||||
elif opt in ('--recurse', '-r'):
|
||||
recurse_dirs = True
|
||||
elif opt == '--encoding':
|
||||
options['source_encoding'] = val
|
||||
else:
|
||||
sys.stderr.write(opt)
|
||||
usage()
|
||||
|
@@ -42,7 +42,7 @@ def _get_outstream(outfile):

def decompile(
        bytecode_version, co, out=None, showasm=None, showast=False,
        timestamp=None, showgrammar=False, code_objects={},
        timestamp=None, showgrammar=False, source_encoding=None, code_objects={},
        source_size=None, is_pypy=None, magic_int=None,
        mapstream=None, do_fragments=False):
    """
@@ -81,6 +81,8 @@ def decompile(
    m = ""

    sys_version_lines = sys.version.split('\n')
    if source_encoding:
        write('# -*- coding: %s -*-' % source_encoding)
    write('# uncompyle6 version %s\n'
          '# %sPython bytecode %s%s\n# Decompiled from: %sPython %s' %
          (VERSION, co_pypy_str, bytecode_version,
@@ -147,7 +149,7 @@ def compile_file(source_path):


def decompile_file(filename, outstream=None, showasm=None, showast=False,
                   showgrammar=False, mapstream=None, do_fragments=False):
                   showgrammar=False, source_encoding=None, mapstream=None, do_fragments=False):
    """
    decompile a Python byte-code file (.pyc). Returns a list of
    all of the deparsed objects found in `filename`.
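For readers following the new source_encoding parameter, here is a minimal hedged sketch of the call; the path and encoding are placeholders, not values taken from the diff.

# Minimal sketch: decompile one .pyc to stdout and emit a PEP 263 coding
# cookie via the new source_encoding parameter. "program.pyc" is a placeholder.
import sys
from uncompyle6.main import decompile_file

decompile_file("program.pyc", outstream=sys.stdout, source_encoding="utf-8")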
@@ -163,12 +165,12 @@ def decompile_file(filename, outstream=None, showasm=None, showast=False,
|
||||
for con in co:
|
||||
deparsed.append(
|
||||
decompile(version, con, outstream, showasm, showast,
|
||||
timestamp, showgrammar, code_objects=code_objects,
|
||||
timestamp, showgrammar, source_encoding, code_objects=code_objects,
|
||||
is_pypy=is_pypy, magic_int=magic_int),
|
||||
mapstream=mapstream)
|
||||
else:
|
||||
deparsed = [decompile(version, co, outstream, showasm, showast,
|
||||
timestamp, showgrammar,
|
||||
timestamp, showgrammar, source_encoding,
|
||||
code_objects=code_objects, source_size=source_size,
|
||||
is_pypy=is_pypy, magic_int=magic_int,
|
||||
mapstream=mapstream, do_fragments=do_fragments)]
|
||||
@@ -179,7 +181,7 @@ def decompile_file(filename, outstream=None, showasm=None, showast=False,
|
||||
# FIXME: combine into an options parameter
|
||||
def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
showasm=None, showast=False, do_verify=False,
|
||||
showgrammar=False, raise_on_error=False,
|
||||
showgrammar=False, source_encoding=None, raise_on_error=False,
|
||||
do_linemaps=False, do_fragments=False):
|
||||
"""
|
||||
in_base base directory for input files
|
||||
@@ -250,7 +252,7 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
# Try to uncompile the input file
|
||||
try:
|
||||
deparsed = decompile_file(infile, outstream, showasm, showast, showgrammar,
|
||||
linemap_stream, do_fragments)
|
||||
source_encoding, linemap_stream, do_fragments)
|
||||
if do_fragments:
|
||||
for d in deparsed:
|
||||
last_mod = None
|
||||
@@ -280,6 +282,19 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
sys.stdout.write("\n")
|
||||
sys.stderr.write("\nLast file: %s " % (infile))
|
||||
raise
|
||||
except RuntimeError, e:
|
||||
sys.stdout.write("\n%s\n" % str(e))
|
||||
if str(e).startswith('Unsupported Python'):
|
||||
sys.stdout.write("\n")
|
||||
sys.stderr.write("\n# Unsupported bytecode in file %s\n# %s\n" % (infile, e))
|
||||
else:
|
||||
if outfile:
|
||||
outstream.close()
|
||||
os.remove(outfile)
|
||||
sys.stdout.write("\n")
|
||||
sys.stderr.write("\nLast file: %s " % (infile))
|
||||
raise
|
||||
|
||||
# except:
|
||||
# failed_files += 1
|
||||
# if current_outfile:
|
||||
@@ -337,9 +352,9 @@ def main(in_base, out_base, compiled_files, source_files, outfile=None,
|
||||
# mem_usage = __memUsage()
|
||||
print mess, infile
|
||||
if current_outfile:
|
||||
sys.stdout.write("%s\r" %
|
||||
status_msg(do_verify, tot_files, okay_files, failed_files,
|
||||
verify_failed_files, do_verify))
|
||||
sys.stdout.write("%s -- %s\r" %
|
||||
(infile, status_msg(do_verify, tot_files, okay_files, failed_files,
|
||||
verify_failed_files, do_verify)))
|
||||
try:
|
||||
# FIXME: Something is weird with Pypy here
|
||||
sys.stdout.flush()
|
||||
|
@@ -59,7 +59,6 @@ class PythonParser(GenericASTBuilder):
|
||||
'imports_cont',
|
||||
'kvlist_n',
|
||||
# Python 3.6+
|
||||
'joined_str',
|
||||
'come_from_loops',
|
||||
]
|
||||
self.collect = frozenset(nt_list)
|
||||
@@ -81,7 +80,7 @@ class PythonParser(GenericASTBuilder):
|
||||
# FIXME: would love to do expr, sstmts, stmts and
|
||||
# so on but that would require major changes to the
|
||||
# semantic actions
|
||||
self.singleton = frozenset(('str', 'joined_str', 'store', '_stmts', 'suite_stmts_opt',
|
||||
self.singleton = frozenset(('str', 'store', '_stmts', 'suite_stmts_opt',
|
||||
'inplace_op'))
|
||||
# Instructions filled in from scanner
|
||||
self.insts = []
|
||||
@@ -497,6 +496,7 @@ class PythonParser(GenericASTBuilder):
|
||||
def p_expr(self, args):
|
||||
'''
|
||||
expr ::= _mklambda
|
||||
expr ::= LOAD_CODE
|
||||
expr ::= LOAD_FAST
|
||||
expr ::= LOAD_NAME
|
||||
expr ::= LOAD_CONST
|
||||
@@ -802,7 +802,6 @@ def python_parser(version, co, out=sys.stdout, showasm=False,
|
||||
if __name__ == '__main__':
|
||||
def parse_test(co):
|
||||
from uncompyle6 import PYTHON_VERSION, IS_PYPY
|
||||
ast = python_parser('2.7.13', co, showasm=True, is_pypy=True)
|
||||
ast = python_parser(PYTHON_VERSION, co, showasm=True, is_pypy=IS_PYPY)
|
||||
print(ast)
|
||||
return
|
||||
|
@@ -96,9 +96,9 @@ class Python2Parser(PythonParser):
|
||||
for ::= SETUP_LOOP expr for_iter store
|
||||
for_block POP_BLOCK _come_froms
|
||||
|
||||
del_stmt ::= delete_subscr
|
||||
delete_subscr ::= expr expr DELETE_SUBSCR
|
||||
del_stmt ::= expr DELETE_ATTR
|
||||
del_stmt ::= delete_subscript
|
||||
delete_subscript ::= expr expr DELETE_SUBSCR
|
||||
del_stmt ::= expr DELETE_ATTR
|
||||
|
||||
_mklambda ::= load_closure mklambda
|
||||
kwarg ::= LOAD_CONST expr
|
||||
@@ -388,10 +388,10 @@ class Python2Parser(PythonParser):
|
||||
continue
|
||||
elif opname == 'DELETE_SUBSCR':
|
||||
self.addRule("""
|
||||
del_stmt ::= delete_subscr
|
||||
delete_subscr ::= expr expr DELETE_SUBSCR
|
||||
del_stmt ::= delete_subscript
|
||||
delete_subscript ::= expr expr DELETE_SUBSCR
|
||||
""", nop_func)
|
||||
self.check_reduce['delete_subscr'] = 'AST'
|
||||
self.check_reduce['delete_subscript'] = 'AST'
|
||||
custom_seen_ops.add(opname)
|
||||
continue
|
||||
elif opname == 'GET_ITER':
|
||||
@@ -457,7 +457,7 @@ class Python2Parser(PythonParser):
|
||||
if i > 0 and tokens[i-1] == 'LOAD_LAMBDA':
|
||||
self.addRule('mklambda ::= %s LOAD_LAMBDA %s' %
|
||||
('pos_arg ' * token.attr, opname), nop_func)
|
||||
rule = 'mkfunc ::= %s LOAD_CONST %s' % ('expr ' * token.attr, opname)
|
||||
rule = 'mkfunc ::= %s LOAD_CODE %s' % ('expr ' * token.attr, opname)
|
||||
elif opname_base == 'MAKE_CLOSURE':
|
||||
# FIXME: use add_unique_rules to tidy this up.
|
||||
if i > 0 and tokens[i-1] == 'LOAD_LAMBDA':
|
||||
@@ -472,7 +472,7 @@ class Python2Parser(PythonParser):
|
||||
('expr ' * token.attr, opname))], customize)
|
||||
pass
|
||||
self.add_unique_rules([
|
||||
('mkfunc ::= %s load_closure LOAD_CONST %s' %
|
||||
('mkfunc ::= %s load_closure LOAD_CODE %s' %
|
||||
('expr ' * token.attr, opname))], customize)
|
||||
|
||||
if self.version >= 2.7:
|
||||
@@ -547,7 +547,7 @@ class Python2Parser(PythonParser):
|
||||
elif rule == ('or', ('expr', 'jmp_true', 'expr', '\\e_come_from_opt')):
|
||||
expr2 = ast[2]
|
||||
return expr2 == 'expr' and expr2[0] == 'LOAD_ASSERT'
|
||||
elif lhs in ('delete_subscr', 'del_expr'):
|
||||
elif lhs in ('delete_subscript', 'del_expr'):
|
||||
op = ast[0][0]
|
||||
return op.kind in ('and', 'or')
|
||||
|
||||
|
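The delete_subscr to delete_subscript rename above only touches nonterminal names; the Python statements they cover are unchanged. A small hedged illustration of those forms:

# Hedged illustration (not from the diff): the source forms behind
# delete_subscript ::= expr expr DELETE_SUBSCR and del_stmt ::= expr DELETE_ATTR.
d = {"a": 1, "b": 2}
del d["a"]            # compiles to ... DELETE_SUBSCR -> delete_subscript

class C:
    pass

c = C()
c.x = 3
del c.x               # compiles to ... DELETE_ATTR -> del_stmt
print(d, hasattr(c, "x"))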
@@ -82,9 +82,9 @@ class Python25Parser(Python26Parser):
|
||||
return_stmt_lambda ::= ret_expr RETURN_VALUE_LAMBDA
|
||||
setupwithas ::= DUP_TOP LOAD_ATTR ROT_TWO LOAD_ATTR CALL_FUNCTION_0 setup_finally
|
||||
stmt ::= classdefdeco
|
||||
stmt ::= conditional_lambda
|
||||
stmt ::= if_expr_lambda
|
||||
stmt ::= conditional_not_lambda
|
||||
conditional_lambda ::= expr jmp_false_then expr return_if_lambda
|
||||
if_expr_lambda ::= expr jmp_false_then expr return_if_lambda
|
||||
return_stmt_lambda LAMBDA_MARKER
|
||||
conditional_not_lambda
|
||||
::= expr jmp_true_then expr return_if_lambda
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2017-2018 Rocky Bernstein
|
||||
# Copyright (c) 2017-2019 Rocky Bernstein
|
||||
"""
|
||||
spark grammar differences over Python2 for Python 2.6.
|
||||
"""
|
||||
@@ -102,6 +102,8 @@ class Python26Parser(Python2Parser):
|
||||
|
||||
def p_stmt26(self, args):
|
||||
"""
|
||||
stmt ::= ifelsestmtr
|
||||
|
||||
# We use filler as a placeholder to keep nonterminal positions
|
||||
# the same across different grammars so that the same semantic actions
|
||||
# can be used
|
||||
@@ -173,6 +175,9 @@ class Python26Parser(Python2Parser):
|
||||
iflaststmt ::= testexpr_then c_stmts_opt JUMP_ABSOLUTE come_froms POP_TOP
|
||||
iflaststmt ::= testexpr c_stmts_opt JUMP_ABSOLUTE come_froms POP_TOP
|
||||
|
||||
# "if"/"else" statement that ends in a RETURN
|
||||
ifelsestmtr ::= testexpr_then return_if_stmts returns
|
||||
|
||||
testexpr_then ::= testtrue_then
|
||||
testexpr_then ::= testfalse_then
|
||||
testtrue_then ::= expr jmp_true_then
|
||||
@@ -293,19 +298,19 @@ class Python26Parser(Python2Parser):
|
||||
compare_chained2 ::= expr COMPARE_OP return_lambda
|
||||
|
||||
return_if_lambda ::= RETURN_END_IF_LAMBDA POP_TOP
|
||||
stmt ::= conditional_lambda
|
||||
stmt ::= if_expr_lambda
|
||||
stmt ::= conditional_not_lambda
|
||||
conditional_lambda ::= expr jmp_false_then expr return_if_lambda
|
||||
if_expr_lambda ::= expr jmp_false_then expr return_if_lambda
|
||||
return_stmt_lambda LAMBDA_MARKER
|
||||
conditional_not_lambda ::=
|
||||
expr jmp_true_then expr return_if_lambda
|
||||
return_stmt_lambda LAMBDA_MARKER
|
||||
|
||||
# conditional_true are for conditions which always evaluate true
|
||||
# if_expr_true are for conditions which always evaluate true
|
||||
# There is dead or non-optional remnants of the condition code though,
|
||||
# and we use that to match on to reconstruct the source more accurately
|
||||
expr ::= conditional_true
|
||||
conditional_true ::= expr jf_pop expr COME_FROM
|
||||
expr ::= if_expr_true
|
||||
if_expr_true ::= expr jf_pop expr COME_FROM
|
||||
|
||||
# This comes from
|
||||
# 0 or max(5, 3) if 0 else 3
|
||||
|
@@ -112,14 +112,14 @@ class Python27Parser(Python2Parser):
|
||||
compare_chained2 ::= expr COMPARE_OP return_lambda
|
||||
compare_chained2 ::= expr COMPARE_OP return_lambda
|
||||
|
||||
# conditional_true are for conditions which always evaluate true
|
||||
# if_expr_true are for conditions which always evaluate true
|
||||
# There is dead or non-optional remnants of the condition code though,
|
||||
# and we use that to match on to reconstruct the source more accurately.
|
||||
# FIXME: we should do analysis and reduce *only* if there is dead code?
|
||||
# right now we check that expr is "or". Any other nodes types?
|
||||
|
||||
expr ::= conditional_true
|
||||
conditional_true ::= expr JUMP_FORWARD expr COME_FROM
|
||||
expr ::= if_expr_true
|
||||
if_expr_true ::= expr JUMP_FORWARD expr COME_FROM
|
||||
|
||||
conditional ::= expr jmp_false expr JUMP_FORWARD expr COME_FROM
|
||||
conditional ::= expr jmp_false expr JUMP_ABSOLUTE expr
|
||||
@@ -127,6 +127,8 @@ class Python27Parser(Python2Parser):
|
||||
|
||||
def p_stmt27(self, args):
|
||||
"""
|
||||
stmt ::= ifelsestmtr
|
||||
|
||||
# assert condition
|
||||
assert ::= assert_expr jmp_true LOAD_ASSERT RAISE_VARARGS_1
|
||||
|
||||
@@ -179,11 +181,14 @@ class Python27Parser(Python2Parser):
|
||||
ifelsestmtl ::= testexpr c_stmts_opt JUMP_BACK else_suitel
|
||||
ifelsestmtl ::= testexpr c_stmts_opt CONTINUE else_suitel
|
||||
|
||||
# "if"/"else" statement that ends in a RETURN
|
||||
ifelsestmtr ::= testexpr return_if_stmts COME_FROM returns
|
||||
|
||||
# Common with 2.6
|
||||
return_if_lambda ::= RETURN_END_IF_LAMBDA COME_FROM
|
||||
stmt ::= conditional_lambda
|
||||
stmt ::= conditional_not_lambda
|
||||
conditional_lambda ::= expr jmp_false expr return_if_lambda
|
||||
stmt ::= if_expr_lambda
|
||||
stmt ::= conditional_not_lambda
|
||||
if_expr_lambda ::= expr jmp_false expr return_if_lambda
|
||||
return_stmt_lambda LAMBDA_MARKER
|
||||
conditional_not_lambda
|
||||
::= expr jmp_true expr return_if_lambda
|
||||
@@ -216,7 +221,7 @@ class Python27Parser(Python2Parser):
|
||||
self.check_reduce['raise_stmt1'] = 'AST'
|
||||
self.check_reduce['list_if_not'] = 'AST'
|
||||
self.check_reduce['list_if'] = 'AST'
|
||||
self.check_reduce['conditional_true'] = 'AST'
|
||||
self.check_reduce['if_expr_true'] = 'tokens'
|
||||
self.check_reduce['whilestmt'] = 'tokens'
|
||||
return
|
||||
|
||||
@@ -229,6 +234,12 @@ class Python27Parser(Python2Parser):
|
||||
return invalid
|
||||
|
||||
if rule == ('and', ('expr', 'jmp_false', 'expr', '\\e_come_from_opt')):
|
||||
# If the instruction after the instructions forming "and" is a "YIELD_VALUE",
|
||||
# then this is probably an "if" inside a comprehension.
|
||||
if tokens[last] == 'YIELD_VALUE':
|
||||
# Note: We might also consider testing last+1 being "POP_TOP"
|
||||
return True
|
||||
|
||||
# Test that jmp_false jumps to the end of "and"
|
||||
# or that it jumps to the same place as the end of "and"
|
||||
jmp_false = ast[1][0]
|
||||
@@ -268,11 +279,8 @@ class Python27Parser(Python2Parser):
|
||||
while (tokens[i] != 'JUMP_BACK'):
|
||||
i -= 1
|
||||
return tokens[i].attr != tokens[i-1].attr
|
||||
# elif rule[0] == ('conditional_true'):
|
||||
# # FIXME: the below is a hack: we check expr for
|
||||
# # nodes that could have possibly been a been a Boolean.
|
||||
# # We should also look for the presence of dead code.
|
||||
# return ast[0] == 'expr' and ast[0] == 'or'
|
||||
elif rule[0] == 'if_expr_true':
|
||||
return (first) > 0 and tokens[first-1] == 'POP_JUMP_IF_FALSE'
|
||||
|
||||
return False
|
||||
|
||||
|
(File diff suppressed because it is too large)
@@ -73,7 +73,7 @@ class Python32Parser(Python3Parser):
|
||||
args_pos, args_kw, annotate_args = token.attr
|
||||
# Check that there are 2 annotated params?
|
||||
rule = (('mkfunc_annotate ::= %s%sannotate_tuple '
|
||||
'LOAD_CONST LOAD_CONST EXTENDED_ARG %s') %
|
||||
'LOAD_CONST LOAD_CODE EXTENDED_ARG %s') %
|
||||
(('pos_arg ' * (args_pos)),
|
||||
('annotate_arg ' * (annotate_args-1)), opname))
|
||||
self.add_unique_rule(rule, opname, token.attr, customize)
|
||||
|
@@ -47,7 +47,7 @@ class Python34Parser(Python33Parser):
|
||||
|
||||
# Python 3.4+ optimizes the trailing two JUMPS away
|
||||
|
||||
# Is this 3.4 only?
|
||||
# This is 3.4 only
|
||||
yield_from ::= expr GET_ITER LOAD_CONST YIELD_FROM
|
||||
|
||||
_ifstmts_jump ::= c_stmts_opt JUMP_ABSOLUTE JUMP_FORWARD COME_FROM
|
||||
@@ -55,6 +55,7 @@ class Python34Parser(Python33Parser):
|
||||
|
||||
def customize_grammar_rules(self, tokens, customize):
|
||||
self.remove_rules("""
|
||||
yield_from ::= expr expr YIELD_FROM
|
||||
# 3.4.2 has this. 3.4.4 may not
|
||||
# while1stmt ::= SETUP_LOOP l_stmts COME_FROM JUMP_BACK COME_FROM_LOOP
|
||||
""")
|
||||
|
@@ -29,8 +29,15 @@ class Python36Parser(Python35Parser):
|
||||
|
||||
|
||||
def p_36misc(self, args):
|
||||
"""
|
||||
sstmt ::= sstmt RETURN_LAST
|
||||
"""sstmt ::= sstmt RETURN_LAST
|
||||
|
||||
# long except clauses in a loop can sometimes cause a JUMP_BACK to turn into a
|
||||
# JUMP_FORWARD to a JUMP_BACK. And when this happens there is an additional
|
||||
# ELSE added to the except_suite. With better flow control perhaps we can
|
||||
# sort this out better.
|
||||
except_suite ::= c_stmts_opt POP_EXCEPT jump_except ELSE
|
||||
except_suite_finalize ::= SETUP_FINALLY c_stmts_opt except_var_finalize END_FINALLY
|
||||
_jump ELSE
|
||||
|
||||
# 3.6 redoes how return_closure works. FIXME: Isolate to LOAD_CLOSURE
|
||||
return_closure ::= LOAD_CLOSURE DUP_TOP STORE_NAME RETURN_VALUE RETURN_LAST
|
||||
@@ -142,6 +149,7 @@ class Python36Parser(Python35Parser):
|
||||
COME_FROM_FINALLY
|
||||
|
||||
compare_chained2 ::= expr COMPARE_OP come_froms JUMP_FORWARD
|
||||
|
||||
"""
|
||||
|
||||
def customize_grammar_rules(self, tokens, customize):
|
||||
@@ -187,35 +195,28 @@ class Python36Parser(Python35Parser):
|
||||
self.add_unique_doc_rules(rules_str, customize)
|
||||
elif opname == 'FORMAT_VALUE':
|
||||
rules_str = """
|
||||
expr ::= fstring_single
|
||||
fstring_single ::= expr FORMAT_VALUE
|
||||
expr ::= fstring_expr
|
||||
fstring_expr ::= expr FORMAT_VALUE
|
||||
|
||||
str ::= LOAD_CONST
|
||||
formatted_value ::= fstring_expr
|
||||
formatted_value ::= str
|
||||
|
||||
expr ::= formatted_value1
|
||||
formatted_value1 ::= expr FORMAT_VALUE
|
||||
"""
|
||||
self.add_unique_doc_rules(rules_str, customize)
|
||||
elif opname == 'FORMAT_VALUE_ATTR':
|
||||
rules_str = """
|
||||
expr ::= fstring_single
|
||||
fstring_single ::= expr expr FORMAT_VALUE_ATTR
|
||||
expr ::= formatted_value2
|
||||
formatted_value2 ::= expr expr FORMAT_VALUE_ATTR
|
||||
"""
|
||||
self.add_unique_doc_rules(rules_str, customize)
|
||||
elif opname == 'MAKE_FUNCTION_8':
|
||||
if 'LOAD_DICTCOMP' in self.seen_ops:
|
||||
# Is there something general going on here?
|
||||
rule = """
|
||||
dict_comp ::= load_closure LOAD_DICTCOMP LOAD_CONST
|
||||
dict_comp ::= load_closure LOAD_DICTCOMP LOAD_STR
|
||||
MAKE_FUNCTION_8 expr
|
||||
GET_ITER CALL_FUNCTION_1
|
||||
"""
|
||||
self.addRule(rule, nop_func)
|
||||
elif 'LOAD_SETCOMP' in self.seen_ops:
|
||||
rule = """
|
||||
set_comp ::= load_closure LOAD_SETCOMP LOAD_CONST
|
||||
set_comp ::= load_closure LOAD_SETCOMP LOAD_STR
|
||||
MAKE_FUNCTION_8 expr
|
||||
GET_ITER CALL_FUNCTION_1
|
||||
"""
|
||||
@@ -245,16 +246,12 @@ class Python36Parser(Python35Parser):
|
||||
"""
|
||||
self.addRule(rules_str, nop_func)
|
||||
|
||||
elif opname == 'BUILD_STRING':
|
||||
elif opname.startswith('BUILD_STRING'):
|
||||
v = token.attr
|
||||
joined_str_n = "formatted_value_%s" % v
|
||||
rules_str = """
|
||||
expr ::= fstring_multi
|
||||
fstring_multi ::= joined_str BUILD_STRING
|
||||
joined_str ::= formatted_value+
|
||||
fstring_multi ::= %s BUILD_STRING
|
||||
%s ::= %sBUILD_STRING
|
||||
""" % (joined_str_n, joined_str_n, "formatted_value " * v)
|
||||
expr ::= joined_str
|
||||
joined_str ::= %sBUILD_STRING_%d
|
||||
""" % ("expr " * v, v)
|
||||
self.add_unique_doc_rules(rules_str, customize)
|
||||
if 'FORMAT_VALUE_ATTR' in self.seen_ops:
|
||||
rules_str = """
|
||||
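To ground the joined_str/formatted_value rules above, here is a hedged sketch of the 3.6 f-string shapes they target and the opcodes CPython emits for them (the variable names are illustrative only):

# Hedged illustration (not from the diff): f-string shapes that exercise the
# formatted_value and joined_str rules above.
import dis

name, width = "spam", 10
single = f"{name}"               # one FORMAT_VALUE (formatted_value1)
multi = f"x={name!r} w={width}"  # several pieces glued with BUILD_STRING

# Under CPython 3.6+ the disassembly below includes FORMAT_VALUE and BUILD_STRING.
dis.dis(compile('f"x={name!r} w={width}"', "<fstring>", "eval"))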
@@ -274,6 +271,23 @@ class Python36Parser(Python35Parser):
|
||||
self.addRule(rule, nop_func)
|
||||
rule = ('starred ::= %s %s' % ('expr ' * v, opname))
|
||||
self.addRule(rule, nop_func)
|
||||
elif opname == 'SETUP_ANNOTATIONS':
|
||||
# 3.6 Variable Annotations PEP 526
|
||||
# This seems to come before STORE_ANNOTATION, and doesn't
|
||||
# correspond to direct Python source code.
|
||||
rule = """
|
||||
stmt ::= SETUP_ANNOTATIONS
|
||||
stmt ::= ann_assign_init_value
|
||||
stmt ::= ann_assign_no_init
|
||||
|
||||
ann_assign_init_value ::= expr store store_annotation
|
||||
ann_assign_no_init ::= store_annotation
|
||||
store_annotation ::= LOAD_NAME STORE_ANNOTATION
|
||||
store_annotation ::= subscript STORE_ANNOTATION
|
||||
"""
|
||||
self.addRule(rule, nop_func)
|
||||
# Check to combine assignment + annotation into one statement
|
||||
self.check_reduce['assign'] = 'token'
|
||||
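A hedged sketch of the PEP 526 forms behind the ann_assign rules above; the SETUP_ANNOTATIONS and STORE_ANNOTATION names only appear when the snippet is compiled by CPython 3.6.

# Hedged illustration (not from the diff): inspect the opcodes matched by
# ann_assign_init_value / ann_assign_no_init. Run under CPython 3.6 to see
# SETUP_ANNOTATIONS and STORE_ANNOTATION in the output.
import dis

src = "x: int = 1\ny: str\n"
code = compile(src, "<annotations>", "exec")
print([ins.opname for ins in dis.get_instructions(code)])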
elif opname == 'SETUP_WITH':
|
||||
rules_str = """
|
||||
withstmt ::= expr SETUP_WITH POP_TOP suite_stmts_opt COME_FROM_WITH
|
||||
@@ -299,6 +313,7 @@ class Python36Parser(Python35Parser):
|
||||
self.addRule(rules_str, nop_func)
|
||||
pass
|
||||
pass
|
||||
return
|
||||
|
||||
def custom_classfunc_rule(self, opname, token, customize, next_token):
|
||||
|
||||
@@ -398,6 +413,15 @@ class Python36Parser(Python35Parser):
|
||||
tokens, first, last)
|
||||
if invalid:
|
||||
return invalid
|
||||
if rule[0] == 'assign':
|
||||
# Try to combine assignment + annotation into one statement
|
||||
if (len(tokens) >= last + 1 and
|
||||
tokens[last] == 'LOAD_NAME' and
|
||||
tokens[last+1] == 'STORE_ANNOTATION' and
|
||||
tokens[last-1].pattr == tokens[last+1].pattr):
|
||||
# Will handle as ann_assign_init_value
|
||||
return True
|
||||
pass
|
||||
if rule[0] == 'call_kw':
|
||||
# Make sure we don't derive call_kw
|
||||
nt = ast[0]
|
||||
|
@@ -72,12 +72,18 @@ class Python37Parser(Python36Parser):
|
||||
POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_TOP POP_BLOCK
|
||||
else_suite COME_FROM_LOOP
|
||||
|
||||
# Is there a pattern here?
|
||||
attributes ::= IMPORT_FROM ROT_TWO POP_TOP IMPORT_FROM
|
||||
attributes ::= attributes ROT_TWO POP_TOP IMPORT_FROM
|
||||
|
||||
attribute37 ::= expr LOAD_METHOD
|
||||
expr ::= attribute37
|
||||
|
||||
# long except clauses in a loop can sometimes cause a JUMP_BACK to turn into a
|
||||
# JUMP_FORWARD to a JUMP_BACK. And when this happens there is an additional
|
||||
# ELSE added to the except_suite. With better flow control perhaps we can
|
||||
# sort this out better.
|
||||
except_suite ::= c_stmts_opt POP_EXCEPT jump_except ELSE
|
||||
|
||||
# FIXME: generalize and specialize
|
||||
call ::= expr CALL_METHOD_0
|
||||
|
||||
@@ -87,26 +93,50 @@ class Python37Parser(Python36Parser):
|
||||
|
||||
compare_chained37 ::= expr compare_chained1a_37
|
||||
compare_chained37 ::= expr compare_chained1b_37
|
||||
compare_chained37 ::= expr compare_chained1c_37
|
||||
|
||||
compare_chained37_false ::= expr compare_chained1_false_37
|
||||
compare_chained37_false ::= expr compare_chained2_false_37
|
||||
|
||||
compare_chained1a_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained1a_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2a_37 ELSE POP_TOP COME_FROM
|
||||
compare_chained1b_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2b_37 POP_TOP JUMP_FORWARD COME_FROM
|
||||
compare_chained1c_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2a_37 POP_TOP
|
||||
|
||||
compare_chained1_false_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2c_37 POP_TOP JUMP_FORWARD COME_FROM
|
||||
compare_chained2_false_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP POP_JUMP_IF_FALSE
|
||||
compare_chained2a_false_37 ELSE POP_TOP JUMP_BACK COME_FROM
|
||||
|
||||
compare_chained2a_37 ::= expr COMPARE_OP POP_JUMP_IF_TRUE JUMP_FORWARD
|
||||
compare_chained2a_false_37 ::= expr COMPARE_OP POP_JUMP_IF_FALSE JUMP_FORWARD
|
||||
compare_chained2a_37 ::= expr COMPARE_OP POP_JUMP_IF_TRUE JUMP_BACK
|
||||
compare_chained2a_false_37 ::= expr COMPARE_OP POP_JUMP_IF_FALSE jf_cfs
|
||||
|
||||
compare_chained2b_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_FALSE JUMP_FORWARD ELSE
|
||||
compare_chained2b_37 ::= expr COMPARE_OP come_from_opt POP_JUMP_IF_FALSE JUMP_FORWARD
|
||||
|
||||
compare_chained2c_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP come_from_opt POP_JUMP_IF_FALSE
|
||||
compare_chained2a_false_37 ELSE
|
||||
compare_chained2c_37 ::= expr DUP_TOP ROT_THREE COMPARE_OP come_from_opt POP_JUMP_IF_FALSE
|
||||
compare_chained2a_false_37
|
||||
|
||||
_ifstmts_jump ::= c_stmts_opt come_froms
|
||||
jf_cfs ::= JUMP_FORWARD _come_froms
|
||||
ifelsestmt ::= testexpr c_stmts_opt jf_cfs else_suite opt_come_from_except
|
||||
|
||||
jmp_false37 ::= POP_JUMP_IF_FALSE COME_FROM
|
||||
list_if ::= expr jmp_false37 list_iter
|
||||
|
||||
_ifstmts_jump ::= c_stmts_opt come_froms
|
||||
|
||||
and_not ::= expr jmp_false expr POP_JUMP_IF_TRUE
|
||||
|
||||
expr ::= if_exp_37a
|
||||
expr ::= if_exp_37b
|
||||
if_exp_37a ::= and_not expr JUMP_FORWARD COME_FROM expr COME_FROM
|
||||
if_exp_37b ::= expr jmp_false expr POP_JUMP_IF_FALSE jump_forward_else expr
|
||||
"""
|
||||
|
||||
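The compare_chained*_37 rules earlier in this grammar block exist because CPython 3.7 compiles chained comparisons with DUP_TOP/ROT_THREE and a pair of COMPARE_OPs; a hedged sketch of the source shape they reconstruct:

# Hedged illustration (not from the diff): a chained comparison that the
# compare_chained*_37 rules above are meant to decompile.
import dis

def in_range(low, x, high):
    # On CPython 3.7 this compiles with DUP_TOP / ROT_THREE and two COMPARE_OPs.
    return low <= x <= high

dis.dis(in_range)
print(in_range(1, 2, 3))  # True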
def customize_grammar_rules(self, tokens, customize):
|
||||
|
@@ -287,6 +287,8 @@ class Scanner2(Scanner):
|
||||
op_name = 'LOAD_DICTCOMP'
|
||||
elif const.co_name == '<setcomp>':
|
||||
op_name = 'LOAD_SETCOMP'
|
||||
else:
|
||||
op_name = "LOAD_CODE"
|
||||
# verify() uses 'pattr' for comparison, since 'attr'
|
||||
# now holds Code(const) and thus can not be used
|
||||
# for comparison (todo: think about changing this)
|
||||
|
@@ -173,6 +173,8 @@ class Scanner26(scan.Scanner2):
|
||||
op_name = 'LOAD_DICTCOMP'
|
||||
elif const.co_name == '<setcomp>':
|
||||
op_name = 'LOAD_SETCOMP'
|
||||
else:
|
||||
op_name = "LOAD_CODE"
|
||||
# verify uses 'pattr' for comparison, since 'attr'
|
||||
# now holds Code(const) and thus can not be used
|
||||
# for comparison (todo: think about changing this)
|
||||
|
(File diff suppressed because it is too large)
@@ -31,6 +31,8 @@ class Scanner36(Scanner3):
|
||||
t.op == self.opc.CALL_FUNCTION_EX and t.attr & 1):
|
||||
t.kind = 'CALL_FUNCTION_EX_KW'
|
||||
pass
|
||||
elif t.op == self.opc.BUILD_STRING:
|
||||
t.kind = 'BUILD_STRING_%s' % t.attr
|
||||
elif t.op == self.opc.CALL_FUNCTION_KW:
|
||||
t.kind = 'CALL_FUNCTION_KW_%s' % t.attr
|
||||
elif t.op == self.opc.FORMAT_VALUE:
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) 2016-2018 by Rocky Bernstein
|
||||
# Copyright (c) 2016-2019 by Rocky Bernstein
|
||||
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
|
||||
# Copyright (c) 1999 John Aycock
|
||||
#
|
||||
@@ -22,18 +22,29 @@ if PYTHON3:
|
||||
intern = sys.intern
|
||||
|
||||
class Token: # Python 2.4 can't have empty ()
|
||||
|
||||
"""
|
||||
Class representing a byte-code instruction.
|
||||
|
||||
A byte-code token is equivalent to Python 3's dis.Instruction or
|
||||
the contents of one line as output by dis.dis().
|
||||
"""
|
||||
|
||||
# FIXME: match Python 3.4's terms:
|
||||
# linestart = starts_line
|
||||
# attr = argval
|
||||
# pattr = argrepr
|
||||
def __init__(self, opname, attr=None, pattr=None, offset=-1,
|
||||
linestart=None, op=None, has_arg=None, opc=None):
|
||||
def __init__(
|
||||
self,
|
||||
opname,
|
||||
attr=None,
|
||||
pattr=None,
|
||||
offset=-1,
|
||||
linestart=None,
|
||||
op=None,
|
||||
has_arg=None,
|
||||
opc=None,
|
||||
):
|
||||
self.kind = intern(opname)
|
||||
self.has_arg = has_arg
|
||||
self.attr = attr
|
||||
@@ -46,6 +57,7 @@ class Token: # Python 2.4 can't have empty ()
|
||||
|
||||
if opc is None:
|
||||
from xdis.std import _std_api
|
||||
|
||||
self.opc = _std_api.opc
|
||||
else:
|
||||
self.opc = opc
|
||||
@@ -58,7 +70,9 @@ class Token: # Python 2.4 can't have empty ()
|
||||
""" '==' on kind and "pattr" attributes.
|
||||
It is okay if offsets and linestarts are different"""
|
||||
if isinstance(o, Token):
|
||||
return (self.kind == o.kind) and (self.pattr == o.pattr)
|
||||
return (self.kind == o.kind) and (
|
||||
(self.pattr == o.pattr) or self.attr == o.attr
|
||||
)
|
||||
else:
|
||||
# ?? do we need this?
|
||||
return self.kind == o
|
||||
@@ -77,47 +91,69 @@ class Token: # Python 2.4 can't have empty ()
|
||||
# ('%9s %-18s %r' % (self.offset, self.kind, pattr)))
|
||||
|
||||
def __str__(self):
|
||||
return self.format(line_prefix='')
|
||||
return self.format(line_prefix="")
|
||||
|
||||
def format(self, line_prefix=''):
|
||||
def format(self, line_prefix=""):
|
||||
if self.linestart:
|
||||
prefix = '\n%s%4d ' % (line_prefix, self.linestart)
|
||||
prefix = "\n%s%4d " % (line_prefix, self.linestart)
|
||||
else:
|
||||
prefix = ' ' * (6 + len(line_prefix))
|
||||
offset_opname = '%6s %-17s' % (self.offset, self.kind)
|
||||
prefix = (" " * (6 + len(line_prefix)))
|
||||
offset_opname = "%6s %-17s" % (self.offset, self.kind)
|
||||
|
||||
if not self.has_arg:
|
||||
return "%s%s" % (prefix, offset_opname)
|
||||
|
||||
if isinstance(self.attr, int):
|
||||
argstr = "%6d " % self.attr
|
||||
else:
|
||||
argstr = ' '*7
|
||||
argstr = (" " * 7)
|
||||
name = self.kind
|
||||
|
||||
if self.has_arg:
|
||||
pattr = self.pattr
|
||||
if self.opc:
|
||||
if self.op in self.opc.JREL_OPS:
|
||||
if not self.pattr.startswith('to '):
|
||||
if not self.pattr.startswith("to "):
|
||||
pattr = "to " + self.pattr
|
||||
elif self.op in self.opc.JABS_OPS:
|
||||
self.pattr = str(self.pattr)
|
||||
if not self.pattr.startswith('to '):
|
||||
if not self.pattr.startswith("to "):
|
||||
pattr = "to " + str(self.pattr)
|
||||
pass
|
||||
elif self.op in self.opc.CONST_OPS:
|
||||
# Compare with pysource n_LOAD_CONST
|
||||
attr = self.attr
|
||||
if attr is None:
|
||||
pattr = None
|
||||
if name == "LOAD_STR":
|
||||
pattr = self.attr
|
||||
elif name == "LOAD_CODE":
|
||||
return "%s%s%s %s" % (prefix, offset_opname, argstr, pattr)
|
||||
else:
|
||||
return "%s%s %r" % (prefix, offset_opname, pattr)
|
||||
|
||||
elif self.op in self.opc.hascompare:
|
||||
if isinstance(self.attr, int):
|
||||
pattr = self.opc.cmp_op[self.attr]
|
||||
return "%s%s%s %s" % (prefix, offset_opname, argstr, pattr)
|
||||
elif self.op in self.opc.hasvargs:
|
||||
return "%s%s%s" % (prefix, offset_opname, argstr)
|
||||
elif name == 'LOAD_ASSERT':
|
||||
return "%s%s %s" % (prefix, offset_opname, pattr)
|
||||
elif self.op in self.opc.NAME_OPS:
|
||||
if self.opc.version >= 3.0:
|
||||
return "%s%s%s %s" % (prefix, offset_opname, argstr, self.attr)
|
||||
elif name == "EXTENDED_ARG":
|
||||
return "%s%s%s 0x%x << %s = %s" % (
|
||||
prefix,
|
||||
offset_opname,
|
||||
argstr,
|
||||
self.attr,
|
||||
self.opc.EXTENDED_ARG_SHIFT,
|
||||
pattr,
|
||||
)
|
||||
# And so on. See xdis/bytecode.py get_instructions_bytes
|
||||
pass
|
||||
elif re.search(r'_\d+$', self.kind):
|
||||
return "%s%s%s" % (prefix, offset_opname, argstr)
|
||||
elif re.search(r"_\d+$", self.kind):
|
||||
return "%s%s%s" % (prefix, offset_opname, argstr)
|
||||
else:
|
||||
pattr = ''
|
||||
return "%s%s%s %r" % (prefix, offset_opname, argstr, pattr)
|
||||
pattr = ""
|
||||
return "%s%s%s %r" % (prefix, offset_opname, argstr, pattr)
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.kind)
|
||||
@@ -125,4 +161,5 @@ class Token: # Python 2.4 can't have empty ()
|
||||
def __getitem__(self, i):
|
||||
raise IndexError
|
||||
|
||||
NoneToken = Token('LOAD_CONST', offset=-1, attr=None, pattr=None)
|
||||
|
||||
NoneToken = Token("LOAD_CONST", offset=-1, attr=None, pattr=None)
|
||||
|
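A hedged usage sketch of the Token class whose constructor and __eq__ are reformatted above; the import path is an assumption, and the xdis fallback is inferred from the diff rather than documented behavior.

# Hedged sketch: exercising Token equality and formatting. Assumes the class
# lives in uncompyle6.scanners.tok and that xdis is installed, since a Token
# built without opc falls back to xdis.std's opcode table (per the diff above).
from uncompyle6.scanners.tok import Token

a = Token("LOAD_CONST", attr=42, pattr="42", offset=0)
b = Token("LOAD_CONST", attr=42, pattr="42", offset=8, linestart=3)

assert a == b              # __eq__ ignores offset/linestart; kind + pattr match
assert a == "LOAD_CONST"   # comparing with a plain string checks kind only
print(a)                   # format() renders a dis-like "offset opname" line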
@@ -36,7 +36,6 @@ class AligningWalker(SourceWalker, object):
|
||||
self.pending_newlines = max(self.pending_newlines, 1)
|
||||
|
||||
def write(self, *data):
|
||||
from trepan.api import debug; debug()
|
||||
if (len(data) == 1) and data[0] == self.indent:
|
||||
diff = max(self.pending_newlines,
|
||||
self.desired_line_number - self.current_line_number)
|
||||
|
@@ -27,75 +27,85 @@ else:
|
||||
maxint = sys.maxint
|
||||
|
||||
|
||||
# Operator precedence
# See https://docs.python.org/2/reference/expressions.html
# or https://docs.python.org/3/reference/expressions.html
# for a list.
# Operator precedence. See
# https://docs.python.org/2/reference/expressions.html#operator-precedence
# or
# https://docs.python.org/3/reference/expressions.html#operator-precedence
# for a list. We keep the same top-to-bottom order here as in the above links,
# so we start with low precedence (high values) and go down in value.

# Things at the top of this list below with low-value precedence will
# tend to have parentheses around them. Things at the bottom
# Things at the bottom of this list below with high precedence (low value) will
# tend to have parentheses around them. Things at the top
# of the list will tend not to have parentheses around them.
PRECEDENCE = {
|
||||
'list': 0,
|
||||
'dict': 0,
|
||||
'unary_convert': 0,
|
||||
'dict_comp': 0,
|
||||
'set_comp': 0,
|
||||
'set_comp_expr': 0,
|
||||
'list_comp': 0,
|
||||
'generator_exp': 0,
|
||||
|
||||
'attribute': 2,
|
||||
'subscript': 2,
|
||||
'subscript2': 2,
|
||||
'store_subscript': 2,
|
||||
'delete_subscr': 2,
|
||||
# Note: The values in this table are even numbers. Inside
# various templates we use odd values. Avoiding equal-precedence comparisons
# avoids ambiguity about what to do when precedences are equal.
|
||||
|
||||
|
||||
PRECEDENCE = {
|
||||
'yield': 102,
|
||||
'yield_from': 102,
|
||||
|
||||
'_mklambda': 30,
|
||||
|
||||
'conditional': 28, # Conditional expression
|
||||
'conditional_lamdba': 28, # Lambda expression
|
||||
'conditional_not_lamdba': 28, # Lambda expression
|
||||
'conditionalnot': 28,
|
||||
'if_expr_true': 28,
|
||||
'ret_cond': 28,
|
||||
|
||||
'or': 26, # Boolean OR
|
||||
'ret_or': 26,
|
||||
|
||||
'and': 24, # Boolean AND
|
||||
'compare': 20, # in, not in, is, is not, <, <=, >, >=, !=, ==
|
||||
'ret_and': 24,
|
||||
'unary_not': 22, # Boolean NOT
|
||||
|
||||
'BINARY_AND': 14, # Bitwise AND
|
||||
'BINARY_OR': 18, # Bitwise OR
|
||||
'BINARY_XOR': 16, # Bitwise XOR
|
||||
|
||||
'BINARY_LSHIFT': 12, # Shifts <<
|
||||
'BINARY_RSHIFT': 12, # Shifts >>
|
||||
|
||||
'BINARY_ADD': 10, # -
|
||||
'BINARY_SUBTRACT': 10, # +
|
||||
|
||||
'BINARY_DIVIDE': 8, # /
|
||||
'BINARY_FLOOR_DIVIDE': 8, # //
|
||||
'BINARY_MATRIX_MULTIPLY': 8, # @
|
||||
'BINARY_MODULO': 8, # Remainder, %
|
||||
'BINARY_MULTIPLY': 8, # *
|
||||
'BINARY_TRUE_DIVIDE': 8, # Division /
|
||||
|
||||
'unary_expr': 6, # +x, -x, ~x
|
||||
|
||||
'BINARY_POWER': 4, # Exponentiation, *
|
||||
|
||||
'attribute': 2, # x.attribute
|
||||
'buildslice2': 2, # x[index]
|
||||
'buildslice3': 2, # x[index:index]
|
||||
'call': 2, # x(arguments...)
|
||||
'delete_subscript': 2,
|
||||
'slice0': 2,
|
||||
'slice1': 2,
|
||||
'slice2': 2,
|
||||
'slice3': 2,
|
||||
'buildslice2': 2,
|
||||
'buildslice3': 2,
|
||||
'call': 2,
|
||||
'store_subscript': 2,
|
||||
'subscript': 2,
|
||||
'subscript2': 2,
|
||||
|
||||
'BINARY_POWER': 4,
|
||||
|
||||
'unary_expr': 6,
|
||||
|
||||
'BINARY_MULTIPLY': 8,
|
||||
'BINARY_DIVIDE': 8,
|
||||
'BINARY_TRUE_DIVIDE': 8,
|
||||
'BINARY_FLOOR_DIVIDE': 8,
|
||||
'BINARY_MODULO': 8,
|
||||
|
||||
'BINARY_ADD': 10,
|
||||
'BINARY_SUBTRACT': 10,
|
||||
|
||||
'BINARY_LSHIFT': 12,
|
||||
'BINARY_RSHIFT': 12,
|
||||
|
||||
'BINARY_AND': 14,
|
||||
'BINARY_XOR': 16,
|
||||
'BINARY_OR': 18,
|
||||
|
||||
'compare': 20,
|
||||
'unary_not': 22,
|
||||
'and': 24,
|
||||
'ret_and': 24,
|
||||
|
||||
'or': 26,
|
||||
'ret_or': 26,
|
||||
|
||||
'conditional': 28,
|
||||
'conditional_lamdba': 28,
|
||||
'conditional_not_lamdba': 28,
|
||||
'conditionalnot': 28,
|
||||
'ret_cond': 28,
|
||||
|
||||
'_mklambda': 30,
|
||||
|
||||
'yield': 101,
|
||||
'yield_from': 101
|
||||
'dict': 0, # {expressions...}
|
||||
'dict_comp': 0,
|
||||
'generator_exp': 0, # (expressions...)
|
||||
'list': 0, # [expressions...]
|
||||
'list_comp': 0,
|
||||
'set_comp': 0,
|
||||
'set_comp_expr': 0,
|
||||
'unary_convert': 0,
|
||||
}
|
||||
|
||||
LINE_LENGTH = 80
|
||||
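For intuition about how the reordered PRECEDENCE table gets used, here is a hedged sketch of precedence-driven parenthesization; it is not the project's actual engine, and the helper name and decision rule are illustrative assumptions.

# Hedged sketch, not uncompyle6's code: a table like PRECEDENCE (low value =
# binds tighter) decides whether a child expression must be parenthesized
# when rendered in a slot that allows at most a given precedence value.
PREC = {"or": 26, "and": 24, "compare": 20, "BINARY_ADD": 10, "BINARY_MULTIPLY": 8}

def needs_parens(child_kind, slot_precedence):
    # A looser-binding child (larger value) placed in a tighter slot needs parens.
    return PREC.get(child_kind, 100) > slot_precedence

print(needs_parens("BINARY_ADD", PREC["BINARY_MULTIPLY"]))  # True:  (a + b) * c
print(needs_parens("BINARY_MULTIPLY", PREC["BINARY_ADD"]))  # False: a * b + c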
@@ -118,10 +128,10 @@ PASS = SyntaxTree('stmts',
|
||||
[ SyntaxTree('stmt',
|
||||
[ SyntaxTree('pass', [])])])])
|
||||
|
||||
ASSIGN_DOC_STRING = lambda doc_string: \
|
||||
ASSIGN_DOC_STRING = lambda doc_string, doc_load: \
|
||||
SyntaxTree('stmt',
|
||||
[ SyntaxTree('assign',
|
||||
[ SyntaxTree('expr', [ Token('LOAD_CONST', pattr=doc_string) ]),
|
||||
[ SyntaxTree('expr', [ Token(doc_load, pattr=doc_string, attr=doc_string) ]),
|
||||
SyntaxTree('store', [ Token('STORE_NAME', pattr='__doc__')])
|
||||
])])
|
||||
|
||||
@@ -210,9 +220,10 @@ TABLE_DIRECT = {
|
||||
|
||||
'IMPORT_FROM': ( '%{pattr}', ),
|
||||
'attribute': ( '%c.%[1]{pattr}',
|
||||
(0, 'expr')),
|
||||
'LOAD_FAST': ( '%{pattr}', ),
|
||||
'LOAD_NAME': ( '%{pattr}', ),
|
||||
(0, 'expr')),
|
||||
'LOAD_STR': ( '%{pattr}', ),
|
||||
'LOAD_FAST': ( '%{pattr}', ),
|
||||
'LOAD_NAME': ( '%{pattr}', ),
|
||||
'LOAD_CLASSNAME': ( '%{pattr}', ),
|
||||
'LOAD_GLOBAL': ( '%{pattr}', ),
|
||||
'LOAD_DEREF': ( '%{pattr}', ),
|
||||
@@ -221,7 +232,7 @@ TABLE_DIRECT = {
|
||||
'DELETE_FAST': ( '%|del %{pattr}\n', ),
|
||||
'DELETE_NAME': ( '%|del %{pattr}\n', ),
|
||||
'DELETE_GLOBAL': ( '%|del %{pattr}\n', ),
|
||||
'delete_subscr': ( '%|del %p[%c]\n',
|
||||
'delete_subscript': ( '%|del %p[%c]\n',
|
||||
(0, 'expr', PRECEDENCE['subscript']), (1, 'expr') ),
|
||||
'subscript': ( '%p[%c]',
|
||||
(0, 'expr', PRECEDENCE['subscript']),
|
||||
@@ -252,10 +263,11 @@ TABLE_DIRECT = {
|
||||
|
||||
'list_iter': ( '%c', 0 ),
|
||||
'list_for': ( ' for %c in %c%c', 2, 0, 3 ),
|
||||
'list_if': ( ' if %c%c', 0, 2 ),
|
||||
'list_if_not': ( ' if not %p%c',
|
||||
(0, 'expr', PRECEDENCE['unary_not']),
|
||||
2 ),
|
||||
'list_if': ( ' if %p%c',
|
||||
(0, 'expr', 27), 2 ),
|
||||
'list_if_not': ( ' if not %p%c',
|
||||
(0, 'expr', PRECEDENCE['unary_not']),
|
||||
2 ),
|
||||
'lc_body': ( '', ), # ignore when recursing
|
||||
|
||||
'comp_iter': ( '%c', 0 ),
|
||||
@@ -281,19 +293,19 @@ TABLE_DIRECT = {
|
||||
'and2': ( '%c', 3 ),
|
||||
'or': ( '%c or %c', 0, 2 ),
|
||||
'ret_or': ( '%c or %c', 0, 2 ),
|
||||
'conditional': ( '%p if %p else %p', (2, 27), (0, 27), (4, 27) ),
|
||||
'conditional_true': ( '%p if 1 else %p', (0, 27), (2, 27) ),
|
||||
'conditional': ( '%p if %c else %c',
|
||||
(2, 'expr', 27), 0, 4 ),
|
||||
'if_expr_lambda': ( '%p if %c else %c',
|
||||
(2, 'expr', 27), (0, 'expr'), 4 ),
|
||||
'if_expr_true': ( '%p if 1 else %c', (0, 'expr', 27), 2 ),
|
||||
'ret_cond': ( '%p if %p else %p', (2, 27), (0, 27), (-1, 27) ),
|
||||
'conditional_not': ( '%p if not %p else %p',
|
||||
(2, 27),
|
||||
(0, "expr", PRECEDENCE['unary_not']),
|
||||
(4, 27) ),
|
||||
'conditional_lambda':
|
||||
( '%c if %c else %c',
|
||||
(2, 'expr'), 0, 4 ),
|
||||
'conditional_not_lambda':
|
||||
( '%c if not %c else %c',
|
||||
(2, 'expr'), 0, 4 ),
|
||||
( '%p if not %c else %c',
|
||||
(2, 'expr', 27), 0, 4 ),
|
||||
|
||||
'compare_single': ( '%p %[-1]{pattr.replace("-", " ")} %p', (0, 19), (1, 19) ),
|
||||
'compare_chained': ( '%p %p', (0, 29), (1, 30)),
|
||||
@@ -306,7 +318,7 @@ TABLE_DIRECT = {
|
||||
'mkfuncdeco0': ( '%|def %c\n', 0),
|
||||
'classdefdeco': ( '\n\n%c', 0),
|
||||
'classdefdeco1': ( '%|@%c\n%c', 0, 1),
|
||||
'kwarg': ( '%[0]{pattr}=%c', 1),
|
||||
'kwarg': ( '%[0]{pattr}=%c', 1), # Change when Python 2 does LOAD_STR
|
||||
'kwargs': ( '%D', (0, maxint, ', ') ),
|
||||
'kwargs1': ( '%D', (0, maxint, ', ') ),
|
||||
|
||||
@@ -338,23 +350,34 @@ TABLE_DIRECT = {
|
||||
'testtrue': ( 'not %p',
|
||||
(0, PRECEDENCE['unary_not']) ),
|
||||
|
||||
# Generally the args here are 0: (some sort of) "testexpr",
|
||||
# 1: (some sort of) "cstmts_opt",
|
||||
# 2 or 3: "else_suite"
|
||||
# But unfortunately there are irregularities, For example, 2.6- uses "testexpr_then"
|
||||
# and sometimes "cstmts" instead of "cstmts_opt" happens.
|
||||
# Down the line we might isolate these into version-specific rules.
|
||||
'ifelsestmt': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-', 0, 1, 3 ),
|
||||
'ifelsestmtc': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-', 0, 1, 3 ),
|
||||
'ifelsestmtl': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-', 0, 1, 3 ),
|
||||
'ifelsestmtr': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-', 0, 1, 2 ),
|
||||
'ifelsestmtr2': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-\n\n', 0, 1, 3 ), # has COME_FROM in position 2
|
||||
|
||||
|
||||
# "elif" forms are not generated by the parser but are created through tree
|
||||
# transformations. See "n_ifelsestmt".
|
||||
'ifelifstmt': ( '%|if %c:\n%+%c%-%c', 0, 1, 3 ),
|
||||
'elifelifstmt': ( '%|elif %c:\n%+%c%-%c', 0, 1, 3 ),
|
||||
'elifstmt': ( '%|elif %c:\n%+%c%-', 0, 1 ),
|
||||
'elifelsestmt': ( '%|elif %c:\n%+%c%-%|else:\n%+%c%-', 0, 1, 3 ),
|
||||
'ifelsestmtr': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-', 0, 1, 2 ),
|
||||
'ifelsestmtr2': ( '%|if %c:\n%+%c%-%|else:\n%+%c%-\n\n', 0, 1, 3 ), # has COME_FROM
|
||||
'elifelsestmtr': ( '%|elif %c:\n%+%c%-%|else:\n%+%c%-\n\n', 0, 1, 2 ),
|
||||
'elifelsestmtr2': ( '%|elif %c:\n%+%c%-%|else:\n%+%c%-\n\n', 0, 1, 3 ), # has COME_FROM
|
||||
'elifelsestmtr2': ( '%|elif %c:\n%+%c%-%|else:\n%+%c%-\n\n', 0, 1, 3 ), # has COME_FROM in position 2
|
||||
|
||||
'whileTruestmt': ( '%|while True:\n%+%c%-\n\n', 1 ),
|
||||
'whilestmt': ( '%|while %c:\n%+%c%-\n\n', 1, 2 ),
|
||||
'while1stmt': ( '%|while 1:\n%+%c%-\n\n', 1 ),
|
||||
'while1elsestmt': ( '%|while 1:\n%+%c%-%|else:\n%+%c%-\n\n', 1, -2 ),
|
||||
'whileelsestmt': ( '%|while %c:\n%+%c%-%|else:\n%+%c%-\n\n', 1, 2, -2 ),
|
||||
'whileelsestmt2': ( '%|while %c:\n%+%c%-%|else:\n%+%c%-\n\n', 1, 2, -3 ),
|
||||
'whileelselaststmt': ( '%|while %c:\n%+%c%-%|else:\n%+%c%-', 1, 2, -2 ),
|
||||
|
||||
# Note: Python 3.8+ changes this
|
||||
@@ -386,7 +409,9 @@ TABLE_DIRECT = {
|
||||
'tf_tryelsestmt': ( '%c%-%c%|else:\n%+%c', 1, 3, 4 ),
|
||||
'tryfinallystmt': ( '%|try:\n%+%c%-%|finally:\n%+%c%-\n\n', 1, 5 ),
|
||||
'except': ( '%|except:\n%+%c%-', 3 ),
|
||||
'except_cond1': ( '%|except %c:\n', 1 ),
|
||||
'except_cond1': ( '%|except %c:\n', (1, 'expr') ),
|
||||
'except_cond2': ( '%|except %c as %c:\n',
|
||||
(1, 'expr'), (5, 'store') ),
|
||||
'except_suite': ( '%+%c%-%C', 0, (1, maxint, '') ),
|
||||
|
||||
# In Python 3.6, this is more complicated in the presence of "returns"
|
||||
|
@@ -49,11 +49,6 @@ def customize_for_version(self, is_pypy, version):
|
||||
5, 6, 7, 0, 1, 2 ),
|
||||
})
|
||||
if version >= 3.0:
|
||||
TABLE_DIRECT.update({
|
||||
# Gotta love Python for its futzing around with syntax like this
|
||||
'raise_stmt2': ( '%|raise %c from %c\n', 0, 1),
|
||||
})
|
||||
|
||||
if version >= 3.2:
|
||||
TABLE_DIRECT.update({
|
||||
'del_deref_stmt': ( '%|del %c\n', 0),
|
||||
@@ -62,6 +57,10 @@ def customize_for_version(self, is_pypy, version):
|
||||
from uncompyle6.semantics.customize3 import customize_for_version3
|
||||
customize_for_version3(self, version)
|
||||
else: # < 3.0
|
||||
TABLE_DIRECT.update({
|
||||
'except_cond3' : ( '%|except %c, %c:\n',
|
||||
(1, 'expr'), (-2, 'store') )
|
||||
})
|
||||
if 2.4 <= version <= 2.6:
|
||||
TABLE_DIRECT.update({
|
||||
'comp_for': ( ' for %c in %c', 3, 1 ),
|
||||
|
@@ -26,8 +26,6 @@ def customize_for_version25(self, version):
|
||||
# Import style for 2.5+
|
||||
########################
|
||||
TABLE_DIRECT.update({
|
||||
'except_cond3' : ( '%|except %c, %c:\n',
|
||||
(1, 'expr'), (-2, 'store') ),
|
||||
'importmultiple': ( '%|import %c%c\n', 2, 3 ),
|
||||
'import_cont' : ( ', %c', 2 ),
|
||||
# With/as is allowed as "from future" thing in 2.5
|
||||
|
@@ -31,9 +31,31 @@ def customize_for_version26_27(self, version):
|
||||
if version > 2.6:
|
||||
TABLE_DIRECT.update({
|
||||
'except_cond2': ( '%|except %c as %c:\n', 1, 5 ),
|
||||
# When a generator is a single parameter of a function,
|
||||
# it doesn't need the surrounding parentheses.
|
||||
'call_generator': ('%c%P', 0, (1, -1, ', ', 100)),
|
||||
})
|
||||
else:
|
||||
TABLE_DIRECT.update({
|
||||
'testtrue_then': ( 'not %p', (0, 22) ),
|
||||
|
||||
})
|
||||
|
||||
def n_call(node):
|
||||
mapping = self._get_mapping(node)
|
||||
key = node
|
||||
for i in mapping[1:]:
|
||||
key = key[i]
|
||||
pass
|
||||
if key.kind == 'CALL_FUNCTION_1':
|
||||
# A function with one argument. If this is a generator,
|
||||
# no parenthesis is needed.
|
||||
args_node = node[-2]
|
||||
if args_node == 'expr':
|
||||
n = args_node[0]
|
||||
if n == 'generator_exp':
|
||||
node.kind = 'call_generator'
|
||||
pass
|
||||
pass
|
||||
|
||||
self.default(node)
|
||||
self.n_call = n_call
|
||||
|
@@ -19,26 +19,38 @@
|
||||
from uncompyle6.semantics.consts import TABLE_DIRECT
|
||||
|
||||
from xdis.code import iscode
|
||||
from uncompyle6.semantics.helper import gen_function_parens_adjust
|
||||
from uncompyle6.semantics.make_function import make_function3_annotate
|
||||
from uncompyle6.semantics.customize35 import customize_for_version35
|
||||
from uncompyle6.semantics.customize36 import customize_for_version36
|
||||
from uncompyle6.semantics.customize37 import customize_for_version37
|
||||
from uncompyle6.semantics.customize38 import customize_for_version38
|
||||
|
||||
|
||||
def customize_for_version3(self, version):
|
||||
TABLE_DIRECT.update({
|
||||
'comp_for' : ( ' for %c in %c',
|
||||
(2, 'store') , (0, 'expr') ),
|
||||
'conditionalnot' : ( '%c if not %c else %c',
|
||||
(2, 'expr') , (0, 'expr'), (4, 'expr') ),
|
||||
'except_cond2' : ( '%|except %c as %c:\n', 1, 5 ),
|
||||
'function_def_annotate': ( '\n\n%|def %c%c\n', -1, 0),
|
||||
'importmultiple' : ( '%|import %c%c\n', 2, 3 ),
|
||||
'import_cont' : ( ', %c', 2 ),
|
||||
'store_locals' : ( '%|# inspect.currentframe().f_locals = __locals__\n', ),
|
||||
'withstmt' : ( '%|with %c:\n%+%c%-', 0, 3),
|
||||
'withasstmt' : ( '%|with %c as (%c):\n%+%c%-', 0, 2, 3),
|
||||
})
|
||||
TABLE_DIRECT.update(
|
||||
{
|
||||
"comp_for": (" for %c in %c", (2, "store"), (0, "expr")),
|
||||
"conditionalnot": (
|
||||
"%c if not %c else %c",
|
||||
(2, "expr"),
|
||||
(0, "expr"),
|
||||
(4, "expr"),
|
||||
),
|
||||
"except_cond2": ("%|except %c as %c:\n", 1, 5),
|
||||
"function_def_annotate": ("\n\n%|def %c%c\n", -1, 0),
|
||||
# When a generator is a single parameter of a function,
|
||||
# it doesn't need the surrounding parentheses.
|
||||
"call_generator": ("%c%P", 0, (1, -1, ", ", 100)),
|
||||
"importmultiple": ("%|import %c%c\n", 2, 3),
|
||||
"import_cont": (", %c", 2),
|
||||
"kwarg": ("%[0]{attr}=%c", 1),
|
||||
"raise_stmt2": ("%|raise %c from %c\n", 0, 1),
|
||||
"store_locals": ("%|# inspect.currentframe().f_locals = __locals__\n",),
|
||||
"withstmt": ("%|with %c:\n%+%c%-", 0, 3),
|
||||
"withasstmt": ("%|with %c as (%c):\n%+%c%-", 0, 2, 3),
|
||||
}
|
||||
)
|
||||
|
||||
assert version >= 3.0
|
||||
|
||||
@@ -53,102 +65,102 @@ def customize_for_version3(self, version):
|
||||
# ----------
|
||||
# * subclass_code - the code for the subclass body
|
||||
subclass_info = None
|
||||
if node == 'classdefdeco2':
|
||||
if node == "classdefdeco2":
|
||||
if self.version >= 3.6:
|
||||
class_name = node[1][1].pattr
|
||||
class_name = node[1][1].attr
|
||||
elif self.version <= 3.3:
|
||||
class_name = node[2][0].pattr
|
||||
class_name = node[2][0].attr
|
||||
else:
|
||||
class_name = node[1][2].pattr
|
||||
class_name = node[1][2].attr
|
||||
build_class = node
|
||||
else:
|
||||
build_class = node[0]
|
||||
if self.version >= 3.6:
|
||||
if build_class == 'build_class_kw':
|
if build_class == "build_class_kw":
mkfunc = build_class[1]
assert mkfunc == 'mkfunc'
assert mkfunc == "mkfunc"
subclass_info = build_class
if hasattr(mkfunc[0], 'attr') and iscode(mkfunc[0].attr):
if hasattr(mkfunc[0], "attr") and iscode(mkfunc[0].attr):
subclass_code = mkfunc[0].attr
else:
assert mkfunc[0] == 'load_closure'
assert mkfunc[0] == "load_closure"
subclass_code = mkfunc[1].attr
assert iscode(subclass_code)
if build_class[1][0] == 'load_closure':
if build_class[1][0] == "load_closure":
code_node = build_class[1][1]
else:
code_node = build_class[1][0]
class_name = code_node.attr.co_name
else:
class_name = node[1][0].pattr
class_name = node[1][0].attr
build_class = node[0]

assert 'mkfunc' == build_class[1]
assert "mkfunc" == build_class[1]
mkfunc = build_class[1]
if mkfunc[0] in ('kwargs', 'no_kwargs'):
if mkfunc[0] in ("kwargs", "no_kwargs"):
if 3.0 <= self.version <= 3.2:
for n in mkfunc:
if hasattr(n, 'attr') and iscode(n.attr):
if hasattr(n, "attr") and iscode(n.attr):
subclass_code = n.attr
break
elif n == 'expr':
elif n == "expr":
subclass_code = n[0].attr
pass
pass
else:
for n in mkfunc:
if hasattr(n, 'attr') and iscode(n.attr):
if hasattr(n, "attr") and iscode(n.attr):
subclass_code = n.attr
break
pass
pass
if node == 'classdefdeco2':
if node == "classdefdeco2":
subclass_info = node
else:
subclass_info = node[0]
elif build_class[1][0] == 'load_closure':
elif build_class[1][0] == "load_closure":
# Python 3 with closures not functions
load_closure = build_class[1]
if hasattr(load_closure[-3], 'attr'):
if hasattr(load_closure[-3], "attr"):
# Python 3.3 classes with closures work like this.
# Note have to test before 3.2 case because
# index -2 also has an attr.
subclass_code = load_closure[-3].attr
elif hasattr(load_closure[-2], 'attr'):
elif hasattr(load_closure[-2], "attr"):
# Python 3.2 works like this
subclass_code = load_closure[-2].attr
else:
raise 'Internal Error n_classdef: cannot find class body'
if hasattr(build_class[3], '__len__'):
raise "Internal Error n_classdef: cannot find class body"
if hasattr(build_class[3], "__len__"):
if not subclass_info:
subclass_info = build_class[3]
elif hasattr(build_class[2], '__len__'):
elif hasattr(build_class[2], "__len__"):
subclass_info = build_class[2]
else:
raise 'Internal Error n_classdef: cannot superclass name'
elif self.version >= 3.6 and node == 'classdefdeco2':
raise "Internal Error n_classdef: cannot superclass name"
elif self.version >= 3.6 and node == "classdefdeco2":
subclass_info = node
subclass_code = build_class[1][0].attr
elif not subclass_info:
if mkfunc[0] in ('no_kwargs', 'kwargs'):
if mkfunc[0] in ("no_kwargs", "kwargs"):
subclass_code = mkfunc[1].attr
else:
subclass_code = mkfunc[0].attr
if node == 'classdefdeco2':
if node == "classdefdeco2":
subclass_info = node
else:
subclass_info = node[0]

if (node == 'classdefdeco2'):
self.write('\n')
if node == "classdefdeco2":
self.write("\n")
else:
self.write('\n\n')
self.write("\n\n")

self.currentclass = str(class_name)
self.write(self.indent, 'class ', self.currentclass)
self.write(self.indent, "class ", self.currentclass)

self.print_super_classes3(subclass_info)
self.println(':')
self.println(":")

# class body
self.indent_more()
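For orientation, a minimal assumed example of what the handler above digs out: in Python 3 a class statement compiles to a MAKE_FUNCTION over the class-body code object, and the class name it recovers is that code object's co_name.

    # Illustrative sketch only, not part of the diff above.
    import dis

    source = "class C(Base):\n    x = 1\n"
    module_code = compile(source, "<example>", "exec")
    class_body = next(c for c in module_code.co_consts if hasattr(c, "co_name"))
    print(class_body.co_name)   # -> "C", the name n_classdef3 reconstructs
    dis.dis(module_code)        # shows LOAD_BUILD_CLASS / MAKE_FUNCTION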
@@ -157,11 +169,12 @@ def customize_for_version3(self, version):

self.currentclass = cclass
if len(self.param_stack) > 1:
self.write('\n\n')
self.write("\n\n")
else:
self.write('\n\n\n')
self.write("\n\n\n")

self.prune()

self.n_classdef3 = n_classdef3

if version == 3.0:
@@ -170,42 +183,44 @@ def customize_for_version3(self, version):
# since we pick up the iteration variable some other way and
# we definitely don't include in the source _[dd].
def n_comp_iter(node):
if node[0] == 'expr':
if node[0] == "expr":
n = node[0][0]
if (n == 'LOAD_FAST' and
n.pattr[0:2] == '_['):
if n == "LOAD_FAST" and n.pattr[0:2] == "_[":
self.prune()
pass
pass
# Not this special case, procede as normal...
# Not this special case, proceed as normal...
self.default(node)

self.n_comp_iter = n_comp_iter

if version >= 3.3:
elif version == 3.3:
# FIXME: perhaps this can be folded into the 3.4+ case?
def n_yield_from(node):
self.write('yield from')
self.write(' ')
if 3.3 <= self.version <= 3.4:
self.preorder(node[0][0][0][0])
elif self.version >= 3.5:
self.preorder(node[0])
else:
assert False, "dunno about this python version"
self.prune() # stop recursing
assert node[0] == "expr"
assert node[0][0] == "get_iter"
# Skip over yield_from.expr.get_iter which adds an
# extra iter(). Maybe we can do in tranformation phase instead?
template = ("yield from %c", (0, "expr"))
self.template_engine(template, node[0][0])
self.prune()

self.n_yield_from = n_yield_from
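As a sanity check on the reworked 3.3 handler, an assumed round trip: the 3.3 grammar wraps the operand of yield_from in a get_iter node, and printing only the inner expr keeps the decompiled source from growing an extra iter() call.

    # Illustrative sketch only: the source shape the handler should reproduce.
    def forward(seq):
        # Decompiles back to the line below, not to "yield from iter(seq)".
        yield from seq

    print(list(forward([1, 2, 3])))   # -> [1, 2, 3]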

if 3.2 <= version <= 3.4:

def n_call(node):

mapping = self._get_mapping(node)
key = node
for i in mapping[1:]:
key = key[i]
pass
if key.kind.startswith('CALL_FUNCTION_VAR_KW'):
if key.kind.startswith("CALL_FUNCTION_VAR_KW"):
# We may want to fill this in...
# But it is distinct from CALL_FUNCTION_VAR below
pass
elif key.kind.startswith('CALL_FUNCTION_VAR'):
elif key.kind.startswith("CALL_FUNCTION_VAR"):
# CALL_FUNCTION_VAR's top element of the stack contains
# the variable argument list, then comes
# annotation args, then keyword args.
@@ -219,28 +234,52 @@ def customize_for_version3(self, version):
# kwargs == 0 is handled by the table entry
# Should probably handle it here though.
if nargs == 0:
template = ('%c(*%c, %C)',
0, -2, (1, kwargs+1, ', '))
template = ("%c(*%c, %C)", 0, -2, (1, kwargs + 1, ", "))
else:
template = ('%c(%C, *%c, %C)',
0, (1, nargs+1, ', '),
-2, (-2-kwargs, -2, ', '))
template = (
"%c(%C, *%c, %C)",
0,
(1, nargs + 1, ", "),
-2,
(-2 - kwargs, -2, ", "),
)
self.template_engine(template, node)
self.prune()

else:
gen_function_parens_adjust(key, node)
self.default(node)
self.n_call = n_call

self.n_call = n_call
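For orientation, an assumed example of the 3.2-3.4 call shapes the "%c(%C, *%c, %C)" template above lays back out: plain positional arguments first, then the *-unpacked sequence, then any keyword arguments.

    # Illustrative sketch only, not from the diff.
    def report(*values, **options):
        print(values, options)

    extra = (1, 2)
    report(*extra, tag="a")          # nargs == 0 branch: f(*args, kw=...)
    report("hdr", *extra, tag="a")   # nargs > 0 branch:  f(a, *args, kw=...)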
elif version < 3.2:

def n_call(node):
mapping = self._get_mapping(node)
key = node
for i in mapping[1:]:
key = key[i]
pass
gen_function_parens_adjust(key, node)
self.default(node)

self.n_call = n_call

def n_mkfunc_annotate(node):

if self.version >= 3.3 or node[-2] == 'kwargs':
# Handling EXTENDED_ARG before MAKE_FUNCTION ...
if node[-2] == "EXTENDED_ARG":
i = -1
else:
i = 0

if self.version <= 3.2:
code = node[-2 + i]
elif self.version >= 3.3 or node[-2] == "kwargs":
# LOAD_CONST code object ..
# LOAD_CONST 'x0' if >= 3.3
# EXTENDED_ARG
# MAKE_FUNCTION ..
code = node[-4]
elif node[-3] == 'expr':
code = node[-3 + i]
elif node[-3] == "expr":
code = node[-3][0]
else:
# LOAD_CONST code object ..
@@ -248,42 +287,51 @@ def customize_for_version3(self, version):
code = node[-3]

self.indent_more()
for annotate_last in range(len(node)-1, -1, -1):
if node[annotate_last] == 'annotate_tuple':
for annotate_last in range(len(node) - 1, -1, -1):
if node[annotate_last] == "annotate_tuple":
break

# FIXME: the real situation is that when derived from
# function_def_annotate we the name has been filled in.
# But when derived from funcdefdeco it hasn't Would like a better
# way to distinquish.
if self.f.getvalue()[-4:] == 'def ':
if self.f.getvalue()[-4:] == "def ":
self.write(code.attr.co_name)

# FIXME: handle and pass full annotate args
make_function3_annotate(self, node, is_lambda=False,
code_node=code, annotate_last=annotate_last)
make_function3_annotate(
self, node, is_lambda=False, code_node=code, annotate_last=annotate_last
)

if len(self.param_stack) > 1:
self.write('\n\n')
self.write("\n\n")
else:
self.write('\n\n\n')
self.write("\n\n\n")
self.indent_less()
self.prune() # stop recursing
self.prune() # stop recursing

self.n_mkfunc_annotate = n_mkfunc_annotate
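A minimal assumed example of the definitions n_mkfunc_annotate rebuilds: the annotations travel in an annotate_tuple child, separate from the function's code object, and both are handed to make_function3_annotate so the header comes back out with its annotations.

    # Illustrative sketch only: the kind of source this handler reconstructs.
    def scale(x: float, factor: int = 2) -> float:
        return x * factor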

TABLE_DIRECT.update({
'tryelsestmtl3': ( '%|try:\n%+%c%-%c%|else:\n%+%c%-',
(1, 'suite_stmts_opt'),
(3, 'except_handler'),
(5, 'else_suitel') ),
})
TABLE_DIRECT.update(
{
"tryelsestmtl3": (
"%|try:\n%+%c%-%c%|else:\n%+%c%-",
(1, "suite_stmts_opt"),
(3, "except_handler"),
(5, "else_suitel"),
)
}
)
if version >= 3.4:
#######################
# Python 3.4+ Changes #
#######################
TABLE_DIRECT.update({
'LOAD_CLASSDEREF': ( '%{pattr}', ),
})
TABLE_DIRECT.update(
{
"LOAD_CLASSDEREF": ("%{pattr}",),
"yield_from": ("yield from %c", (0, "expr")),
}
)
if version >= 3.5:
customize_for_version35(self, version)
if version >= 3.6:
@@ -293,8 +341,8 @@ def customize_for_version3(self, version):
if version >= 3.8:
customize_for_version38(self, version)
pass # version >= 3.8
pass # 3.7
pass # 3.6
pass # 3.5
pass # 3.4
pass # 3.7
pass # 3.6
pass # 3.5
pass # 3.4
return
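An assumed reading of the escapes used in the tryelsestmtl3 entry (%| emits the current indent, %+/%- push and pop an indent level, %c recurses into the child named by the (index, nonterminal) pair), shown against the source shape it lays out:

    # Illustrative sketch only; the meaning of the template escapes is assumed
    # from how the other TABLE_DIRECT entries in this file are used.
    def do_work():
        return 1

    try:
        result = do_work()      # (1, "suite_stmts_opt")
    except ValueError:          # (3, "except_handler")
        result = None
    else:                       # (5, "else_suitel")
        result = result + 1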

@@ -19,7 +19,8 @@ from xdis.code import iscode
from xdis.util import COMPILER_FLAG_BIT
from uncompyle6.semantics.consts import (
INDENT_PER_LEVEL, TABLE_DIRECT)
from uncompyle6.semantics.helper import flatten_list
from uncompyle6.semantics.helper import (
flatten_list, gen_function_parens_adjust)

#######################
# Python 3.5+ Changes #
@@ -112,17 +113,21 @@ def customize_for_version35(self, version):
template = ('*%c)', nargs+1)
self.template_engine(template, node)
self.prune()
else:
gen_function_parens_adjust(key, node)

self.default(node)
self.n_call = n_call

def n_function_def(node):
if self.version >= 3.6:
code_node = node[0][0]
else:
code_node = node[0][1]
n0 = node[0]
is_code = False
for i in list(range(len(n0)-2, -1, -1)):
code_node = n0[i]
if hasattr(code_node, 'attr') and iscode(code_node.attr):
is_code = True
break

is_code = hasattr(code_node, 'attr') and iscode(code_node.attr)
if (is_code and
(code_node.attr.co_flags & COMPILER_FLAG_BIT['COROUTINE'])):
self.template_engine(('\n\n%|async def %c\n',
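The async-def detection above keys off a bit in the code object's co_flags; a minimal assumed check along the same lines:

    # Illustrative sketch only: what the COMPILER_FLAG_BIT['COROUTINE'] test
    # above is looking at on the decompiled code object.
    import inspect

    async def fetch():
        return 42

    print(bool(fetch.__code__.co_flags & inspect.CO_COROUTINE))   # -> True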
@@ -17,7 +17,9 @@

from spark_parser.ast import GenericASTTraversalPruningException
from uncompyle6.scanners.tok import Token
from uncompyle6.semantics.helper import flatten_list
from uncompyle6.semantics.helper import (
flatten_list, escape_string, strip_quotes
)
from uncompyle6.semantics.consts import (
INDENT_PER_LEVEL, PRECEDENCE, TABLE_DIRECT, TABLE_R)

@@ -31,28 +33,19 @@ def escape_format(s):
#######################

def customize_for_version36(self, version):
# Value 100 is important; it is exactly
# module/function precidence.
PRECEDENCE['call_kw'] = 100
PRECEDENCE['call_kw36'] = 100
PRECEDENCE['call_ex'] = 100
PRECEDENCE['call_ex_kw'] = 100
PRECEDENCE['call_ex_kw2'] = 100
PRECEDENCE['call_ex_kw3'] = 100
PRECEDENCE['call_ex_kw4'] = 100
PRECEDENCE['call_kw'] = 0
PRECEDENCE['call_kw36'] = 1
PRECEDENCE['call_ex'] = 1
PRECEDENCE['call_ex_kw'] = 1
PRECEDENCE['call_ex_kw2'] = 1
PRECEDENCE['call_ex_kw3'] = 1
PRECEDENCE['call_ex_kw4'] = 1
PRECEDENCE['unmap_dict'] = 0
PRECEDENCE['formatted_value1'] = 100

TABLE_DIRECT.update({
'tryfinally36': ( '%|try:\n%+%c%-%|finally:\n%+%c%-\n\n',
(1, 'returns'), 3 ),
'fstring_expr': ( "{%c%{conversion}}",
(0, 'expr') ),
# FIXME: the below assumes the format strings
# don't have ''' in them. Fix this properly
'fstring_single': ( "f'''{%c%{conversion}}'''", 0),
'formatted_value_attr': ( "f'''{%c%{conversion}}%{string}'''",
(0, 'expr')),
'fstring_multi': ( "f'''%c'''", 0),
'func_args36': ( "%c(**", 0),
'try_except36': ( '%|try:\n%+%c%-%c\n\n', 1, -2 ),
'except_return': ( '%|except:\n%+%c%-', 3 ),
@@ -67,9 +60,15 @@ def customize_for_version36(self, version):
'call_ex' : (
'%c(%p)',
(0, 'expr'), (1, 100)),
'call_ex_kw' : (
'%c(%p)',
(0, 'expr'), (2, 100)),
'store_annotation': (
'%[1]{pattr}: %c',
0
),
'ann_assign_init_value': (
'%|%c = %p\n',
(-1, 'store_annotation'), (0, 'expr', 200)),
'ann_assign_no_init': (
'%|%c\n', (0, 'store_annotation')),

})

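For the new store_annotation and ann_assign entries, an assumed example of the 3.6 annotated-assignment forms they print:

    # Illustrative sketch only: forms covered by ann_assign_init_value
    # ("%|%c = %p\n") and ann_assign_no_init ("%|%c\n").
    count: int = 0    # annotation with an initial value
    limit: float      # bare annotation, no initializer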
@@ -80,20 +79,28 @@ def customize_for_version36(self, version):
})

def build_unpack_tuple_with_call(node):

if node[0] == 'expr':
tup = node[0][0]
n = node[0]
if n == 'expr':
n = n[0]
if n == 'tuple':
self.call36_tuple(n)
first = 1
sep = ', *'
elif n == 'LOAD_STR':
value = self.format_pos_args(n)
self.f.write(value)
first = 1
sep = ', *'
else:
tup = node[0]
pass
assert tup == 'tuple'
self.call36_tuple(tup)
first = 0
sep = '*'

buwc = node[-1]
assert buwc.kind.startswith('BUILD_TUPLE_UNPACK_WITH_CALL')
for n in node[1:-1]:
self.f.write(', *')
for n in node[first:-1]:
self.f.write(sep)
self.preorder(n)
sep = ', *'
pass
self.prune()
return
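An assumed example of the call form behind BUILD_TUPLE_UNPACK_WITH_CALL, which the handler above re-emits with its ', *' separators:

    # Illustrative sketch only: splicing several iterables into one call's
    # positional arguments is what this opcode covers on 3.6.
    head = (1, 2)
    tail = [3, 4]
    print(*head, *tail, 5)   # -> 1 2 3 4 5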
@@ -119,45 +126,41 @@ def customize_for_version36(self, version):
return
self.n_build_map_unpack_with_call = build_unpack_map_with_call

def call_ex_kw(node):
"""Handle CALL_FUNCTION_EX 1 (have KW) but with
BUILD_MAP_UNPACK_WITH_CALL"""

expr = node[1]
assert expr == 'expr'

value = self.format_pos_args(expr)
if value == '':
fmt = "%c(%p)"
else:
fmt = "%%c(%s, %%p)" % value

self.template_engine(
(fmt,
(0, 'expr'), (2, 'build_map_unpack_with_call', 100)), node)

self.prune()
self.n_call_ex_kw = call_ex_kw

def call_ex_kw2(node):
"""Handle CALL_FUNCTION_EX 2 (have KW) but with
BUILD_{MAP,TUPLE}_UNPACK_WITH_CALL"""

# This is weird shit. Thanks Python!
self.preorder(node[0])
self.write('(')

assert node[1] == 'build_tuple_unpack_with_call'
btuwc = node[1]
tup = btuwc[0]
if tup == 'expr':
tup = tup[0]

if tup == 'LOAD_CONST':
self.write(', '.join(['"%s"' % t.replace('"','\\"') for t in tup.attr]))
value = self.format_pos_args(node[1])
if value == '':
fmt = "%c(%p)"
else:
assert tup == 'tuple'
self.call36_tuple(tup)
fmt = "%%c(%s, %%p)" % value

assert node[2] == 'build_map_unpack_with_call'
self.template_engine(
(fmt,
(0, 'expr'), (2, 'build_map_unpack_with_call', 100)), node)

self.write(', ')
d = node[2][0]
if d == 'expr':
d = d[0]
assert d == 'dict'
self.call36_dict(d)

args = btuwc[1]
self.write(', *')
self.preorder(args)

self.write(', **')
star_star_args = node[2][1]
if star_star_args == 'expr':
star_star_args = star_star_args[0]
self.preorder(star_star_args)
self.write(')')
self.prune()
self.n_call_ex_kw2 = call_ex_kw2
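For orientation, an assumed example of the 3.6 calls these two handlers undo: CALL_FUNCTION_EX paired with BUILD_MAP_UNPACK_WITH_CALL, and for call_ex_kw2 a BUILD_TUPLE_UNPACK_WITH_CALL on the positional side as well.

    # Illustrative sketch only, not from the diff.
    def configure(*args, **options):
        return args, options

    defaults = {"retries": 3}
    print(configure(1, 2, **defaults, timeout=10))        # call_ex_kw-style call
    print(configure(1, *"ab", **defaults, timeout=10))    # call_ex_kw2-style call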

@@ -166,14 +169,13 @@ def customize_for_version36(self, version):
BUILD_MAP_UNPACK_WITH_CALL"""
self.preorder(node[0])
self.write('(')
args = node[1][0]
if args == 'expr':
args = args[0]
if args == 'tuple':
if self.call36_tuple(args) > 0:
self.write(', ')
pass

value = self.format_pos_args(node[1][0])
if value == '':
pass
else:
self.write(value)
self.write(', ')

self.write('*')
self.preorder(node[1][1])
@@ -226,6 +228,25 @@ def customize_for_version36(self, version):
self.prune()
self.n_call_ex_kw4 = call_ex_kw4

def format_pos_args(node):
"""
Positional args should format to:
(*(2, ), ...) -> (2, ...)
We remove starting and trailing parenthesis and ', ' if
tuple has only one element.
"""
value = self.traverse(node, indent='')
if value.startswith('('):
assert value.endswith(')')
value = value[1:-1].rstrip(" ") # Remove starting '(' and trailing ')' and additional spaces
if value == '':
pass # args is empty
else:
if value.endswith(','): # if args has only one item
value = value[:-1]
return value
self.format_pos_args = format_pos_args
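A small assumed re-statement of the trimming format_pos_args performs on the rendered tuple text, with the cases from its docstring:

    # Illustrative sketch only: the same string trimming, stated standalone.
    def trim_pos_args(value):
        if value.startswith("("):
            value = value[1:-1].rstrip(" ")   # drop surrounding parentheses
            if value.endswith(","):           # single-element tuple: drop the comma
                value = value[:-1]
        return value

    print(trim_pos_args("(2, )"))    # -> 2
    print(trim_pos_args("(1, 2)"))   # -> 1, 2
    print(trim_pos_args("()"))       # -> (empty string)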

def call36_tuple(node):
"""
A tuple used in a call, these are like normal tuples but they
@@ -331,81 +352,8 @@ def customize_for_version36(self, version):
return
self.call36_dict = call36_dict

FSTRING_CONVERSION_MAP = {1: '!s', 2: '!r', 3: '!a', 'X':':X'}

def n_except_suite_finalize(node):
if node[1] == 'returns' and self.hide_internal:
# Process node[1] only.
# The code after "returns", e.g. node[3], is dead code.
# Adding it is wrong as it dedents and another
# exception handler "except_stmt" afterwards.
# Note it is also possible that the grammar is wrong here.
# and this should not be "except_stmt".
self.indent_more()
self.preorder(node[1])
self.indent_less()
else:
self.default(node)
self.prune()
self.n_except_suite_finalize = n_except_suite_finalize

def n_formatted_value(node):
if node[0] == 'LOAD_CONST':
value = node[0].attr
if isinstance(value, tuple):
self.write(node[0].attr)
else:
self.write(escape_format(node[0].attr))
self.prune()
else:
self.default(node)
self.n_formatted_value = n_formatted_value

def f_conversion(node):
fmt_node = node.data[1]
if fmt_node == 'expr' and fmt_node[0] == 'LOAD_CONST':
data = fmt_node[0].attr
else:
data = fmt_node.attr
node.conversion = FSTRING_CONVERSION_MAP.get(data, '')

def fstring_expr(node):
f_conversion(node)
self.default(node)
self.n_fstring_expr = fstring_expr

def fstring_single(node):
f_conversion(node)
self.default(node)
self.n_fstring_single = fstring_single

def formatted_value_attr(node):
f_conversion(node)
fmt_node = node.data[3]
if fmt_node == 'expr' and fmt_node[0] == 'LOAD_CONST':
node.string = escape_format(fmt_node[0].attr)
else:
node.string = fmt_node

self.default(node)
self.n_formatted_value_attr = formatted_value_attr

# def kwargs_only_36(node):
# keys = node[-1].attr
# num_kwargs = len(keys)
# values = node[:num_kwargs]
# for i, (key, value) in enumerate(zip(keys, values)):
# self.write(key + '=')
# self.preorder(value)
# if i < num_kwargs:
# self.write(',')
# self.prune()
# return
# self.n_kwargs_only_36 = kwargs_only_36

def n_call_kw36(node):
self.template_engine(("%c(", 0), node)
self.template_engine(("%p(", (0, 100)), node)
keys = node[-2].attr
num_kwargs = len(keys)
num_posargs = len(node) - (num_kwargs + 2)
@@ -442,6 +390,137 @@ def customize_for_version36(self, version):
return
self.n_call_kw36 = n_call_kw36
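An assumed example of the 3.6 convention behind n_call_kw36: CALL_FUNCTION_KW carries the keyword names as a single constant tuple, which is why the handler reads them from node[-2].attr and treats the remaining children as positional arguments.

    # Illustrative sketch only: the keyword names travel as one constant tuple.
    import dis
    dis.dis('print("a", "b", sep="-", end="!")')   # shows LOAD_CONST (('sep', 'end'))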

FSTRING_CONVERSION_MAP = {1: '!s', 2: '!r', 3: '!a', 'X':':X'}

def n_except_suite_finalize(node):
if node[1] == 'returns' and self.hide_internal:
# Process node[1] only.
# The code after "returns", e.g. node[3], is dead code.
# Adding it is wrong as it dedents and another
# exception handler "except_stmt" afterwards.
# Note it is also possible that the grammar is wrong here.
# and this should not be "except_stmt".
self.indent_more()
self.preorder(node[1])
self.indent_less()
else:
self.default(node)
self.prune()
self.n_except_suite_finalize = n_except_suite_finalize

def n_formatted_value(node):
if node[0] in ('LOAD_STR', 'LOAD_CONST'):
value = node[0].attr
if isinstance(value, tuple):
self.write(node[0].attr)
else:
self.write(escape_string(node[0].attr))
self.prune()
else:
self.default(node)
self.n_formatted_value = n_formatted_value

def n_formatted_value_attr(node):
f_conversion(node)
fmt_node = node.data[3]
if fmt_node == 'expr' and fmt_node[0] == 'LOAD_STR':
node.string = escape_format(fmt_node[0].attr)
else:
node.string = fmt_node
self.default(node)
self.n_formatted_value_attr = n_formatted_value_attr

def f_conversion(node):
fmt_node = node.data[1]
if fmt_node == 'expr' and fmt_node[0] == 'LOAD_STR':
data = fmt_node[0].attr
else:
data = fmt_node.attr
node.conversion = FSTRING_CONVERSION_MAP.get(data, '')
return node.conversion

def n_formatted_value1(node):
expr = node[0]
assert expr == 'expr'
value = self.traverse(expr, indent='')
conversion = f_conversion(node)
f_str = "f%s" % escape_string("{%s%s}" % (value, conversion))
self.write(f_str)
self.prune()

self.n_formatted_value1 = n_formatted_value1
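The keys of FSTRING_CONVERSION_MAP mirror the FORMAT_VALUE conversion flags; a minimal assumed check of that correspondence:

    # Illustrative sketch only: FORMAT_VALUE's conversion argument selects
    # !s (1), !r (2) or !a (3), which the map turns back into source syntax.
    import dis
    dis.dis('f"{x!r}"')   # shows FORMAT_VALUE 2 (repr); decompiles to f"{x!r}"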

def n_formatted_value2(node):
p = self.prec
self.prec = 100

expr = node[0]
assert expr == 'expr'
value = self.traverse(expr, indent='')
format_value_attr = node[-1]
assert format_value_attr == 'FORMAT_VALUE_ATTR'
attr = format_value_attr.attr
if attr == 4:
assert node[1] == 'expr'
fmt = strip_quotes(self.traverse(node[1], indent=''))
conversion = ":%s" % fmt
else:
conversion = FSTRING_CONVERSION_MAP.get(attr, '')

f_str = "f%s" % escape_string("{%s%s}" % (value, conversion))
self.write(f_str)

self.prec = p
self.prune()
self.n_formatted_value2 = n_formatted_value2
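The attr == 4 branch above corresponds to an explicit format spec; an assumed example:

    # Illustrative sketch only: an explicit format spec sets flag 4 on
    # FORMAT_VALUE, and the handler re-emits it after a colon.
    value = 3.14159
    print(f"{value:.2f}")   # -> 3.14 ; decompiles back to f"{value:.2f}"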

def n_joined_str(node):
p = self.prec
self.prec = 100

result = ''
for expr in node[:-1]:
assert expr == 'expr'
value = self.traverse(expr, indent='')
if expr[0].kind.startswith('formatted_value'):
# remove leading 'f'
assert value.startswith('f')
value = value[1:]
pass
else:
# {{ and }} in Python source-code format strings mean
# { and } respectively. But only when *not* part of a
# formatted value. However in the LOAD_STR
# bytecode, the escaping of the braces has been
# removed. So we need to put back the braces escaping in
# reconstructing the source.
assert expr[0] == 'LOAD_STR'
value = value.replace("{", "{{").replace("}", "}}")

# Remove leading quotes
result += strip_quotes(value)
pass
self.write('f%s' % escape_string(result))

self.prec = p
self.prune()
self.n_joined_str = n_joined_str
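A minimal assumed example of the brace re-escaping n_joined_str has to do: the literal pieces stored in the bytecode hold plain { and }, so the doubled braces must be put back when the f-string source is rebuilt.

    # Illustrative sketch only: the constant piece holds "{x} = ", so the
    # decompiler must write it back as "{{x}} = " inside the f-string.
    x = 7
    print(f"{{x}} = {x}")   # -> {x} = 7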

# def kwargs_only_36(node):
# keys = node[-1].attr
# num_kwargs = len(keys)
# values = node[:num_kwargs]
# for i, (key, value) in enumerate(zip(keys, values)):
# self.write(key + '=')
# self.preorder(value)
# if i < num_kwargs:
# self.write(',')
# self.prune()
# return
# self.n_kwargs_only_36 = kwargs_only_36

def starred(node):
l = len(node)
assert l > 0
Some files were not shown because too many files have changed in this diff.