Mirror of https://github.com/rocky/python-uncompyle6.git, synced 2025-08-04 01:09:52 +08:00
Compare commits: release-3. ... release-py (363 commits)
Author | SHA1 | Date
[commit table: 363 rows, from 0b24eca8d7 down to a9349b8f3d; only the abbreviated SHA1 column was captured, the author, date, and message columns are empty]
.gitignore (vendored): 1 line changed
@@ -21,3 +21,4 @@
 ChangeLog
 __pycache__
 build
+nohup.out
@@ -3,13 +3,7 @@ language: python
 sudo: false

 python:
-- '3.5'
-- '2.7'
-- '2.6'
-- '3.3'
-- '3.4'
-- '3.2'
-- '3.6'
+- '2.7' # this is a cheat here because travis doesn't do 2.4-2.6

 install:
 - pip install -e .
Makefile: 9 lines changed
@@ -27,19 +27,20 @@ check:
 check-short: pytest
 	$(MAKE) -C test check-short

+# Note for 2.6 use <=3.0.1 see requirements-dev.txt
 #: Tests for Python 2.7, 3.3 and 3.4
-check-2.7 check-3.3 check-3.4: pytest
+check-2.6 check-2.7 check-3.3 check-3.4 check-3.5: pytest
 	$(MAKE) -C test $@

 #: Tests for Python 3.2 and 3.5 - pytest doesn't work here
 # Or rather 3.5 doesn't work not on Travis
-check-3.0 check-3.1 check-3.2 check-3.5 check-3.6:
+check-3.0 check-3.1 check-3.2 check-3.6:
 	$(MAKE) -C test $@

 check-3.7: pytest

-#:Tests for Python 2.6 (doesn't have pytest)
-check-2.6:
+#:Tests for Python 2.4-2.5 (don't have pytest)
+check-2.4 check-2.5:
 	$(MAKE) -C test $@

 #:PyPy 2.6.1 PyPy 5.0.1, or PyPy 5.8.0-beta0
NEWS: 21 lines changed
@@ -1,3 +1,24 @@
+uncompyle6 3.1.2 2018-04-08 Eastern Orthodox Easter
+
+- Python 3.x subclass and call parsing fixes
+- Allow/note running on Python 3.1
+- improve 3.5+ BUILD_MAP_UNPACK
+- DRY instruction building code between 2.x and 3.x
+- expand testing
+
+uncompyle6 3.1.1 2018-04-01 Easter April Fool's
+
+Jesus on Friday's New York Times puzzle: "I'm stuck on 2A"
+
+- fill out 3.5+ BUILD_MAP_UNPACK (more work is needed)
+- fill out 3.4+ CALL_FUNCTION_... (more work is needed)
+- fill out 3.5 MAKE_FUNCTION (more work is needed)
+- reduce 3.5, 3.6 control-flow bugs
+- reduce ambiguity in rules that lead to long (exponential?) parses
+- limit/isolate some 2.6/2.7,3.x grammar rules
+- more runtime testing of decompiled code
+- more removal of parenthesis around calls via setting precidence
+
 uncompyle6 3.1.0 2018-03-21 Equinox

 - Add code_deparse_with_offset() fragment function.
@@ -35,6 +35,8 @@ classifiers = ['Development Status :: 5 - Production/Stable',
                'Programming Language :: Python :: 2.5',
                'Programming Language :: Python :: 2.6',
                'Programming Language :: Python :: 2.7',
+               'Programming Language :: Python :: 3.1',
+               'Programming Language :: Python :: 3.2',
                'Programming Language :: Python :: 3.3',
                'Programming Language :: Python :: 3.4',
                'Programming Language :: Python :: 3.5',
@@ -54,8 +56,7 @@ entry_points = {
     ]}
 ftp_url = None
 install_requires = ['spark-parser >= 1.8.5, < 1.9.0',
-                    'xdis >= 3.7.0, < 3.8.0', 'six']
+                    'xdis >= 3.7.0, < 3.8.0']
 license = 'GPL3'
 mailing_list = 'python-debugger@googlegroups.com'
 modname = 'uncompyle6'
admin-tools/check-newer-versions.sh: mode changed, executable file → normal file (0 lines changed)
admin-tools/check-older-versions.sh: mode changed, executable file → normal file (0 lines changed)
admin-tools/how-to-make-a-release.txt (new file): 46 lines
@@ -0,0 +1,46 @@
git pull

Change version in uncompyle6/version.py
source uncompyle6/version.py
echo $VERSION
git commit -m"Get ready for release $VERSION" .

Update ChangeLog:
make ChangeLog

Update NEWS from ChangeLog
make check

git commit --amend .

git push

Make sure pyenv is running
# Pyenv

source admin-tools/check-newer-versions.sh


# Switch to python-2.4 and build that first...
source admin-tools/setup-python-2.4

rm ChangeLog
git merge master

Update NEWS from master branch

git commit -m"Get ready for release $VERSION" .

source admin-tools/check-older-versions.sh
source admin-tools/check-newer-versions.sh

make-dist-older.sh

git tag release-python-2.4-$VERSION

./make-dist-newer.sh

git tag release-$VERSION


twine upload dist/uncompyle6-${VERSION}*
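The "source uncompyle6/version.py; echo $VERSION" step above only works because version.py is kept in a form that both Python and the shell can read. A minimal sketch of the idea, assuming version.py holds a single quoted VERSION assignment with no spaces around the equals sign (the file itself is not part of this diff):

    # assumed contents of uncompyle6/version.py:  VERSION="3.1.2"
    source uncompyle6/version.py && echo "shell sees $VERSION"
    python -c 'exec(open("uncompyle6/version.py").read()); print(VERSION)'

If that assignment ever gains spaces or other Python-only syntax, the shell source step in the checklist breaks while the Python side keeps working.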
@@ -5,4 +5,4 @@ if [[ $0 == ${BASH_SOURCE[0]} ]] ; then
     echo "This script should be *sourced* rather than run directly through bash"
     exit 1
 fi
-export PYVERSIONS='3.5.5 3.6.4 2.6.9 3.3.7 2.7.14 3.4.8'
+export PYVERSIONS='3.5.5 3.6.4 2.6.9 3.3.7 2.7.14 3.2.6 3.1.5 3.4.8'
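Only the PYVERSIONS line of this helper is part of the diff, so the loop below is a hypothetical sketch of how a script like check-newer-versions.sh might consume the variable with pyenv; the real body may differ:

    for v in $PYVERSIONS; do
        pyenv local $v || exit 1   # assumes each interpreter is already installed under pyenv
        make check                 # hypothetical per-version check step
    done

The change itself just adds 3.1.5 and 3.2.6 to the set of interpreters the newer-version checks cycle through.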
admin-tools/setup-master.sh: mode changed, executable file → normal file (0 lines changed)
admin-tools/setup-python-2.4.sh: mode changed, executable file → normal file (0 lines changed)
admin-tools/update-sources.sh (new executable file): 3 lines
@@ -0,0 +1,3 @@
#!/bin/bash
cd $(dirname ${BASH_SOURCE[0]})/..
git pull
@@ -11,4 +11,4 @@ dependencies:
 test:
   override:
     - python ./setup.py develop && make check-2.7
-    - cd ./test/stdlib && pyenv local 2.7.10 && bash ./runtests.sh 'test_[p-z]*.py'
+    - cd ./test/stdlib && pyenv local 2.7.10 && bash ./runtests.sh 'test_[f-i]*.py'
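The adjusted CI step can be reproduced locally; the interpreter pin and the test glob are taken verbatim from the config above, and the commands assume a checkout with pyenv and the listed 2.7.10 interpreter available:

    python ./setup.py develop && make check-2.7
    cd ./test/stdlib
    pyenv local 2.7.10
    bash ./runtests.sh 'test_[f-i]*.py'   # the narrower glob this change switches to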
@@ -30,7 +30,7 @@ def list_comp():
     [y for y in range(3)]

 def get_parsed_for_fn(fn):
-    code = fn.__code__ if PYTHON3 else fn.func_code
+    code = fn.func_code
     return deparse(code, version=PYTHON_VERSION)

 def check_expect(expect, parsed, fn_name):
@@ -10,7 +10,7 @@ else:
     maxint = sys.maxint
 from uncompyle6.semantics.helper import print_docstring

-class PrintFake():
+class PrintFake:
     def __init__(self):
         self.pending_newlines = 0
         self.f = StringIO()
@@ -1,8 +1,6 @@
 #!/usr/bin/env python
 from uncompyle6 import PYTHON_VERSION, IS_PYPY
 from uncompyle6.scanner import get_scanner
-from xdis.bytecode import Bytecode
-from array import array
 def bug(state, slotstate):
     if state:
         if slotstate is not None:
@@ -22,18 +20,10 @@ def bug_loop(disassemble, tb=None):
     disassemble(tb)

 def test_if_in_for():
-    code = bug.__code__
+    code = bug.func_code
     scan = get_scanner(PYTHON_VERSION)
-    print(PYTHON_VERSION)
     if 2.7 <= PYTHON_VERSION <= 3.0 and not IS_PYPY:
-        n = scan.setup_code(code)
-        bytecode = Bytecode(code, scan.opc)
-        scan.build_lines_data(code, n)
-        scan.insts = list(bytecode)
-        scan.offset2inst_index = {}
-        for i, inst in enumerate(scan.insts):
-            scan.offset2inst_index[inst.offset] = i
-        scan.build_prev_op(n)
+        scan.build_instructions(code)
         fjt = scan.find_jump_targets(False)

         ## FIXME: the data below is wrong.
@@ -48,14 +38,7 @@ def test_if_in_for():
         # {'start': 62, 'end': 63, 'type': 'for-else'}]

         code = bug_loop.__code__
-        n = scan.setup_code(code)
-        bytecode = Bytecode(code, scan.opc)
-        scan.build_lines_data(code, n)
-        scan.insts = list(bytecode)
-        scan.build_prev_op(n)
-        scan.offset2inst_index = {}
-        for i, inst in enumerate(scan.insts):
-            scan.offset2inst_index[inst.offset] = i
+        scan.build_instructions(code)
         fjt = scan.find_jump_targets(False)
         assert{64: [42], 67: [42, 42], 42: [16, 41], 19: [6]} == fjt
         assert scan.structs == [
@@ -69,14 +52,7 @@ def test_if_in_for():
          {'start': 48, 'end': 67, 'type': 'while-loop'}]

     elif 3.2 < PYTHON_VERSION <= 3.4:
-        bytecode = Bytecode(code, scan.opc)
-        scan.code = array('B', code.co_code)
-        scan.lines = scan.build_lines_data(code)
-        scan.build_prev_op()
-        scan.insts = list(bytecode)
-        scan.offset2inst_index = {}
-        for i, inst in enumerate(scan.insts):
-            scan.offset2inst_index[inst.offset] = i
+        scan.build_instructions(code)
         fjt = scan.find_jump_targets(False)
         assert {69: [66], 63: [18]} == fjt
         assert scan.structs == \
@@ -86,5 +62,6 @@ def test_if_in_for():
          {'end': 59, 'type': 'for-loop', 'start': 31},
          {'end': 63, 'type': 'for-else', 'start': 62}]
     else:
-        assert True, "FIXME: should note fixed"
+        print("FIXME: should fix for %s" % PYTHON_VERSION)
+        assert True
     return
@@ -1,150 +0,0 @@
# std
import os
# test
import pytest
import hypothesis
from hypothesis import strategies as st
# uncompyle6
from uncompyle6 import PYTHON_VERSION, deparse_code


@st.composite
def expressions(draw):
    # todo : would be nice to generate expressions using hypothesis however
    # this is pretty involved so for now just use a corpus of expressions
    # from which to select.
    return draw(st.sampled_from((
        'abc',
        'len(items)',
        'x + 1',
        'lineno',
        'container',
        'self.attribute',
        'self.method()',
        # These expressions are failing, I think these are control
        # flow problems rather than problems with FORMAT_VALUE,
        # however I need to confirm this...
        #'sorted(items, key=lambda x: x.name)',
        #'func(*args, **kwargs)',
        #'text or default',
        #'43 if life_the_universe and everything else None'
    )))


@st.composite
def format_specifiers(draw):
    """
    Generate a valid format specifier using the rules:

    format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]
    fill ::= <any character>
    align ::= "<" | ">" | "=" | "^"
    sign ::= "+" | "-" | " "
    width ::= integer
    precision ::= integer
    type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"

    See https://docs.python.org/2/library/string.html

    :param draw: Let hypothesis draw from other strategies.

    :return: An example format_specifier.
    """
    alphabet_strategy = st.characters(min_codepoint=ord('a'), max_codepoint=ord('z'))
    fill = draw(st.one_of(alphabet_strategy, st.none()))
    align = draw(st.sampled_from(list('<>=^')))
    fill_align = (fill + align or '') if fill else ''

    type_ = draw(st.sampled_from('bcdeEfFgGnosxX%'))
    can_have_sign = type_ in 'deEfFgGnoxX%'
    can_have_comma = type_ in 'deEfFgG%'
    can_have_precision = type_ in 'fFgG'
    can_have_pound = type_ in 'boxX%'
    can_have_zero = type_ in 'oxX'

    sign = draw(st.sampled_from(list('+- ') + [''])) if can_have_sign else ''
    pound = draw(st.sampled_from(('#', '',))) if can_have_pound else ''
    zero = draw(st.sampled_from(('0', '',))) if can_have_zero else ''

    int_strategy = st.integers(min_value=1, max_value=1000)

    width = draw(st.one_of(int_strategy, st.none()))
    width = str(width) if width is not None else ''

    comma = draw(st.sampled_from((',', '',))) if can_have_comma else ''
    if can_have_precision:
        precision = draw(st.one_of(int_strategy, st.none()))
        precision = '.' + str(precision) if precision else ''
    else:
        precision = ''

    return ''.join((fill_align, sign, pound, zero, width, comma, precision, type_,))


@st.composite
def fstrings(draw):
    """
    Generate a valid f-string.
    See https://www.python.org/dev/peps/pep-0498/#specification

    :param draw: Let hypothsis draw from other strategies.

    :return: A valid f-string.
    """
    character_strategy = st.characters(
        blacklist_characters='\r\n\'\\s{}',
        min_codepoint=1,
        max_codepoint=1000,
    )
    is_raw = draw(st.booleans())
    integer_strategy = st.integers(min_value=0, max_value=3)
    expression_count = draw(integer_strategy)
    content = []
    for _ in range(expression_count):
        expression = draw(expressions())
        conversion = draw(st.sampled_from(('', '!s', '!r', '!a',)))
        has_specifier = draw(st.booleans())
        specifier = ':' + draw(format_specifiers()) if has_specifier else ''
        content.append('{{{}{}}}'.format(expression, conversion, specifier))
    content.append(draw(st.text(character_strategy)))
    content = ''.join(content)
    return "f{}'{}'".format('r' if is_raw else '', content)


@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
@hypothesis.given(format_specifiers())
def test_format_specifiers(format_specifier):
    """Verify that format_specifiers generates valid specifiers"""
    try:
        exec('"{:' + format_specifier + '}".format(0)')
    except ValueError as e:
        if 'Unknown format code' not in str(e):
            raise


def run_test(text):
    hypothesis.assume(len(text))
    hypothesis.assume("f'{" in text)
    expr = text + '\n'
    code = compile(expr, '<string>', 'single')
    deparsed = deparse_code(PYTHON_VERSION, code, compile_mode='single')
    recompiled = compile(deparsed.text, '<string>', 'single')
    if recompiled != code:
        assert 'dis(' + deparsed.text.strip('\n') + ')' == 'dis(' + expr.strip('\n') + ')'


@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
@hypothesis.given(fstrings())
def test_uncompyle_fstring(fstring):
    """Verify uncompyling fstring bytecode"""
    run_test(fstring)


@pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
@pytest.mark.parametrize('fstring', [
    "f'{abc}{abc!s}'",
    "f'{abc}0'",
])
def test_uncompyle_direct(fstring):
    """useful for debugging"""
    run_test(fstring)
@@ -1,175 +0,0 @@
# std
import string
# 3rd party
from hypothesis import given, assume, example, settings, strategies as st
import pytest
# uncompyle
from validate import validate_uncompyle
from test_fstring import expressions


alpha = st.sampled_from(string.ascii_lowercase)
numbers = st.sampled_from(string.digits)
alphanum = st.sampled_from(string.ascii_lowercase + string.digits)


@st.composite
def function_calls(draw,
                   min_keyword_args=0, max_keyword_args=5,
                   min_positional_args=0, max_positional_args=5,
                   min_star_args=0, max_star_args=1,
                   min_double_star_args=0, max_double_star_args=1):
    """
    Strategy factory for generating function calls.

    :param draw: Callable which draws examples from other strategies.

    :return: The function call text.
    """
    st_positional_args = st.lists(
        alpha,
        min_size=min_positional_args,
        max_size=max_positional_args
    )
    st_keyword_args = st.lists(
        alpha,
        min_size=min_keyword_args,
        max_size=max_keyword_args
    )
    st_star_args = st.lists(
        alpha,
        min_size=min_star_args,
        max_size=max_star_args
    )
    st_double_star_args = st.lists(
        alpha,
        min_size=min_double_star_args,
        max_size=max_double_star_args
    )

    positional_args = draw(st_positional_args)
    keyword_args = draw(st_keyword_args)
    st_values = st.lists(
        expressions(),
        min_size=len(keyword_args),
        max_size=len(keyword_args)
    )
    keyword_args = [
        x + '=' + e
        for x, e in
        zip(keyword_args, draw(st_values))
    ]
    star_args = ['*' + x for x in draw(st_star_args)]
    double_star_args = ['**' + x for x in draw(st_double_star_args)]

    arguments = positional_args + keyword_args + star_args + double_star_args
    draw(st.randoms()).shuffle(arguments)
    arguments = ','.join(arguments)

    function_call = 'fn({arguments})'.format(arguments=arguments)
    try:
        # TODO: Figure out the exact rules for ordering of positional, keyword,
        # star args, double star args and in which versions the various
        # types of arguments are supported so we don't need to check that the
        # expression compiles like this.
        compile(function_call, '<string>', 'single')
    except:
        assume(False)
    return function_call


def test_function_no_args():
    validate_uncompyle("fn()")


def isolated_function_calls(which):
    """
    Returns a strategy for generating function calls, but isolated to
    particular types of arguments, for example only positional arguments.

    This can help reason about debugging errors in specific types of function
    calls.

    :param which: One of 'keyword', 'positional', 'star', 'double_star'

    :return: Strategy for generating an function call isolated to specific
             argument types.
    """
    kwargs = dict(
        max_keyword_args=0,
        max_positional_args=0,
        max_star_args=0,
        max_double_star_args=0,
    )
    kwargs['_'.join(('min', which, 'args'))] = 1
    kwargs['_'.join(('max', which, 'args'))] = 5 if 'star' not in which else 1
    return function_calls(**kwargs)


with settings(max_examples=25):

    @given(isolated_function_calls('positional'))
    @example("fn(0)")
    def test_function_positional_only(expr):
        validate_uncompyle(expr)

    @given(isolated_function_calls('keyword'))
    @example("fn(a=0)")
    def test_function_call_keyword_only(expr):
        validate_uncompyle(expr)

    @given(isolated_function_calls('star'))
    @example("fn(*items)")
    def test_function_call_star_only(expr):
        validate_uncompyle(expr)

    @given(isolated_function_calls('double_star'))
    @example("fn(**{})")
    def test_function_call_double_star_only(expr):
        validate_uncompyle(expr)


@pytest.mark.xfail()
def test_BUILD_CONST_KEY_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
    validate_uncompyle("fn(w=0,m=0,**v)")


@pytest.mark.xfail()
def test_BUILD_MAP_BUILD_MAP_UNPACK_WITH_CALL_BUILD_TUPLE_CALL_FUNCTION_EX():
    validate_uncompyle("fn(a=0,**g)")


@pytest.mark.xfail()
def test_CALL_FUNCTION_EX():
    validate_uncompyle("fn(*g,**j)")


@pytest.mark.xfail()
def test_BUILD_MAP_CALL_FUNCTION_EX():
    validate_uncompyle("fn(*z,u=0)")


@pytest.mark.xfail()
def test_BUILD_TUPLE_CALL_FUNCTION_EX():
    validate_uncompyle("fn(**a)")


@pytest.mark.xfail()
def test_BUILD_MAP_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
    validate_uncompyle("fn(b,b,b=0,*a)")


@pytest.mark.xfail()
def test_BUILD_TUPLE_BUILD_TUPLE_UNPACK_WITH_CALL_CALL_FUNCTION_EX():
    validate_uncompyle("fn(*c,v)")


@pytest.mark.xfail()
def test_BUILD_CONST_KEY_MAP_CALL_FUNCTION_EX():
    validate_uncompyle("fn(i=0,y=0,*p)")


@pytest.mark.skip(reason='skipping property based test until all individual tests are passing')
@given(function_calls())
def test_function_call(function_call):
    validate_uncompyle(function_call)
@@ -18,40 +18,44 @@ def test_grammar():
right_recursive, dup_rhs) = p.check_sets()

# We have custom rules that create the below
expect_lhs = set(['expr1024', 'pos_arg', 'get_iter', 'attribute'])
expect_lhs = set(['pos_arg', 'get_iter', 'attribute'])

unused_rhs = set(['list', 'mkfunc',
'mklambda',
'unpack',])
expect_right_recursive = set([('designList',
('store', 'DUP_TOP', 'designList'))])
if PYTHON3:
expect_lhs.add('load_genexpr')

if PYTHON_VERSION > 2.6:
expect_lhs.add('kvlist')
expect_lhs.add('kv3')
unused_rhs.add('dict')

if PYTHON3:
expect_lhs.add('load_genexpr')

unused_rhs = unused_rhs.union(set("""
except_pop_except generator_exp classdefdeco2
dict
except_pop_except generator_exp
""".split()))
if PYTHON_VERSION >= 3.0:
expect_lhs.add("annotate_arg")
expect_lhs.add("annotate_tuple")
unused_rhs.add("mkfunc_annotate")
unused_rhs.add('call')
unused_rhs.add("dict_comp")
unused_rhs.add("classdefdeco1")
if PYTHON_VERSION < 3.6:
# 3.6 has at least one non-custom call rule
# the others don't
unused_rhs.add('call')
if PYTHON_VERSION == 3.5:
expect_right_recursive.add((('l_stmts',
('lastl_stmt', 'COME_FROM', 'l_stmts'))))
('lastl_stmt', 'come_froms', 'l_stmts'))))
pass
pass
else:
expect_right_recursive.add((('l_stmts',
('lastl_stmt', 'COME_FROM', 'l_stmts'))))
# expect_lhs.add('kwargs1')
pass
pass
pass
@@ -85,6 +89,8 @@ def test_grammar():
""".split())
if 2.6 <= PYTHON_VERSION <= 2.7:
opcode_set = set(s.opc.opname).union(ignore_set)
if PYTHON_VERSION == 2.6:
opcode_set.add("THEN")
check_tokens(tokens, opcode_set)
elif PYTHON_VERSION == 3.4:
ignore_set.add('LOAD_CLASSNAME')
@@ -8,12 +8,8 @@ from uncompyle6.semantics.consts import (
if PYTHON3:
from io import StringIO
def iteritems(d):
return d.items()
else:
from StringIO import StringIO
def iteritems(d):
return d.iteritems()

from uncompyle6.semantics.pysource import SourceWalker as SourceWalker

@@ -30,7 +26,7 @@ def test_template_engine():
     # FIXME: and so on...

 from uncompyle6.semantics.consts import (
-    TABLE_DIRECT, TABLE_R,
+    TABLE_R, TABLE_DIRECT,
 )

 from uncompyle6.semantics.fragments import (
@@ -44,7 +40,7 @@ def test_tables():
             (TABLE_DIRECT, 'TABLE_DIRECT', False),
             (TABLE_R, 'TABLE_R', False),
             (TABLE_DIRECT_FRAGMENT, 'TABLE_DIRECT_FRAGMENT', True)):
-        for k, entry in iteritems(t):
+        for k, entry in t.iteritems():
             if k in skip_for_now:
                 continue
             fmt = entry[0]
@@ -1,19 +1,19 @@
 import pytest
-from uncompyle6 import PYTHON_VERSION, PYTHON3, deparse_code
+from uncompyle6 import PYTHON_VERSION, deparse_code

-def test_single_mode():
-    single_expressions = (
-        'i = 1',
-        'i and (j or k)',
-        'i += 1',
-        'i = j % 4',
-        'i = {}',
-        'i = []',
-        'for i in range(10):\n i\n',
-        'for i in range(10):\n for j in range(10):\n i + j\n',
-        'try:\n i\nexcept Exception:\n j\nelse:\n k\n'
-    )
+if PYTHON_VERSION >= 2.6:
+    def test_single_mode():
+        single_expressions = (
+            'i = 1',
+            'i and (j or k)',
+            'i += 1',
+            'i = j % 4',
+            'i = {}',
+            'i = []',
+            'for i in range(10):\n i\n',
+            'for i in range(10):\n for j in range(10):\n i + j\n',
+            'try:\n i\nexcept Exception:\n j\nelse:\n k\n'
+        )

-    for expr in single_expressions:
-        code = compile(expr + '\n', '<string>', 'single')
-        assert deparse_code(PYTHON_VERSION, code, compile_mode='single').text == expr + '\n'
+        for expr in single_expressions:
+            code = compile(expr + '\n', '<string>', 'single')
+            assert deparse_code(PYTHON_VERSION, code, compile_mode='single').text == expr + '\n'
pytest/testdata/if-2.7.right (vendored): 2 lines changed
@@ -9,4 +9,4 @@
   12  JUMP_FORWARD      0  'to 15'
 15_0  COME_FROM        12  '12'
   15  LOAD_CONST        0  None
-  18  RETURN_VALUE
+  18  RETURN_VALUE
pytest/testdata/ifelse-2.7.right (vendored): 2 lines changed
@@ -12,4 +12,4 @@
   18  STORE_NAME        2  'd'
 21_0  COME_FROM        12  '12'
   21  LOAD_CONST        2  None
-  24  RETURN_VALUE
+  24  RETURN_VALUE
@@ -1,24 +1,25 @@
# future
from __future__ import print_function
# std
import os
import difflib
import subprocess
import tempfile
import functools
# compatability
import six

from StringIO import StringIO
# uncompyle6 / xdis
from uncompyle6 import PYTHON_VERSION, IS_PYPY, deparse_code
# TODO : I think we can get xdis to support the dis api (python 3 version) by doing something like this there
from xdis.bytecode import Bytecode
from xdis.main import get_opcode
opc = get_opcode(PYTHON_VERSION, IS_PYPY)
Bytecode = functools.partial(Bytecode, opc=opc)

try:
    import functools
    Bytecode = functools.partial(Bytecode, opc=opc)
    def _dis_to_text(co):
        return Bytecode(co).dis()
except:
    pass

def _dis_to_text(co):
    return Bytecode(co).dis()


def print_diff(original, uncompyled):
@@ -42,8 +43,11 @@ def print_diff(original, uncompyled):
         print('\nTo display diff highlighting run:\n pip install BeautifulSoup4')
         diff = difflib.HtmlDiff().make_table(*args)

-        with tempfile.NamedTemporaryFile(delete=False) as f:
+        f = tempfile.NamedTemporaryFile(delete=False)
+        try:
             f.write(str(diff).encode('utf-8'))
+        finally:
+            f.close()

         try:
             print()
@@ -60,8 +64,7 @@ def print_diff(original, uncompyled):
         print('\nFor side by side diff install elinks')
         diff = difflib.Differ().compare(original_lines, uncompyled_lines)
         print('\n'.join(diff))
-    finally:
-        os.unlink(f.name)
+    os.unlink(f.name)


 def are_instructions_equal(i1, i2):
@@ -123,8 +126,9 @@ def validate_uncompyle(text, mode='exec'):
     original_text = text

     deparsed = deparse_code(PYTHON_VERSION, original_code,
                             compile_mode=mode,
-                            out=six.StringIO(),
+                            out=StringIO(),
                             is_pypy=IS_PYPY)
     uncompyled_text = deparsed.text
     uncompyled_code = compile(uncompyled_text, '<string>', 'exec')
@@ -1,3 +1,3 @@
-pytest>=3.0.0
+pytest>=3.0.0,<=3.0.1
 flake8
-hypothesis<=3.8.3
+hypothesis<=3.0.0
setup.py: 16 lines changed
@@ -1,16 +1,16 @@
 #!/usr/bin/env python
-import sys
-
 """Setup script for the 'uncompyle6' distribution."""

+import sys
+
 SYS_VERSION = sys.version_info[0:2]
-if not ((2, 6) <= SYS_VERSION <= (3, 7)) or ((3, 0) <= SYS_VERSION <= (3, 1)):
-    mess = "Python Release 2.6 .. 3.7 excluding 3.0 and 3.1 are supported in this code branch."
-    if ((2, 4) <= SYS_VERSION <= (2, 7)):
-        mess += ("\nFor your Python, version %s, use the python-2.4 code/branch." %
+if not ((2, 4) <= SYS_VERSION <= (2, 7)):
+    mess = "Python Release 2.4 .. 2.7 are supported in this code branch."
+    if ((3, 2) <= SYS_VERSION <= (3, 7)):
+        mess += ("\nFor your Python, version %s, use the master code/branch." %
                  sys.version[0:3])
-    elif SYS_VERSION < (2, 4) or ((3, 0) <= SYS_VERSION <= (3, 1)):
-        mess += ("\nThis package is not supported for Python version %s."
+    else:
+        mess += ("\nThis package is not supported before Python 2.4. Your Python version is %s."
                  % sys.version[0:3])
     print(mess)
     raise Exception(mess)
test-unit/test_grammar.py (new file): 55 lines
@@ -0,0 +1,55 @@
import re
import unittest
from uncompyle6 import PYTHON_VERSION, IS_PYPY  # , PYTHON_VERSION
from uncompyle6.parser import get_python_parser, python_parser

class TestGrammar(unittest.TestCase):
    def test_grammar(self):

        def check_tokens(tokens, opcode_set):
            remain_tokens = set(tokens) - opcode_set
            remain_tokens = set([re.sub('_\d+$','', t) for t in remain_tokens])
            remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
            remain_tokens = set(remain_tokens) - opcode_set
            self.assertEqual(remain_tokens, set([]),
                             "Remaining tokens %s\n====\n%s" % (remain_tokens, p.dump_grammar()))

        p = get_python_parser(PYTHON_VERSION, is_pypy=IS_PYPY)
        (lhs, rhs, tokens,
         right_recursive, dup_rhs) = p.check_sets()
        expect_lhs = set(['expr1024', 'pos_arg'])
        unused_rhs = set(['list', 'call', 'mkfunc',
                          'mklambda',
                          'unpack',])

        expect_right_recursive = frozenset([('designList',
                                             ('store', 'DUP_TOP', 'designList'))])
        expect_lhs.add('kwarg')

        self.assertEqual(expect_lhs, set(lhs))
        self.assertEqual(unused_rhs, set(rhs))
        self.assertEqual(expect_right_recursive, right_recursive)

        expect_dup_rhs = frozenset([('COME_FROM',), ('CONTINUE',), ('JUMP_ABSOLUTE',),
                                    ('LOAD_CONST',),
                                    ('JUMP_BACK',), ('JUMP_FORWARD',)])

        reduced_dup_rhs = {}
        for k in dup_rhs:
            if k not in expect_dup_rhs:
                reduced_dup_rhs[k] = dup_rhs[k]
                pass
            pass
        for k in reduced_dup_rhs:
            print(k, reduced_dup_rhs[k])
        # assert not reduced_dup_rhs, reduced_dup_rhs

    def test_dup_rule(self):
        import inspect
        python_parser(PYTHON_VERSION, inspect.currentframe().f_code,
                      is_pypy=IS_PYPY,
                      parser_debug={
                          'dups': True, 'transition': False, 'reduce': False,
                          'rules': False, 'errorstack': None, 'context': True})

if __name__ == '__main__':
    unittest.main()
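Because the new file drives unittest directly (note the unittest.main() guard above), it can be run without pytest, which matters on the older interpreters this branch targets; standard unittest test selection also works:

    python test-unit/test_grammar.py
    python test-unit/test_grammar.py TestGrammar.test_dup_rule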
test/.gitignore (new vendored file): 1 line
@@ -0,0 +1 @@
/nohup.out
@@ -3,9 +3,9 @@ PHONY=check clean dist distclean test test-unit test-functional rmChangeLog clea
 	check-bytecode-2.2 check-byteocde-2.3 check-bytecode-2.4 \
 	check-short check-2.6 check-2.7 check-3.0 check-3.1 check-3.2 check-3.3 \
 	check-3.4 check-3.5 check-5.6 5.6 5.8 \
-	grammar-coverage-2.5 grammar-coverage-2.6 grammarcoverage-2.7 \
-	grammar-coverage-3.1 grammar-coverage-3.2 grammarcoverage-3.3 \
-	grammar-coverage-3.4 grammar-coverage-3.5 grammarcoverage-3.6
+	grammar-coverage-2.5 grammar-coverage-2.6 grammar-coverage-2.7 \
+	grammar-coverage-3.1 grammar-coverage-3.2 grammar-coverage-3.3 \
+	grammar-coverage-3.4 grammar-coverage-3.5 grammar-coverage-3.6


 GIT2CL ?= git2cl
@@ -28,7 +28,7 @@ check:
 	$(MAKE) check-$(PYTHON_VERSION)

 #: Run working tests from Python 2.6 or 2.7
-check-2.6 check-2.7: check-bytecode-2 check-bytecode-3 check-bytecode-1 check-native-short
+check-2.4 check-2.5 check-2.6 check-2.7: check-bytecode-2 check-bytecode-3 check-bytecode-1 check-native-short

 #: Run working tests from Python 3.0
 check-3.0: check-bytecode
@@ -37,18 +37,22 @@ check-3.0: check-bytecode
 #: Run working tests from Python 3.1
 check-3.1: check-bytecode
 	$(PYTHON) test_pythonlib.py --bytecode-3.1 --weak-verify $(COMPILE)
+	$(PYTHON) test_pythonlib.py --bytecode-3.1-run --verify-run

 #: Run working tests from Python 3.2
 check-3.2: check-bytecode
 	$(PYTHON) test_pythonlib.py --bytecode-3.2 --weak-verify $(COMPILE)
+	$(PYTHON) test_pythonlib.py --bytecode-3.2-run --verify-run

 #: Run working tests from Python 3.3
 check-3.3: check-bytecode
 	$(PYTHON) test_pythonlib.py --bytecode-3.3 --weak-verify $(COMPILE)
+	$(PYTHON) test_pythonlib.py --bytecode-3.3-run --verify-run

 #: Run working tests from Python 3.4
 check-3.4: check-bytecode check-3.4-ok check-2.7-ok
 	$(PYTHON) test_pythonlib.py --bytecode-3.4 --weak-verify $(COMPILE)
+	$(PYTHON) test_pythonlib.py --bytecode-3.4-run --verify-run

 #: Run working tests from Python 3.5
 check-3.5: check-bytecode
@@ -114,23 +118,29 @@ check-bytecode-2.4:
 check-bytecode-2.5:
 	$(PYTHON) test_pythonlib.py --bytecode-2.5

+#: Get grammar coverage for Python 2.4
+grammar-coverage-2.4:
+	-rm $(COVER_DIR)/spark-grammar-24.cover
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.4.cover $(PYTHON) test_pythonlib.py --bytecode-2.4
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.4.cover $(PYTHON) test_pyenvlib.py --2.4.6 --max= 800
+
 #: Get grammar coverage for Python 2.5
 grammar-coverage-2.5:
-	-rm $(COVER_DIR)/spark-grammar-25.cover
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-25.cover $(PYTHON) test_pythonlib.py --bytecode-2.5
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-25.cover $(PYTHON) test_pyenvlib.py --2.5.6
+	-rm $(COVER_DIR)/spark-grammar-2.5.cover || true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.5.cover $(PYTHON) test_pythonlib.py --bytecode-2.5
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.5.cover $(PYTHON) test_pyenvlib.py --2.5.6 --max=800

 #: Get grammar coverage for Python 2.6
 grammar-coverage-2.6:
-	-rm $(COVER_DIR)/spark-grammar-26.cover
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-26.cover $(PYTHON) test_pythonlib.py --bytecode-2.6
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-26.cover $(PYTHON) test_pyenvlib.py --2.6.9
+	-rm $(COVER_DIR)/spark-grammar-2.6.cover || true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.6.cover $(PYTHON) test_pythonlib.py --bytecode-2.6
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.6.cover $(PYTHON) test_pyenvlib.py --2.6.9 --max=800

 #: Get grammar coverage for Python 2.7
 grammar-coverage-2.7:
-	-rm $(COVER_DIR)/spark-grammar-27.cover
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-27.cover $(PYTHON) test_pythonlib.py --bytecode-2.7
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-27.cover $(PYTHON) test_pyenvlib.py --2.7.13
+	-rm $(COVER_DIR)/spark-grammar-2.7.cover || true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.7.cover $(PYTHON) test_pythonlib.py --bytecode-2.7
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-2.7.cover $(PYTHON) test_pyenvlib.py --2.7.14 --max=600

 #: Get grammar coverage for Python 3.0
 grammar-coverage-3.0:
@@ -141,33 +151,39 @@ SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-30.cover $(PYTHON) test_pythonl

 #: Get grammar coverage for Python 3.1
 grammar-coverage-3.1:
-	-rm $(COVER_DIR)/spark-grammar-31.cover
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-31.cover $(PYTHON) test_pythonlib.py --bytecode-3.1
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-31.cover $(PYTHON) test_pyenvlib.py --3.1.5
+	-rm $(COVER_DIR)/spark-grammar-3.1.cover
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.1.cover $(PYTHON) test_pythonlib.py --bytecode-3.1
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.1.cover $(PYTHON) test_pyenvlib.py --3.1.5

 #: Get grammar coverage for Python 3.2
 grammar-coverage-3.2:
-	-rm $(COVER_DIR)/spark-grammar-32.cover
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-32.cover $(PYTHON) test_pythonlib.py --bytecode-3.2
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-32.cover $(PYTHON) test_pyenvlib.py --3.2.6
+	-rm $(COVER_DIR)/spark-grammar-3.2.cover || true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.2.cover $(PYTHON) test_pythonlib.py --bytecode-3.2
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.2.cover $(PYTHON) test_pyenvlib.py --3.2.6

 #: Get grammar coverage for Python 3.3
 grammar-coverage-3.3:
-	-rm $(COVER_DIR)/spark-grammar-33.cover
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-33.cover $(PYTHON) test_pythonlib.py --bytecode-3.3
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-33.cover $(PYTHON) test_pyenvlib.py --3.3.6
+	-rm $(COVER_DIR)/spark-grammar-3.3.cover || true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.3.cover $(PYTHON) test_pythonlib.py --bytecode-3.3
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.3.cover $(PYTHON) test_pyenvlib.py --3.3.7 --max=800

 #: Get grammar coverage for Python 3.4
 grammar-coverage-3.4:
-	-rm $(COVER_DIR)/spark-grammar-34.cover
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-34.cover $(PYTHON) test_pythonlib.py --bytecode-3.4
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-34.cover $(PYTHON) test_pyenvlib.py --3.4.2
+	-rm $(COVER_DIR)/spark-grammar-3.4.cover || true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.4.cover $(PYTHON) test_pythonlib.py --bytecode-3.4
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.4.cover $(PYTHON) test_pyenvlib.py --3.4.8 --max=800

 #: Get grammar coverage for Python 3.5
 grammar-coverage-3.5:
-	rm $(COVER_DIR)/spark-grammar-35.cover || /bin/true
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-35.cover $(PYTHON) test_pythonlib.py --bytecode-3.5
-	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-35.cover $(PYTHON) test_pyenvlib.py --3.5.3
+	rm $(COVER_DIR)/spark-grammar-3.5.cover || /bin/true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.5.cover $(PYTHON) test_pythonlib.py --bytecode-3.5
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.5.cover $(PYTHON) test_pyenvlib.py --3.5.5 --max=450

+#: Get grammar coverage for Python 3.6
+grammar-coverage-3.6:
+	rm $(COVER_DIR)/spark-grammar-3.6.cover || /bin/true
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.6.cover $(PYTHON) test_pythonlib.py --bytecode-3.6
+	SPARK_PARSER_COVERAGE=$(COVER_DIR)/spark-grammar-3.6.cover $(PYTHON) test_pyenvlib.py --3.6.4 --max=280
+
 #: Check deparsing Python 2.6
 check-bytecode-2.6:
@@ -186,10 +202,12 @@ check-bytecode-3.0:
 #: Check deparsing Python 3.1
 check-bytecode-3.1:
 	$(PYTHON) test_pythonlib.py --bytecode-3.1 --weak-verify
+	$(PYTHON) test_pythonlib.py --bytecode-3.1-run --verify-run

 #: Check deparsing Python 3.2
 check-bytecode-3.2:
 	$(PYTHON) test_pythonlib.py --bytecode-3.2 --weak-verify
+	$(PYTHON) test_pythonlib.py --bytecode-3.2-run --verify-run

 #: Check deparsing Python 3.3
 check-bytecode-3.3:
@@ -216,6 +234,10 @@ check-native-short:
 	$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION) --weak-verify $(COMPILE)
 	$(PYTHON) test_pythonlib.py --bytecode-$(PYTHON_VERSION)-run --verify-run $(COMPILE)

+#: Run longer Python 2.6's lib files known to be okay
+check-2.4-ok:
+	$(PYTHON) test_pythonlib.py --ok-2.4 --verify $(COMPILE)
+
 #: Run longer Python 2.6's lib files known to be okay
 check-2.6-ok:
 	$(PYTHON) test_pythonlib.py --ok-2.6 --weak-verify $(COMPILE)
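Putting the new grammar-coverage targets together with test/grammar-cover/convert.sh further down, a single-version coverage run looks roughly like this; COVER_DIR is whatever the Makefile defines it as, and spark-parser-coverage is the same tool those scripts call:

    cd test
    make grammar-coverage-2.7     # writes $(COVER_DIR)/spark-grammar-2.7.cover
    spark-parser-coverage --max-count=3000 --path $COVER_DIR/spark-grammar-2.7.cover > grammar-2.7.txt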
Binary test bytecode files (contents not shown). New files added:
 test/bytecode_2.2/10_del.pyc
 test/bytecode_2.4/01_ops.pyc
 test/bytecode_2.4/01_triple_compare.pyc
 test/bytecode_2.4/02_except_as.pyc
 test/bytecode_2.4/02_unary_convert.pyc
 test/bytecode_2.4/02_yield_bug.pyc
 test/bytecode_2.4_run/04_try_else_confuse.pyc
 test/bytecode_2.4_run/04_try_except_else.pyc
 test/bytecode_2.5/06_setif_comprehension.pyc
 test/bytecode_2.7_run/04_assert_continue.pyc
 test/bytecode_3.1/04_call_function.pyc
 test/bytecode_3.1_run/05_abc_test.pyc
 test/bytecode_3.2_run/04_call_function.pyc
 test/bytecode_3.2_run/05_abc_test.pyc
 test/bytecode_3.2_run/15_assert.pyc
 test/bytecode_3.3_run/15_assert.pyc
 test/bytecode_3.4_run/04_call_function.pyc
 test/bytecode_3.4_run/05_abc_test.pyc
 test/bytecode_3.5/01_while_if_then.pyc
 test/bytecode_3.5_run/01_loop_if_continue.pyc
 test/bytecode_3.5_run/01_map_unpack.pyc
 test/bytecode_3.5_run/04_call_function.pyc
 test/bytecode_3.5_run/07_build_map_unpack.pyc
 test/bytecode_3.6/01_while_if_then.pyc
 test/bytecode_3.6/03_if_elif.pyc
 test/bytecode_3.6/05-for-ifelse.pyc
 test/bytecode_3.6/05_set_comprehension.pyc
 test/bytecode_3.6/06_try_return.pyc
 test/bytecode_3.6_run/04_call_function.pyc
 test/bytecode_3.6_run/10_argparse.pyc
Additional existing .pyc files were modified; their names were not captured in this listing.
@@ -1,9 +1,8 @@
 #!/usr/bin/env python
 # Mode: -*- python -*-
 #
-# Copyright (c) 2015 by Rocky Bernstein <rb@dustyfeet.com>
+# Copyright (c) 2015, 2017 by Rocky Bernstein <rb@dustyfeet.com>
 #
 from __future__ import print_function


 import dis, os.path
test/grammar-cover/.gitignore (new vendored file): 1 line
@@ -0,0 +1 @@
/.python-version
test/grammar-cover/README.md (new file): 1 line
@@ -0,0 +1 @@
Code in this directory gets statistics on grammar coverage
test/grammar-cover/convert.sh (new executable file): 5 lines
@@ -0,0 +1,5 @@
#!/bin/bash
for VERS in 2{4,5,6,7} 3{2,3,4,5} ; do
    GRAMMAR_TXT=grammar-${VERS}.txt
    spark-parser-coverage --max-count 3000 --path spark-grammar-${VERS}.cover > $GRAMMAR_TXT
done
test/grammar-cover/grammar-all.sh (new executable file): 2 lines
@@ -0,0 +1,2 @@
#!/bin/bash
$SHELL ./grammar.sh 2.4 2.5 2.6 2.7 3.2 3.3 3.4 3.5 3.6
test/grammar-cover/grammar.sh (new executable file): 44 lines
@@ -0,0 +1,44 @@
#!/bin/bash
# Remake Python grammar statistics

typeset -A ALL_VERS=([2.4]=2.4.6 [2.5]=2.5.6 [2.6]=2.6.9 [2.7]=2.7.14 [3.2]=3.2.6 [3.3]=3.3.6 [3.4]=3.4.8 [3.5]=3.5.5 [3.6]=3.6.4)

if (( $# == 0 )); then
    echo 1>&2 "usage: $0 two-digit-version"
    exit 1
fi

me=${BASH_SOURCE[0]}
workdir=$(dirname $me)
cd $workdir
workdir=$(pwd)
while [[ -n $1 ]] ; do
    SHORT_VERSION=$1; shift
    LONG_VERSION=${ALL_VERS[$SHORT_VERSION]}
    if [[ -z ${LONG_VERSION} ]] ; then
        echo 1>&2 "Version $SHORT_VERSION not known"
        exit 2
    fi

    tmpdir=$workdir/../../tmp/grammar-cover
    COVER_FILE=${tmpdir}/spark-grammar-${SHORT_VERSION}.cover
    [[ -d $tmpdir ]] || mkdir $tmpdir
    cd $workdir/../..
    if [[ $SHORT_VERSION > 2.5 ]] ; then
        source ./admin-tools/setup-master.sh
    else
        source ./admin-tools/setup-python-2.4.sh
    fi
    GRAMMAR_TXT=$tmpdir/grammar-${SHORT_VERSION}.txt
    (cd ../.. && pyenv local ${LONG_VERSION})
    cd ./test
    if [[ -r $COVER_FILE ]]; then
        rm $COVER_FILE
    fi
    if [[ -r $GRAMMAR_TXT ]]; then
        GRAMMAR_SAVE_TXT=${tmpdir}/grammar-${SHORT_VERSION}-save.txt
        cp $GRAMMAR_TXT $GRAMMAR_SAVE_TXT
    fi
    make grammar-coverage-${SHORT_VERSION};
    spark-parser-coverage --max-count=3000 --path $COVER_FILE > $GRAMMAR_TXT
done
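Usage mirrors grammar-all.sh above: pass one or more two-digit versions that appear in the ALL_VERS table, for example:

    $SHELL ./grammar.sh 2.7        # regenerate coverage for CPython 2.7.14 only
    $SHELL ./grammar.sh 3.4 3.5    # or several versions in one run

Per the tmpdir and GRAMMAR_TXT assignments in the script, each run leaves spark-grammar-<version>.cover and grammar-<version>.txt under tmp/grammar-cover at the repository root.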
Some files were not shown because too many files have changed in this diff.