mirror of https://github.com/rocky/python-uncompyle6.git
Grammar debugging for 3.0, 3.7 and 3.8
@@ -336,6 +336,7 @@ if __name__ == '__main__':
               LOAD_GENEXPR LOAD_ASSERT LOAD_SETCOMP LOAD_DICTCOMP LOAD_CLASSNAME
               LAMBDA_MARKER RETURN_LAST
               """.split()))
+        ## FIXME: try this
         remain_tokens = set(tokens) - opcode_set
         import re
         remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
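For context on the grammar-check code in this hunk: a minimal, self-contained sketch (not part of the commit; the function name remaining_tokens and its arguments are illustrative) of what the remain_tokens computation reports, namely grammar symbols that are neither real opcodes nor known pseudo-tokens, so misspelled or dead rules stand out:

import re

def remaining_tokens(grammar_tokens, opcode_names, pseudo_ops):
    # Grammar symbols not backed by a real opcode or a known pseudo-op.
    opcode_set = set(opcode_names) | set(pseudo_ops)
    remain = set(grammar_tokens) - opcode_set
    # Strip numeric suffixes (e.g. COME_FROM_12) and _CONT variants, then re-check.
    remain = {re.sub(r"_\d+$", "", t) for t in remain}
    remain = {re.sub("_CONT$", "", t) for t in remain}
    return remain - opcode_set

print(remaining_tokens(["LOAD_CONST", "COME_FROM_12", "BOGUS_TOKEN"],
                       ["LOAD_CONST"],
                       ["COME_FROM", "LAMBDA_MARKER"]))  # -> {'BOGUS_TOKEN'}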
@@ -141,31 +141,6 @@ class Python37Parser(Python36Parser):
         """

     def customize_grammar_rules(self, tokens, customize):
-        self.remove_rules("""
-           async_forelse_stmt ::= SETUP_LOOP expr
-                                  GET_AITER
-                                  LOAD_CONST YIELD_FROM SETUP_EXCEPT GET_ANEXT LOAD_CONST
-                                  YIELD_FROM
-                                  store
-                                  POP_BLOCK JUMP_FORWARD COME_FROM_EXCEPT DUP_TOP
-                                  LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_FALSE
-                                  POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_BLOCK
-                                  JUMP_ABSOLUTE END_FINALLY COME_FROM
-                                  for_block POP_BLOCK
-                                  else_suite COME_FROM_LOOP
-           stmt ::= async_for_stmt36
-           async_for_stmt36 ::= SETUP_LOOP expr
-                                GET_AITER
-                                LOAD_CONST YIELD_FROM SETUP_EXCEPT GET_ANEXT LOAD_CONST
-                                YIELD_FROM
-                                store
-                                POP_BLOCK JUMP_BACK COME_FROM_EXCEPT DUP_TOP
-                                LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_TRUE
-                                END_FINALLY continues COME_FROM
-                                POP_TOP POP_TOP POP_TOP POP_EXCEPT
-                                POP_TOP POP_BLOCK
-                                COME_FROM_LOOP
-        """)
         super(Python37Parser, self).customize_grammar_rules(tokens, customize)

 class Python37ParserSingle(Python37Parser, PythonParserSingle):
@@ -173,22 +148,33 @@ class Python37ParserSingle(Python37Parser, PythonParserSingle):
 if __name__ == '__main__':
     # Check grammar
+    # FIXME: DRY this with other parseXX.py routines
     p = Python37Parser()
     p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY

     if PYTHON_VERSION == 3.7:
         lhs, rhs, tokens, right_recursive, dup_rhs = p.check_sets()
         from uncompyle6.scanner import get_scanner

         s = get_scanner(PYTHON_VERSION, IS_PYPY)
-        opcode_set = set(s.opc.opname).union(set(
+        opcode_set = set(s.opc.opname).union(
+            set(
               """JUMP_BACK CONTINUE RETURN_END_IF COME_FROM
               LOAD_GENEXPR LOAD_ASSERT LOAD_SETCOMP LOAD_DICTCOMP LOAD_CLASSNAME
               LAMBDA_MARKER RETURN_LAST
-              """.split()))
+              """.split()
+            )
+        )
         remain_tokens = set(tokens) - opcode_set
         import re
-        remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
-        remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
+        remain_tokens = set([re.sub(r"_\d+$", "", t) for t in remain_tokens])
+        remain_tokens = set([re.sub("_CONT$", "", t) for t in remain_tokens])
         remain_tokens = set(remain_tokens) - opcode_set
         print(remain_tokens)
-        # print(sorted(p.rule2name.items()))
+        import sys
+        if len(sys.argv) > 1:
+            from spark_parser.spark import rule2str
+            for rule in sorted(p.rule2name.items()):
+                print(rule2str(rule[0]))
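The new branch at the end of this hunk lets the module dump its full grammar when the file is run directly with any extra command-line argument (the argument value itself is ignored). A hedged invocation sketch, assuming the module lives at uncompyle6/parsers/parse37.py:

import subprocess
import sys

# Any extra argv entry makes the __main__ block above print every rule via
# spark_parser's rule2str(); "dump-grammar" is just a placeholder argument.
subprocess.run([sys.executable, "uncompyle6/parsers/parse37.py", "dump-grammar"],
               check=True)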
@@ -159,7 +159,7 @@ class Python38Parser(Python37Parser):
         super(Python38Parser, self).__init__(debug_parser)
         self.customized = {}

-    def customize_grammar_rules(self, tokens, customize):
+    def remove_rules_38(self):
         self.remove_rules("""
            stmt ::= async_for_stmt37
            stmt ::= for
@@ -226,7 +226,10 @@ class Python38Parser(Python37Parser):
         """)
+
+    def customize_grammar_rules(self, tokens, customize):
         super(Python37Parser, self).customize_grammar_rules(tokens, customize)
+        self.remove_rules_38()
         self.check_reduce['ifstmt'] = 'tokens'
         self.check_reduce['whileTruestmt38'] = 'tokens'
@@ -234,6 +237,7 @@ class Python38Parser(Python37Parser):
         invalid = super(Python38Parser,
                         self).reduce_is_invalid(rule, ast,
                                                 tokens, first, last)
+        self.remove_rules_38()
         if invalid:
             return invalid
         if rule[0] == 'ifstmt':
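The three Python38Parser hunks above pull the 3.8-specific rule removal out of customize_grammar_rules() into remove_rules_38(), so reduce_is_invalid() and the grammar check in __main__ can call it on their own. A stripped-down sketch of the pattern, with illustrative class and rule names only:

class BaseParser(object):
    def remove_rules(self, rules):
        # Stand-in for the real spark_parser rule removal.
        print("removing:", rules.split())

    def customize_grammar_rules(self, tokens, customize):
        pass


class Parser38Sketch(BaseParser):
    def remove_rules_38(self):
        # Rules that do not apply to the 3.8 grammar; callable from anywhere.
        self.remove_rules("stmt ::= async_for_stmt37")

    def customize_grammar_rules(self, tokens, customize):
        super(Parser38Sketch, self).customize_grammar_rules(tokens, customize)
        self.remove_rules_38()


p = Parser38Sketch()
p.remove_rules_38()               # usable on its own, e.g. before check_grammar()
p.customize_grammar_rules([], {})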
@@ -264,24 +268,34 @@ class Python38Parser(Python37Parser):
 class Python38ParserSingle(Python38Parser, PythonParserSingle):
     pass

-if __name__ == '__main__':
+if __name__ == "__main__":
     # Check grammar
+    # FIXME: DRY this with other parseXX.py routines
     p = Python38Parser()
+    p.remove_rules_38()
     p.check_grammar()
     from uncompyle6 import PYTHON_VERSION, IS_PYPY

     if PYTHON_VERSION == 3.8:
         lhs, rhs, tokens, right_recursive, dup_rhs = p.check_sets()
         from uncompyle6.scanner import get_scanner

         s = get_scanner(PYTHON_VERSION, IS_PYPY)
-        opcode_set = set(s.opc.opname).union(set(
+        opcode_set = set(s.opc.opname).union(
+            set(
               """JUMP_BACK CONTINUE RETURN_END_IF COME_FROM
               LOAD_GENEXPR LOAD_ASSERT LOAD_SETCOMP LOAD_DICTCOMP LOAD_CLASSNAME
               LAMBDA_MARKER RETURN_LAST
               """.split()))
         remain_tokens = set(tokens) - opcode_set
         import re
-        remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
-        remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
+        remain_tokens = set([re.sub(r"_\d+$", "", t) for t in remain_tokens])
+        remain_tokens = set([re.sub("_CONT$", "", t) for t in remain_tokens])
         remain_tokens = set(remain_tokens) - opcode_set
         print(remain_tokens)
-        # print(sorted(p.rule2name.items()))
+        import sys
+        if len(sys.argv) > 1:
+            from spark_parser.spark import rule2str
+            for rule in sorted(p.rule2name.items()):
+                print(rule2str(rule[0]))