python-uncompyle6 (mirror of https://github.com/rocky/python-uncompyle6.git)
Make uncompyle6 run on Python3.4 and Python 2.7
We don't need our own disassembler. Python's will do fine
@@ -3,6 +3,8 @@
 #
 # Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
 #
+from __future__ import print_function
+
 '''
 Usage: uncompyle6 [OPTIONS]... [ FILE | DIR]...
 
@@ -48,8 +50,8 @@ import os.path
 from uncompyle6 import main, verify
 import time
 
-if sys.version[:3] != '2.7':
-    print >>sys.stderr, 'Error: uncompyle6 requires Python 2.7.'
+if sys.version[:3] != '2.7' and sys.version[:3] != '3.4':
+    print('Error: uncompyle6 requires Python 2.7 or 3.4.', file=sys.stderr)
     sys.exit(-1)
 
 showasm = showast = do_verify = numproc = recurse_dirs = 0
@@ -62,13 +64,13 @@ timestampfmt = "# %Y.%m.%d %H:%M:%S %Z"
 try:
     opts, files = getopt.getopt(sys.argv[1:], 'hatdro:c:p:',
                                 ['help', 'verify', 'showast', 'showasm'])
-except getopt.GetoptError, e:
-    print >>sys.stderr, '%s: %s' % (os.path.basename(sys.argv[0]), e)
+except getopt.GetoptError as e:
+    print('%s: %s' % (os.path.basename(sys.argv[0]), e), file=sys.stderr)
     sys.exit(-1)
 
 for opt, val in opts:
     if opt in ('-h', '--help'):
-        print __doc__
+        print(__doc__)
         sys.exit(0)
     elif opt == '--verify':
         do_verify = 1
@@ -89,8 +91,8 @@ for opt, val in opts:
     elif opt == '-r':
         recurse_dirs = 1
     else:
-        print opt
-        print Usage_short
+        print(opt)
+        print(Usage_short)
         sys.exit(1)
 
 # expand directory if specified
@@ -112,7 +114,7 @@ if src_base[-1:] != os.sep:
     src_base = os.path.dirname(src_base)
 if src_base:
     sb_len = len( os.path.join(src_base, '') )
-    files = map(lambda f: f[sb_len:], files)
+    files = [f[sb_len:] for f in files]
     del sb_len
 
 if outfile == '-':
@@ -123,18 +125,23 @@ elif outfile and len(files) > 1:
    out_base = outfile; outfile = None
 
 if timestamp:
-    print time.strftime(timestampfmt)
+    print(time.strftime(timestampfmt))
 if numproc <= 1:
     try:
         result = main(src_base, out_base, files, codes, outfile, showasm, showast, do_verify)
-        print '# decompiled %i files: %i okay, %i failed, %i verify failed' % result
+        print('# decompiled %i files: %i okay, %i failed, %i verify failed' % result)
     except (KeyboardInterrupt):
         pass
     except verify.VerifyCmpError:
         raise
 else:
     from multiprocessing import Process, Queue
-    from Queue import Empty
+    try:
+        from Queue import Empty
+    except ImportError:
+        from Queue import Empty
 
     fqueue = Queue(len(files)+numproc)
     for f in files:
         fqueue.put(f)
@@ -145,10 +152,10 @@ else:
 
     def process_func():
         try:
-            (tot_files, okay_files, failed_files, verify_failed_files) = (0,0,0,0)
+            (tot_files, okay_files, failed_files, verify_failed_files) = (0, 0, 0, 0)
             while 1:
                 f = fqueue.get()
-                if f == None:
+                if f is None:
                     break
                 (t, o, f, v) = \
                     main(src_base, out_base, [f], codes, outfile, showasm, showast, do_verify)
@@ -168,8 +175,8 @@ else:
         for p in procs:
             p.join()
         try:
-            (tot_files, okay_files, failed_files, verify_failed_files) = (0,0,0,0)
-            while 1:
+            (tot_files, okay_files, failed_files, verify_failed_files) = (0, 0, 0, 0)
+            while True:
                 (t, o, f, v) = rqueue.get(False)
                 tot_files += t
                 okay_files += o
@@ -177,11 +184,11 @@ else:
                 verify_failed_files += v
         except Empty:
             pass
-        print '# decompiled %i files: %i okay, %i failed, %i verify failed' % \
-              (tot_files, okay_files, failed_files, verify_failed_files)
+        print('# decompiled %i files: %i okay, %i failed, %i verify failed' %
+              (tot_files, okay_files, failed_files, verify_failed_files))
     except (KeyboardInterrupt, OSError):
         pass
 
 
 if timestamp:
-    print time.strftime(timestampfmt)
+    print(time.strftime(timestampfmt))
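For readers following the porting pattern rather than the diff itself, the changes above are the usual single-source Python 2/3 idioms: a `__future__` import so `print()` is a function on 2.7, `except ... as e` syntax, and a guarded import for modules that were renamed in Python 3. A minimal, self-contained sketch of those idioms, assuming only the standard library (the helper name and messages are illustrative, and the Python 3 fallback below uses the renamed lowercase `queue` module, which is the conventional form):

```python
from __future__ import print_function  # makes print() a function on Python 2.7

import sys

# Guarded import for a module renamed in Python 3 (Queue -> queue).
try:
    from Queue import Empty            # Python 2
except ImportError:
    from queue import Empty            # Python 3

def require_supported_python():
    # Version check in the same spirit as the script above.
    if sys.version_info[:2] not in ((2, 7), (3, 4)):
        print('Error: this tool requires Python 2.7 or 3.4.', file=sys.stderr)
        sys.exit(-1)

try:
    raise ValueError("demo")
except ValueError as e:                # 'except X, e' is Python-2-only syntax
    print('caught: %s' % e)
```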
@@ -1,704 +0,0 @@
|
|||||||
from __future__ import print_function
|
|
||||||
'''
|
|
||||||
Copyright (c) 1998-2002 John Aycock
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
|
||||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
|
||||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
|
||||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
|
||||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
'''
|
|
||||||
|
|
||||||
__version__ = 'SPARK-0.7 (pre-alpha-7) uncompyle trim'
|
|
||||||
|
|
||||||
def _namelist(instance):
|
|
||||||
namelist, namedict, classlist = [], {}, [instance.__class__]
|
|
||||||
for c in classlist:
|
|
||||||
for b in c.__bases__:
|
|
||||||
classlist.append(b)
|
|
||||||
for name in list(c.__dict__.keys()):
|
|
||||||
if name not in namedict:
|
|
||||||
namelist.append(name)
|
|
||||||
namedict[name] = 1
|
|
||||||
return namelist
|
|
||||||
|
|
||||||
class _State:
|
|
||||||
'''
|
|
||||||
Extracted from GenericParser and made global so that [un]picking works.
|
|
||||||
'''
|
|
||||||
def __init__(self, stateno, items):
|
|
||||||
self.T, self.complete, self.items = [], [], items
|
|
||||||
self.stateno = stateno
|
|
||||||
|
|
||||||
class GenericParser:
|
|
||||||
'''
|
|
||||||
An Earley parser, as per J. Earley, "An Efficient Context-Free
|
|
||||||
Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
|
|
||||||
"An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
|
|
||||||
Carnegie-Mellon University, August 1968. New formulation of
|
|
||||||
the parser according to J. Aycock, "Practical Earley Parsing
|
|
||||||
and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
|
|
||||||
2001, and J. Aycock and R. N. Horspool, "Practical Earley
|
|
||||||
Parsing", unpublished paper, 2001.
|
|
||||||
'''
|
|
||||||
|
|
||||||
def __init__(self, start):
|
|
||||||
self.rules = {}
|
|
||||||
self.rule2func = {}
|
|
||||||
self.rule2name = {}
|
|
||||||
self.collectRules()
|
|
||||||
self.augment(start)
|
|
||||||
self.ruleschanged = 1
|
|
||||||
|
|
||||||
_NULLABLE = '\e_'
|
|
||||||
_START = 'START'
|
|
||||||
_BOF = '|-'
|
|
||||||
|
|
||||||
#
|
|
||||||
# When pickling, take the time to generate the full state machine;
|
|
||||||
# some information is then extraneous, too. Unfortunately we
|
|
||||||
# can't save the rule2func map.
|
|
||||||
#
|
|
||||||
def __getstate__(self):
|
|
||||||
if self.ruleschanged:
|
|
||||||
#
|
|
||||||
# XXX - duplicated from parse()
|
|
||||||
#
|
|
||||||
self.computeNull()
|
|
||||||
self.newrules = {}
|
|
||||||
self.new2old = {}
|
|
||||||
self.makeNewRules()
|
|
||||||
self.ruleschanged = 0
|
|
||||||
self.edges, self.cores = {}, {}
|
|
||||||
self.states = { 0: self.makeState0() }
|
|
||||||
self.makeState(0, self._BOF)
|
|
||||||
#
|
|
||||||
# XXX - should find a better way to do this..
|
|
||||||
#
|
|
||||||
changes = 1
|
|
||||||
while changes:
|
|
||||||
changes = 0
|
|
||||||
for k, v in list(self.edges.items()):
|
|
||||||
if v is None:
|
|
||||||
state, sym = k
|
|
||||||
if self.states.has_key(state):
|
|
||||||
self.goto(state, sym)
|
|
||||||
changes = 1
|
|
||||||
rv = self.__dict__.copy()
|
|
||||||
for s in self.states.values():
|
|
||||||
del s.items
|
|
||||||
del rv['rule2func']
|
|
||||||
del rv['nullable']
|
|
||||||
del rv['cores']
|
|
||||||
return rv
|
|
||||||
|
|
||||||
def __setstate__(self, D):
|
|
||||||
self.rules = {}
|
|
||||||
self.rule2func = {}
|
|
||||||
self.rule2name = {}
|
|
||||||
self.collectRules()
|
|
||||||
start = D['rules'][self._START][0][1][1] # Blech.
|
|
||||||
self.augment(start)
|
|
||||||
D['rule2func'] = self.rule2func
|
|
||||||
D['makeSet'] = self.makeSet_fast
|
|
||||||
self.__dict__ = D
|
|
||||||
|
|
||||||
#
|
|
||||||
# A hook for GenericASTBuilder and GenericASTMatcher. Mess
|
|
||||||
# thee not with this; nor shall thee toucheth the _preprocess
|
|
||||||
# argument to addRule.
|
|
||||||
#
|
|
||||||
def preprocess(self, rule, func): return rule, func
|
|
||||||
|
|
||||||
def addRule(self, doc, func, _preprocess=1):
|
|
||||||
fn = func
|
|
||||||
rules = doc.split()
|
|
||||||
|
|
||||||
index = []
|
|
||||||
for i in range(len(rules)):
|
|
||||||
if rules[i] == '::=':
|
|
||||||
index.append(i-1)
|
|
||||||
index.append(len(rules))
|
|
||||||
|
|
||||||
for i in range(len(index)-1):
|
|
||||||
lhs = rules[index[i]]
|
|
||||||
rhs = rules[index[i]+2:index[i+1]]
|
|
||||||
rule = (lhs, tuple(rhs))
|
|
||||||
|
|
||||||
if _preprocess:
|
|
||||||
rule, fn = self.preprocess(rule, func)
|
|
||||||
|
|
||||||
if lhs in self.rules:
|
|
||||||
self.rules[lhs].append(rule)
|
|
||||||
else:
|
|
||||||
self.rules[lhs] = [ rule ]
|
|
||||||
self.rule2func[rule] = fn
|
|
||||||
self.rule2name[rule] = func.__name__[2:]
|
|
||||||
self.ruleschanged = 1
|
|
||||||
|
|
||||||
def collectRules(self):
|
|
||||||
for name in _namelist(self):
|
|
||||||
if name[:2] == 'p_':
|
|
||||||
func = getattr(self, name)
|
|
||||||
doc = func.__doc__
|
|
||||||
self.addRule(doc, func)
|
|
||||||
|
|
||||||
def augment(self, start):
|
|
||||||
rule = '%s ::= %s %s' % (self._START, self._BOF, start)
|
|
||||||
self.addRule(rule, lambda args: args[1], 0)
|
|
||||||
|
|
||||||
def computeNull(self):
|
|
||||||
self.nullable = {}
|
|
||||||
tbd = []
|
|
||||||
|
|
||||||
for rulelist in list(self.rules.values()):
|
|
||||||
lhs = rulelist[0][0]
|
|
||||||
self.nullable[lhs] = 0
|
|
||||||
for rule in rulelist:
|
|
||||||
rhs = rule[1]
|
|
||||||
if len(rhs) == 0:
|
|
||||||
self.nullable[lhs] = 1
|
|
||||||
continue
|
|
||||||
#
|
|
||||||
# We only need to consider rules which
|
|
||||||
# consist entirely of nonterminal symbols.
|
|
||||||
# This should be a savings on typical
|
|
||||||
# grammars.
|
|
||||||
#
|
|
||||||
for sym in rhs:
|
|
||||||
if sym not in self.rules:
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
tbd.append(rule)
|
|
||||||
changes = 1
|
|
||||||
while changes:
|
|
||||||
changes = 0
|
|
||||||
for lhs, rhs in tbd:
|
|
||||||
if self.nullable[lhs]:
|
|
||||||
continue
|
|
||||||
for sym in rhs:
|
|
||||||
if not self.nullable[sym]:
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
self.nullable[lhs] = 1
|
|
||||||
changes = 1
|
|
||||||
|
|
||||||
def makeState0(self):
|
|
||||||
s0 = _State(0, [])
|
|
||||||
for rule in self.newrules[self._START]:
|
|
||||||
s0.items.append((rule, 0))
|
|
||||||
return s0
|
|
||||||
|
|
||||||
def finalState(self, tokens):
|
|
||||||
#
|
|
||||||
# Yuck.
|
|
||||||
#
|
|
||||||
if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
|
|
||||||
return 1
|
|
||||||
start = self.rules[self._START][0][1][1]
|
|
||||||
return self.goto(1, start)
|
|
||||||
|
|
||||||
def makeNewRules(self):
|
|
||||||
worklist = []
|
|
||||||
for rulelist in list(self.rules.values()):
|
|
||||||
for rule in rulelist:
|
|
||||||
worklist.append((rule, 0, 1, rule))
|
|
||||||
|
|
||||||
for rule, i, candidate, oldrule in worklist:
|
|
||||||
lhs, rhs = rule
|
|
||||||
n = len(rhs)
|
|
||||||
while i < n:
|
|
||||||
sym = rhs[i]
|
|
||||||
if sym not in self.rules or \
|
|
||||||
not self.nullable[sym]:
|
|
||||||
candidate = 0
|
|
||||||
i = i + 1
|
|
||||||
continue
|
|
||||||
|
|
||||||
newrhs = list(rhs)
|
|
||||||
newrhs[i] = self._NULLABLE+sym
|
|
||||||
newrule = (lhs, tuple(newrhs))
|
|
||||||
worklist.append((newrule, i+1,
|
|
||||||
candidate, oldrule))
|
|
||||||
candidate = 0
|
|
||||||
i = i + 1
|
|
||||||
else:
|
|
||||||
if candidate:
|
|
||||||
lhs = self._NULLABLE+lhs
|
|
||||||
rule = (lhs, rhs)
|
|
||||||
if lhs in self.newrules:
|
|
||||||
self.newrules[lhs].append(rule)
|
|
||||||
else:
|
|
||||||
self.newrules[lhs] = [ rule ]
|
|
||||||
self.new2old[rule] = oldrule
|
|
||||||
|
|
||||||
def typestring(self, token):
|
|
||||||
return None
|
|
||||||
|
|
||||||
def error(self, token):
|
|
||||||
print("Syntax error at or near `%s' token" % token)
|
|
||||||
raise SystemExit
|
|
||||||
|
|
||||||
def parse(self, tokens):
|
|
||||||
sets = [ [(1,0), (2,0)] ]
|
|
||||||
self.links = {}
|
|
||||||
|
|
||||||
if self.ruleschanged:
|
|
||||||
self.computeNull()
|
|
||||||
self.newrules = {}
|
|
||||||
self.new2old = {}
|
|
||||||
self.makeNewRules()
|
|
||||||
self.ruleschanged = 0
|
|
||||||
self.edges, self.cores = {}, {}
|
|
||||||
self.states = { 0: self.makeState0() }
|
|
||||||
self.makeState(0, self._BOF)
|
|
||||||
|
|
||||||
for i in xrange(len(tokens)):
|
|
||||||
sets.append([])
|
|
||||||
|
|
||||||
if sets[i] == []:
|
|
||||||
break
|
|
||||||
self.makeSet(tokens[i], sets, i)
|
|
||||||
else:
|
|
||||||
sets.append([])
|
|
||||||
self.makeSet(None, sets, len(tokens))
|
|
||||||
|
|
||||||
finalitem = (self.finalState(tokens), 0)
|
|
||||||
if finalitem not in sets[-2]:
|
|
||||||
if len(tokens) > 0:
|
|
||||||
self.error(tokens[i-1])
|
|
||||||
else:
|
|
||||||
self.error(None)
|
|
||||||
|
|
||||||
return self.buildTree(self._START, finalitem,
|
|
||||||
tokens, len(sets)-2)
|
|
||||||
|
|
||||||
def isnullable(self, sym):
|
|
||||||
#
|
|
||||||
# For symbols in G_e only. If we weren't supporting 1.5,
|
|
||||||
# could just use sym.startswith().
|
|
||||||
#
|
|
||||||
return self._NULLABLE == sym[0:len(self._NULLABLE)]
|
|
||||||
|
|
||||||
def skip(self, xxx_todo_changeme, pos=0):
|
|
||||||
(lhs, rhs) = xxx_todo_changeme
|
|
||||||
n = len(rhs)
|
|
||||||
while pos < n:
|
|
||||||
if not self.isnullable(rhs[pos]):
|
|
||||||
break
|
|
||||||
pos = pos + 1
|
|
||||||
return pos
|
|
||||||
|
|
||||||
def makeState(self, state, sym):
|
|
||||||
assert sym is not None
|
|
||||||
#
|
|
||||||
# Compute \epsilon-kernel state's core and see if
|
|
||||||
# it exists already.
|
|
||||||
#
|
|
||||||
kitems = []
|
|
||||||
for rule, pos in self.states[state].items:
|
|
||||||
lhs, rhs = rule
|
|
||||||
if rhs[pos:pos+1] == (sym,):
|
|
||||||
kitems.append((rule, self.skip(rule, pos+1)))
|
|
||||||
|
|
||||||
tcore = tuple(sorted(kitems))
|
|
||||||
if self.cores.has_key(tcore):
|
|
||||||
return self.cores[tcore]
|
|
||||||
#
|
|
||||||
# Nope, doesn't exist. Compute it and the associated
|
|
||||||
# \epsilon-nonkernel state together; we'll need it right away.
|
|
||||||
#
|
|
||||||
k = self.cores[tcore] = len(self.states)
|
|
||||||
K, NK = _State(k, kitems), _State(k+1, [])
|
|
||||||
self.states[k] = K
|
|
||||||
predicted = {}
|
|
||||||
|
|
||||||
edges = self.edges
|
|
||||||
rules = self.newrules
|
|
||||||
for X in K, NK:
|
|
||||||
worklist = X.items
|
|
||||||
for item in worklist:
|
|
||||||
rule, pos = item
|
|
||||||
lhs, rhs = rule
|
|
||||||
if pos == len(rhs):
|
|
||||||
X.complete.append(rule)
|
|
||||||
continue
|
|
||||||
|
|
||||||
nextSym = rhs[pos]
|
|
||||||
key = (X.stateno, nextSym)
|
|
||||||
if not rules.has_key(nextSym):
|
|
||||||
if not edges.has_key(key):
|
|
||||||
edges[key] = None
|
|
||||||
X.T.append(nextSym)
|
|
||||||
else:
|
|
||||||
edges[key] = None
|
|
||||||
if not predicted.has_key(nextSym):
|
|
||||||
predicted[nextSym] = 1
|
|
||||||
for prule in rules[nextSym]:
|
|
||||||
ppos = self.skip(prule)
|
|
||||||
new = (prule, ppos)
|
|
||||||
NK.items.append(new)
|
|
||||||
#
|
|
||||||
# Problem: we know K needs generating, but we
|
|
||||||
# don't yet know about NK. Can't commit anything
|
|
||||||
# regarding NK to self.edges until we're sure. Should
|
|
||||||
# we delay committing on both K and NK to avoid this
|
|
||||||
# hacky code? This creates other problems..
|
|
||||||
#
|
|
||||||
if X is K:
|
|
||||||
edges = {}
|
|
||||||
|
|
||||||
if NK.items == []:
|
|
||||||
return k
|
|
||||||
|
|
||||||
#
|
|
||||||
# Check for \epsilon-nonkernel's core. Unfortunately we
|
|
||||||
# need to know the entire set of predicted nonterminals
|
|
||||||
# to do this without accidentally duplicating states.
|
|
||||||
#
|
|
||||||
tcore = tuple(sorted(predicted.keys()))
|
|
||||||
if self.cores.has_key(tcore):
|
|
||||||
self.edges[(k, None)] = self.cores[tcore]
|
|
||||||
return k
|
|
||||||
|
|
||||||
nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
|
|
||||||
self.edges.update(edges)
|
|
||||||
self.states[nk] = NK
|
|
||||||
return k
|
|
||||||
|
|
||||||
def goto(self, state, sym):
|
|
||||||
key = (state, sym)
|
|
||||||
if not self.edges.has_key(key):
|
|
||||||
#
|
|
||||||
# No transitions from state on sym.
|
|
||||||
#
|
|
||||||
return None
|
|
||||||
|
|
||||||
rv = self.edges[key]
|
|
||||||
if rv is None:
|
|
||||||
#
|
|
||||||
# Target state isn't generated yet. Remedy this.
|
|
||||||
#
|
|
||||||
rv = self.makeState(state, sym)
|
|
||||||
self.edges[key] = rv
|
|
||||||
return rv
|
|
||||||
|
|
||||||
def gotoT(self, state, t):
|
|
||||||
return [self.goto(state, t)]
|
|
||||||
|
|
||||||
def gotoST(self, state, st):
|
|
||||||
rv = []
|
|
||||||
for t in self.states[state].T:
|
|
||||||
if st == t:
|
|
||||||
rv.append(self.goto(state, t))
|
|
||||||
return rv
|
|
||||||
|
|
||||||
def add(self, set, item, i=None, predecessor=None, causal=None):
|
|
||||||
if predecessor is None:
|
|
||||||
if item not in set:
|
|
||||||
set.append(item)
|
|
||||||
else:
|
|
||||||
key = (item, i)
|
|
||||||
if item not in set:
|
|
||||||
self.links[key] = []
|
|
||||||
set.append(item)
|
|
||||||
self.links[key].append((predecessor, causal))
|
|
||||||
|
|
||||||
def makeSet(self, token, sets, i):
|
|
||||||
cur, next = sets[i], sets[i+1]
|
|
||||||
|
|
||||||
ttype = token is not None and self.typestring(token) or None
|
|
||||||
if ttype is not None:
|
|
||||||
fn, arg = self.gotoT, ttype
|
|
||||||
else:
|
|
||||||
fn, arg = self.gotoST, token
|
|
||||||
|
|
||||||
for item in cur:
|
|
||||||
ptr = (item, i)
|
|
||||||
state, parent = item
|
|
||||||
add = fn(state, arg)
|
|
||||||
for k in add:
|
|
||||||
if k is not None:
|
|
||||||
self.add(next, (k, parent), i+1, ptr)
|
|
||||||
nk = self.goto(k, None)
|
|
||||||
if nk is not None:
|
|
||||||
self.add(next, (nk, i+1))
|
|
||||||
|
|
||||||
if parent == i:
|
|
||||||
continue
|
|
||||||
|
|
||||||
for rule in self.states[state].complete:
|
|
||||||
lhs, rhs = rule
|
|
||||||
for pitem in sets[parent]:
|
|
||||||
pstate, pparent = pitem
|
|
||||||
k = self.goto(pstate, lhs)
|
|
||||||
if k is not None:
|
|
||||||
why = (item, i, rule)
|
|
||||||
pptr = (pitem, parent)
|
|
||||||
self.add(cur, (k, pparent),
|
|
||||||
i, pptr, why)
|
|
||||||
nk = self.goto(k, None)
|
|
||||||
if nk is not None:
|
|
||||||
self.add(cur, (nk, i))
|
|
||||||
|
|
||||||
def makeSet_fast(self, token, sets, i):
|
|
||||||
#
|
|
||||||
# Call *only* when the entire state machine has been built!
|
|
||||||
# It relies on self.edges being filled in completely, and
|
|
||||||
# then duplicates and inlines code to boost speed at the
|
|
||||||
# cost of extreme ugliness.
|
|
||||||
#
|
|
||||||
cur, next = sets[i], sets[i+1]
|
|
||||||
ttype = token is not None and self.typestring(token) or None
|
|
||||||
|
|
||||||
for item in cur:
|
|
||||||
ptr = (item, i)
|
|
||||||
state, parent = item
|
|
||||||
if ttype is not None:
|
|
||||||
k = self.edges.get((state, ttype), None)
|
|
||||||
if k is not None:
|
|
||||||
#self.add(next, (k, parent), i+1, ptr)
|
|
||||||
#INLINED --v
|
|
||||||
new = (k, parent)
|
|
||||||
key = (new, i+1)
|
|
||||||
if new not in next:
|
|
||||||
self.links[key] = []
|
|
||||||
next.append(new)
|
|
||||||
self.links[key].append((ptr, None))
|
|
||||||
#INLINED --^
|
|
||||||
#nk = self.goto(k, None)
|
|
||||||
nk = self.edges.get((k, None), None)
|
|
||||||
if nk is not None:
|
|
||||||
#self.add(next, (nk, i+1))
|
|
||||||
#INLINED --v
|
|
||||||
new = (nk, i+1)
|
|
||||||
if new not in next:
|
|
||||||
next.append(new)
|
|
||||||
#INLINED --^
|
|
||||||
else:
|
|
||||||
add = self.gotoST(state, token)
|
|
||||||
for k in add:
|
|
||||||
if k is not None:
|
|
||||||
self.add(next, (k, parent), i+1, ptr)
|
|
||||||
#nk = self.goto(k, None)
|
|
||||||
nk = self.edges.get((k, None), None)
|
|
||||||
if nk is not None:
|
|
||||||
self.add(next, (nk, i+1))
|
|
||||||
|
|
||||||
if parent == i:
|
|
||||||
continue
|
|
||||||
|
|
||||||
for rule in self.states[state].complete:
|
|
||||||
lhs, rhs = rule
|
|
||||||
for pitem in sets[parent]:
|
|
||||||
pstate, pparent = pitem
|
|
||||||
#k = self.goto(pstate, lhs)
|
|
||||||
k = self.edges.get((pstate, lhs), None)
|
|
||||||
if k is not None:
|
|
||||||
why = (item, i, rule)
|
|
||||||
pptr = (pitem, parent)
|
|
||||||
#self.add(cur, (k, pparent),
|
|
||||||
# i, pptr, why)
|
|
||||||
#INLINED --v
|
|
||||||
new = (k, pparent)
|
|
||||||
key = (new, i)
|
|
||||||
if new not in cur:
|
|
||||||
self.links[key] = []
|
|
||||||
cur.append(new)
|
|
||||||
self.links[key].append((pptr, why))
|
|
||||||
#INLINED --^
|
|
||||||
#nk = self.goto(k, None)
|
|
||||||
nk = self.edges.get((k, None), None)
|
|
||||||
if nk is not None:
|
|
||||||
#self.add(cur, (nk, i))
|
|
||||||
#INLINED --v
|
|
||||||
new = (nk, i)
|
|
||||||
if new not in cur:
|
|
||||||
cur.append(new)
|
|
||||||
#INLINED --^
|
|
||||||
|
|
||||||
def predecessor(self, key, causal):
|
|
||||||
for p, c in self.links[key]:
|
|
||||||
if c == causal:
|
|
||||||
return p
|
|
||||||
assert 0
|
|
||||||
|
|
||||||
def causal(self, key):
|
|
||||||
links = self.links[key]
|
|
||||||
if len(links) == 1:
|
|
||||||
return links[0][1]
|
|
||||||
choices = []
|
|
||||||
rule2cause = {}
|
|
||||||
for p, c in links:
|
|
||||||
rule = c[2]
|
|
||||||
choices.append(rule)
|
|
||||||
rule2cause[rule] = c
|
|
||||||
return rule2cause[self.ambiguity(choices)]
|
|
||||||
|
|
||||||
def deriveEpsilon(self, nt):
|
|
||||||
if len(self.newrules[nt]) > 1:
|
|
||||||
rule = self.ambiguity(self.newrules[nt])
|
|
||||||
else:
|
|
||||||
rule = self.newrules[nt][0]
|
|
||||||
# print(rule)
|
|
||||||
|
|
||||||
rhs = rule[1]
|
|
||||||
attr = [None] * len(rhs)
|
|
||||||
|
|
||||||
for i in range(len(rhs)-1, -1, -1):
|
|
||||||
attr[i] = self.deriveEpsilon(rhs[i])
|
|
||||||
return self.rule2func[self.new2old[rule]](attr)
|
|
||||||
|
|
||||||
def buildTree(self, nt, item, tokens, k):
|
|
||||||
state, parent = item
|
|
||||||
|
|
||||||
choices = []
|
|
||||||
for rule in self.states[state].complete:
|
|
||||||
if rule[0] == nt:
|
|
||||||
choices.append(rule)
|
|
||||||
rule = choices[0]
|
|
||||||
if len(choices) > 1:
|
|
||||||
rule = self.ambiguity(choices)
|
|
||||||
# print(rule)
|
|
||||||
|
|
||||||
rhs = rule[1]
|
|
||||||
attr = [None] * len(rhs)
|
|
||||||
|
|
||||||
for i in range(len(rhs)-1, -1, -1):
|
|
||||||
sym = rhs[i]
|
|
||||||
if not self.newrules.has_key(sym):
|
|
||||||
if sym != self._BOF:
|
|
||||||
attr[i] = tokens[k-1]
|
|
||||||
key = (item, k)
|
|
||||||
item, k = self.predecessor(key, None)
|
|
||||||
#elif self.isnullable(sym):
|
|
||||||
elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
|
|
||||||
attr[i] = self.deriveEpsilon(sym)
|
|
||||||
else:
|
|
||||||
key = (item, k)
|
|
||||||
why = self.causal(key)
|
|
||||||
attr[i] = self.buildTree(sym, why[0],
|
|
||||||
tokens, why[1])
|
|
||||||
item, k = self.predecessor(key, why)
|
|
||||||
return self.rule2func[self.new2old[rule]](attr)
|
|
||||||
|
|
||||||
def ambiguity(self, rules):
|
|
||||||
#
|
|
||||||
# XXX - problem here and in collectRules() if the same rule
|
|
||||||
# appears in >1 method. Also undefined results if rules
|
|
||||||
# causing the ambiguity appear in the same method.
|
|
||||||
#
|
|
||||||
sortlist = []
|
|
||||||
name2index = {}
|
|
||||||
for i in range(len(rules)):
|
|
||||||
lhs, rhs = rule = rules[i]
|
|
||||||
name = self.rule2name[self.new2old[rule]]
|
|
||||||
sortlist.append((len(rhs), name))
|
|
||||||
name2index[name] = i
|
|
||||||
sortlist.sort()
|
|
||||||
list = [a_b[1] for a_b in sortlist]
|
|
||||||
return rules[name2index[self.resolve(list)]]
|
|
||||||
|
|
||||||
def resolve(self, list):
|
|
||||||
'''
|
|
||||||
Resolve ambiguity in favor of the shortest RHS.
|
|
||||||
Since we walk the tree from the top down, this
|
|
||||||
should effectively resolve in favor of a "shift".
|
|
||||||
'''
|
|
||||||
return list[0]
|
|
||||||
|
|
||||||
#
|
|
||||||
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
|
|
||||||
# for a given input. The extra argument is a class (not an instance!)
|
|
||||||
# which supports the "__setslice__" and "__len__" methods.
|
|
||||||
#
|
|
||||||
# XXX - silently overrides any user code in methods.
|
|
||||||
#
|
|
||||||
|
|
||||||
class GenericASTBuilder(GenericParser):
|
|
||||||
def __init__(self, AST, start):
|
|
||||||
GenericParser.__init__(self, start)
|
|
||||||
self.AST = AST
|
|
||||||
|
|
||||||
def preprocess(self, rule, func):
|
|
||||||
rebind = lambda lhs, self=self: \
|
|
||||||
lambda args, lhs=lhs, self=self: \
|
|
||||||
self.buildASTNode(args, lhs)
|
|
||||||
lhs, rhs = rule
|
|
||||||
return rule, rebind(lhs)
|
|
||||||
|
|
||||||
def buildASTNode(self, args, lhs):
|
|
||||||
children = []
|
|
||||||
for arg in args:
|
|
||||||
if isinstance(arg, self.AST):
|
|
||||||
children.append(arg)
|
|
||||||
else:
|
|
||||||
children.append(self.terminal(arg))
|
|
||||||
return self.nonterminal(lhs, children)
|
|
||||||
|
|
||||||
def terminal(self, token): return token
|
|
||||||
|
|
||||||
def nonterminal(self, type, args):
|
|
||||||
rv = self.AST(type)
|
|
||||||
rv[:len(args)] = args
|
|
||||||
return rv
|
|
||||||
|
|
||||||
class GenericASTTraversalPruningException:
|
|
||||||
pass
|
|
||||||
|
|
||||||
class GenericASTTraversal:
|
|
||||||
'''
|
|
||||||
GenericASTTraversal is a Visitor pattern according to Design Patterns. For
|
|
||||||
each node it attempts to invoke the method n_<node type>, falling
|
|
||||||
back onto the default() method if the n_* can't be found. The preorder
|
|
||||||
traversal also looks for an exit hook named n_<node type>_exit (no default
|
|
||||||
routine is called if it's not found). To prematurely halt traversal
|
|
||||||
of a subtree, call the prune() method -- this only makes sense for a
|
|
||||||
preorder traversal. Node type is determined via the typestring() method.
|
|
||||||
'''
|
|
||||||
def __init__(self, ast):
|
|
||||||
self.ast = ast
|
|
||||||
|
|
||||||
def typestring(self, node):
|
|
||||||
return node.type
|
|
||||||
|
|
||||||
def prune(self):
|
|
||||||
raise GenericASTTraversalPruningException
|
|
||||||
|
|
||||||
def preorder(self, node=None):
|
|
||||||
if node is None:
|
|
||||||
node = self.ast
|
|
||||||
|
|
||||||
try:
|
|
||||||
name = 'n_' + self.typestring(node)
|
|
||||||
if hasattr(self, name):
|
|
||||||
func = getattr(self, name)
|
|
||||||
func(node)
|
|
||||||
else:
|
|
||||||
self.default(node)
|
|
||||||
except GenericASTTraversalPruningException:
|
|
||||||
return
|
|
||||||
|
|
||||||
for kid in node:
|
|
||||||
self.preorder(kid)
|
|
||||||
|
|
||||||
name = name + '_exit'
|
|
||||||
if hasattr(self, name):
|
|
||||||
func = getattr(self, name)
|
|
||||||
func(node)
|
|
||||||
|
|
||||||
def default(self, node):
|
|
||||||
pass
|
|
@@ -30,16 +30,12 @@ from __future__ import print_function
 Probably a complete rewrite would be sensefull. hG/2000-12-27
 '''
 
-import sys, types, os
+import os, marshal, sys, types
 
 if (sys.version_info > (3, 0)):
     from . import walker, verify, magics
-    from . import disas as dis
 else:
     import walker, verify, magics
-    import disas as dis
-
-import marshal
 
 sys.setrecursionlimit(5000)
 __all__ = ['uncompyle_file', 'main']
@@ -82,7 +78,9 @@ def _load_module(filename):
         raise ImportError("This is a Python %s file! Only Python 2.5 to 2.7 files are supported." % version)
     # print version
     fp.read(4) # timestamp
-    co = dis.marshalLoad(fp)
+    bytecode = fp.read()
+    co = marshal.loads(bytecode)
     fp.close()
     return version, co
 
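The replacement of `dis.marshalLoad()` with `marshal.loads()` is the heart of the commit message: once the magic number and timestamp have been consumed, the rest of a .pyc file is a marshalled code object that the interpreter's own `marshal` module can decode, so the hand-written unmarshaller (deleted below) is not needed when the bytecode was produced by the same major version of Python. A rough sketch of the idea, assuming the two 4-byte header fields of the Python 2 .pyc layout (the function name and file name are illustrative):

```python
import dis
import marshal

def load_pyc_code(path):
    # Works only for .pyc files written by the *running* interpreter:
    # marshal is version-specific, which is why a cross-version decompiler
    # eventually needs its own unmarshaller again.
    with open(path, 'rb') as fp:
        fp.read(4)                 # magic number
        fp.read(4)                 # timestamp (Python 2 header layout)
        code = marshal.loads(fp.read())
    return code

# Example: disassemble a module compiled by this same interpreter.
# dis.dis(load_pyc_code('example.pyc'))
```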
@@ -1,151 +0,0 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
"""Disassembler of Python byte code into mnemonics."""
|
|
||||||
|
|
||||||
import marshal, pickle, sys, types
|
|
||||||
|
|
||||||
import dis as Mdis
|
|
||||||
|
|
||||||
from struct import unpack
|
|
||||||
|
|
||||||
internStrings = []
|
|
||||||
|
|
||||||
disco = Mdis.disassemble
|
|
||||||
# XXX For backwards compatibility
|
|
||||||
|
|
||||||
def marshalLoad(fp):
|
|
||||||
global internStrings
|
|
||||||
internStrings = []
|
|
||||||
return load(fp)
|
|
||||||
|
|
||||||
def load(fp):
|
|
||||||
"""
|
|
||||||
Load marshal
|
|
||||||
"""
|
|
||||||
global internStrings
|
|
||||||
|
|
||||||
marshalType = fp.read(1)
|
|
||||||
if marshalType == 'c':
|
|
||||||
Code = types.CodeType
|
|
||||||
|
|
||||||
co_argcount = unpack('i', fp.read(4))[0]
|
|
||||||
co_nlocals = unpack('i', fp.read(4))[0]
|
|
||||||
co_stacksize = unpack('i', fp.read(4))[0]
|
|
||||||
co_flags = unpack('i', fp.read(4))[0]
|
|
||||||
co_code = load(fp)
|
|
||||||
co_consts = load(fp)
|
|
||||||
co_names = load(fp)
|
|
||||||
co_varnames = load(fp)
|
|
||||||
co_freevars = load(fp)
|
|
||||||
co_cellvars = load(fp)
|
|
||||||
co_filename = load(fp)
|
|
||||||
co_name = load(fp)
|
|
||||||
co_firstlineno = unpack('i', fp.read(4))[0]
|
|
||||||
co_lnotab = load(fp)
|
|
||||||
return Code(co_argcount, co_nlocals, co_stacksize, co_flags, co_code,
|
|
||||||
co_consts, co_names, co_varnames, co_filename, co_name,
|
|
||||||
co_firstlineno, co_lnotab, co_freevars, co_cellvars)
|
|
||||||
|
|
||||||
# const type
|
|
||||||
elif marshalType == '.':
|
|
||||||
return Ellipsis
|
|
||||||
elif marshalType == '0':
|
|
||||||
raise KeyError(marshalType)
|
|
||||||
return None
|
|
||||||
elif marshalType == 'N':
|
|
||||||
return None
|
|
||||||
elif marshalType == 'T':
|
|
||||||
return True
|
|
||||||
elif marshalType == 'F':
|
|
||||||
return False
|
|
||||||
elif marshalType == 'S':
|
|
||||||
return StopIteration
|
|
||||||
# number type
|
|
||||||
elif marshalType == 'f':
|
|
||||||
n = fp.read(1)
|
|
||||||
return float(unpack('d', fp.read(n))[0])
|
|
||||||
elif marshalType == 'g':
|
|
||||||
return float(unpack('d', fp.read(8))[0])
|
|
||||||
elif marshalType == 'i':
|
|
||||||
return int(unpack('i', fp.read(4))[0])
|
|
||||||
elif marshalType == 'I':
|
|
||||||
return unpack('q', fp.read(8))[0]
|
|
||||||
elif marshalType == 'x':
|
|
||||||
raise KeyError(marshalType)
|
|
||||||
return None
|
|
||||||
elif marshalType == 'y':
|
|
||||||
raise KeyError(marshalType)
|
|
||||||
return None
|
|
||||||
elif marshalType == 'l':
|
|
||||||
n = unpack('i', fp.read(4))[0]
|
|
||||||
if n == 0:
|
|
||||||
return long(0)
|
|
||||||
size = abs(n)
|
|
||||||
d = long(0)
|
|
||||||
for j in range(0, size):
|
|
||||||
md = int(unpack('h', fp.read(2))[0])
|
|
||||||
d += md << j*15
|
|
||||||
if n < 0:
|
|
||||||
return long(d*-1)
|
|
||||||
return d
|
|
||||||
# strings type
|
|
||||||
elif marshalType == 'R':
|
|
||||||
refnum = unpack('i', fp.read(4))[0]
|
|
||||||
return internStrings[refnum]
|
|
||||||
elif marshalType == 's':
|
|
||||||
strsize = unpack('i', fp.read(4))[0]
|
|
||||||
return str(fp.read(strsize))
|
|
||||||
elif marshalType == 't':
|
|
||||||
strsize = unpack('i', fp.read(4))[0]
|
|
||||||
interned = str(fp.read(strsize))
|
|
||||||
internStrings.append(interned)
|
|
||||||
return interned
|
|
||||||
elif marshalType == 'u':
|
|
||||||
strsize = unpack('i', fp.read(4))[0]
|
|
||||||
unicodestring = fp.read(strsize)
|
|
||||||
return unicodestring.decode('utf-8')
|
|
||||||
# collection type
|
|
||||||
elif marshalType == '(':
|
|
||||||
tuplesize = unpack('i', fp.read(4))[0]
|
|
||||||
ret = tuple()
|
|
||||||
while tuplesize > 0:
|
|
||||||
ret += load(fp),
|
|
||||||
tuplesize -= 1
|
|
||||||
return ret
|
|
||||||
elif marshalType == '[':
|
|
||||||
raise KeyError(marshalType)
|
|
||||||
return None
|
|
||||||
elif marshalType == '{':
|
|
||||||
raise KeyError(marshalType)
|
|
||||||
return None
|
|
||||||
elif marshalType in ['<', '>']:
|
|
||||||
raise KeyError(marshalType)
|
|
||||||
return None
|
|
||||||
else:
|
|
||||||
sys.stderr.write("Unknown type %i (hex %x)\n" % (ord(marshalType), ord(marshalType)))
|
|
||||||
|
|
||||||
def _test():
|
|
||||||
"""Simple test program to disassemble a file."""
|
|
||||||
if sys.argv[1:]:
|
|
||||||
if sys.argv[2:]:
|
|
||||||
sys.stderr.write("usage: python dis.py [-|file]\n")
|
|
||||||
sys.exit(2)
|
|
||||||
fn = sys.argv[1]
|
|
||||||
if not fn or fn == "-":
|
|
||||||
fn = None
|
|
||||||
else:
|
|
||||||
fn = None
|
|
||||||
if fn is None:
|
|
||||||
f = sys.stdin
|
|
||||||
else:
|
|
||||||
f = open(fn)
|
|
||||||
source = f.read()
|
|
||||||
if fn is not None:
|
|
||||||
f.close()
|
|
||||||
else:
|
|
||||||
fn = "<stdin>"
|
|
||||||
code = compile(source, fn, "exec")
|
|
||||||
Mdis.dis(code)
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
_test()
|
|
@@ -24,7 +24,7 @@ del op
 def def_op(name, op):
     opname[op] = name
     opmap[name] = op
     globals().update({name: op})
 
 def name_op(name, op):
     def_op(name, op)
@@ -41,15 +41,15 @@ def jabs_op(name, op):
 def def_extArg(name, op):
     def_op(name, op)
     hasArgumentExtended.append(op)
 
 def updateGlobal():
     globals().update({'PJIF': opmap['JUMP_IF_FALSE']})
     globals().update({'PJIT': opmap['JUMP_IF_TRUE']})
     globals().update({'JA': opmap['JUMP_ABSOLUTE']})
     globals().update({'JF': opmap['JUMP_FORWARD']})
-    globals().update({k.replace('+','_'):v for (k,v) in opmap.items()})
+    globals().update({k.replace('+', '_'): v for (k, v) in opmap.items()})
     globals().update({'JUMP_OPs': map(lambda op: opname[op], hasjrel + hasjabs)})
 
 # Instruction opcodes for compiled code
 # Blank lines correspond to available opcodes
 
@@ -37,17 +37,17 @@ def jrel_op(name, op):
 def jabs_op(name, op):
     def_op(name, op)
     hasjabs.append(op)
 
 def def_extArg(name, op):
     def_op(name, op)
     hasArgumentExtended.append(op)
 
 def updateGlobal():
     globals().update({'PJIF': opmap['JUMP_IF_FALSE']})
     globals().update({'PJIT': opmap['JUMP_IF_TRUE']})
     globals().update({'JA': opmap['JUMP_ABSOLUTE']})
     globals().update({'JF': opmap['JUMP_FORWARD']})
-    globals().update({k.replace('+','_'):v for (k,v) in opmap.items()})
+    globals().update({k.replace('+', '_'): v for (k,v) in opmap.items()})
     globals().update({'JUMP_OPs': map(lambda op: opname[op], hasjrel + hasjabs)})
 
 # Instruction opcodes for compiled code
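For context on the two opcode hunks above: these modules register each opcode through small helper functions and then push the names into the module namespace with `globals().update()`, which is why the scanner code below can use bare names such as `PJIF`, `JA` or `JUMP_OPs`. A self-contained sketch of that pattern, with opcode numbers chosen purely for illustration:

```python
opname = {}   # opcode number -> name
opmap = {}    # name -> opcode number
hasjrel, hasjabs = [], []

def def_op(name, op):
    opname[op] = name
    opmap[name] = op
    globals().update({name: op})   # expose the name as a module-level constant

def jabs_op(name, op):
    def_op(name, op)
    hasjabs.append(op)

# Illustrative entries only, not a real opcode table.
def_op('POP_TOP', 1)
jabs_op('JUMP_ABSOLUTE', 113)

def updateGlobal():
    # Short aliases used throughout the scanners.
    globals().update({'JA': opmap['JUMP_ABSOLUTE']})
    globals().update({'JUMP_OPs': [opname[op] for op in hasjrel + hasjabs]})

updateGlobal()
print(POP_TOP, JA, JUMP_OPs)   # -> 1 113 ['JUMP_ABSOLUTE']
```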
@@ -78,7 +78,6 @@ class Scanner25(scan.Scanner):
 while j < start_byte:
     self.lines.append(linetuple(prev_line_no, start_byte))
     j += 1
-last_op = self.code[self.prev[start_byte]]
 (prev_start_byte, prev_line_no) = (start_byte, line_no)
 while j < codelen:
     self.lines.append(linetuple(prev_line_no, codelen))
@@ -136,7 +135,7 @@ class Scanner25(scan.Scanner):
     continue
 if op in hasconst:
     const = co.co_consts[oparg]
-    if type(const) == types.CodeType:
+    if isinstance(const, types.CodeType):
         oparg = const
         if const.co_name == '<lambda>':
             assert op_name == 'LOAD_CONST'
@@ -150,8 +149,8 @@ class Scanner25(scan.Scanner):
 # verify uses 'pattr' for comparism, since 'attr'
 # now holds Code(const) and thus can not be used
 # for comparism (todo: think about changing this)
-#pattr = 'code_object @ 0x%x %s->%s' %\
+# pattr = 'code_object @ 0x%x %s->%s' %\
 #   (id(const), const.co_filename, const.co_name)
 pattr = '<code_object ' + const.co_name + '>'
 else:
     pattr = const
@@ -228,7 +227,7 @@ class Scanner25(scan.Scanner):
 if opcode == EXTENDED_ARG:
     raise NotImplementedError
 # del POP_TOP
-if opcode in (PJIF,PJIT,JA,JF):
+if opcode in (PJIF, PJIT, JA, JF):
     toDel = []
     # del POP_TOP
     if self.code[i+opsize] == POP_TOP:
@@ -247,7 +246,7 @@ class Scanner25(scan.Scanner):
 if destFor == i+opsize+4:
     setupLoop = self.last_instr(0, jmpabs1target, SETUP_LOOP)
     standarFor = self.last_instr(setupLoop, jmpabs1target, GET_ITER)
-    if standarFor == None:
+    if standarFor is None:
         self.restructJump(jmpabs1target, destFor+self.op_size(POP_BLOCK))
         toDel += [setupLoop, i+opsize+1, i+opsize+4]
 if len(toDel) > 0:
@@ -257,15 +256,16 @@ class Scanner25(scan.Scanner):
 if self.code[i+opsize] == POP_TOP:
     return [i+opsize]
 if opcode == BUILD_LIST:
-    if self.code[i+opsize] == DUP_TOP and self.code[i+opsize+1] in (STORE_NAME,STORE_FAST):
+    if (self.code[i+opsize] == DUP_TOP
+            and self.code[i+opsize+1] in (STORE_NAME, STORE_FAST)):
         # del DUP/STORE_NAME x
-        toDel = [i+opsize,i+opsize+1]
+        toDel = [i+opsize, i+opsize+1]
         nameDel = self.get_argument(i+opsize+1)
         start = i+opsize+1
         end = start
         # del LOAD_NAME x
         while end < len(self.code):
-            end = self.first_instr(end, len(self.code), (LOAD_NAME,LOAD_FAST))
+            end = self.first_instr(end, len(self.code), (LOAD_NAME, LOAD_FAST))
             if nameDel == self.get_argument(end):
                 toDel += [end]
                 break
@@ -275,8 +275,8 @@ class Scanner25(scan.Scanner):
     end += self.op_size(LOAD_FAST)
 # log JA/POP_TOP to del and update PJIF
 while start < end:
-    start = self.first_instr(start, end, (PJIF,PJIT)) # end = len(self.code)
-    if start == None: break
+    start = self.first_instr(start, end, (PJIF, PJIT)) # end = len(self.code)
+    if start is None: break
     target = self.get_target(start)
     if self.code[target] == POP_TOP and self.code[target-3] == JA:
         toDel += [target, target-3]
@@ -287,7 +287,7 @@ class Scanner25(scan.Scanner):
 # del DELETE_NAME x
 start = end
 while end < len(self.code):
-    end = self.first_instr(end, len(self.code), (DELETE_NAME,DELETE_FAST))
+    end = self.first_instr(end, len(self.code), (DELETE_NAME, DELETE_FAST))
     if nameDel == self.get_argument(end):
         toDel += [end]
         break
@@ -302,7 +302,7 @@ class Scanner25(scan.Scanner):
 end = self.first_instr(i, len(self.code), RETURN_VALUE)
 end = self.first_instr(i, end, YIELD_VALUE)
 if end and self.code[end+1] == POP_TOP and self.code[end+2] == JA and self.code[end+5] == POP_BLOCK:
-    return [i,end+5]
+    return [i, end+5]
 # with stmt
 if opcode == WITH_CLEANUP:
     chckDel = i-self.op_size(DELETE_NAME)
@@ -313,7 +313,7 @@ class Scanner25(scan.Scanner):
     assert self.code[chckDel] in (LOAD_NAME, LOAD_FAST)
     toDel += [chckDel]
 
-allStore = self.all_instr(0, i, (STORE_NAME,STORE_FAST))
+allStore = self.all_instr(0, i, (STORE_NAME, STORE_FAST))
 chckStore = -1
 for store in allStore:
     if nameDel == self.get_argument(store):
@@ -321,7 +321,7 @@ class Scanner25(scan.Scanner):
         and self.code[store-4] == DUP_TOP:
         chckStore = store
 assert chckStore > 0
-toDel += [chckStore-4,chckStore-3,chckStore+3]
+toDel += [chckStore-4, chckStore-3, chckStore+3]
 
 chckStp = -1
 allSetup = self.all_instr(chckStore+3, i, (SETUP_FINALLY))
@@ -334,8 +334,9 @@ class Scanner25(scan.Scanner):
 while chckDel < chckStp-3:
     toDel += [chckDel]
     chckDel += self.op_size(self.code[chckDel])
-if self.code[chckStp-3] in (STORE_NAME,STORE_FAST) and self.code[chckStp+3] in (LOAD_NAME,LOAD_FAST) \
-    and self.code[chckStp+6] in (DELETE_NAME,DELETE_FAST):
+if (self.code[chckStp-3] in (STORE_NAME,STORE_FAST)
+        and self.code[chckStp+3] in (LOAD_NAME,LOAD_FAST)
+        and self.code[chckStp+6] in (DELETE_NAME,DELETE_FAST)):
     toDel += [chckStp-3,chckStp+3,chckStp+6]
 # SETUP_WITH opcode dosen't exist in 2.5 but is necessary for the grammar
 self.code[chckStore] = JUMP_ABSOLUTE # ugly hack
@@ -438,7 +439,7 @@ class Scanner25(scan.Scanner):
 listDel = []
 for i in self.op_range(0, len(self.code)):
     ret = self.getOpcodeToDel(i)
-    if ret != None:
+    if ret is not None:
         listDel += ret
 
 # change code structure after deleting byte
@@ -557,7 +558,7 @@ class Scanner25(scan.Scanner):
 j = self.prev[s]
 while code[j] == JA:
     j = self.prev[j]
-if code[j] == LIST_APPEND: #list comprehension
+if code[j] == LIST_APPEND: # list comprehension
     stmts.remove(s)
     continue
 elif code[s] == POP_TOP and code[self.prev[s]] == ROT_TWO:
@@ -602,7 +603,7 @@ class Scanner25(scan.Scanner):
     count_END_FINALLY += 1
 elif op in (SETUP_EXCEPT, SETUP_FINALLY):
     count_SETUP_ += 1
-#return self.lines[start].next
+# return self.lines[start].next
 
 def detect_structure(self, pos, op=None):
     '''
@@ -615,7 +616,7 @@ class Scanner25(scan.Scanner):
 if op is None:
     op = code[pos]
 
-## Detect parent structure
+# Detect parent structure
 parent = self.structs[0]
 start = parent['start']
 end = parent['end']
@@ -627,7 +628,6 @@ class Scanner25(scan.Scanner):
     end = _end
     parent = s
 ## We need to know how many new structures were added in this run
-origStructCount = len(self.structs)
 
 if op == SETUP_LOOP:
     start = pos+3
@@ -698,18 +698,18 @@ class Scanner25(scan.Scanner):
 end = self.restrict_to_parent(target, parent)
 if target != end:
     self.fixed_jumps[pos] = end
-## Add the try block
+# Add the try block
 self.structs.append({'type': 'try',
                      'start': start,
                      'end': end-4})
-## Now isolate the except and else blocks
+# Now isolate the except and else blocks
 end_else = start_else = self.get_target(self.prev[end])
 
-## Add the except blocks
+# Add the except blocks
 i = end
 while i < len(self.code) and self.code[i] != END_FINALLY:
     jmp = self.next_except_jump(i)
-    if jmp == None: # check
+    if jmp is None: # check
         i = self.next_stmt[i]
         continue
     if self.code[jmp] == RETURN_VALUE:
@@ -721,14 +721,13 @@ class Scanner25(scan.Scanner):
 if self.get_target(jmp) != start_else:
     end_else = self.get_target(jmp)
 if self.code[jmp] == JF:
-    #self.fixed_jumps[i] = jmp
     self.fixed_jumps[jmp] = -1
 self.structs.append({'type': 'except',
                      'start': i,
                      'end': jmp})
 i = jmp + 3
 
-## Add the try-else block
+# Add the try-else block
 if end_else != start_else:
     r_end_else = self.restrict_to_parent(end_else, parent)
     self.structs.append({'type': 'try-else',
@@ -747,7 +746,7 @@ class Scanner25(scan.Scanner):
 if target != rtarget and parent['type'] == 'and/or':
     self.fixed_jumps[pos] = rtarget
     return
-#does this jump to right after another cond jump?
+# does this jump to right after another cond jump?
 # if so, it's part of a larger conditional
 if (code[pre[target]] in (PJIF, PJIT)) and (target > pos):
     self.fixed_jumps[pos] = pre[target]
@@ -152,7 +152,7 @@ class Scanner26(scan.Scanner):
|
|||||||
# now holds Code(const) and thus can not be used
|
# now holds Code(const) and thus can not be used
|
||||||
# for comparism (todo: think about changing this)
|
# for comparism (todo: think about changing this)
|
||||||
# pattr = 'code_object @ 0x%x %s->%s' %\
|
# pattr = 'code_object @ 0x%x %s->%s' %\
|
||||||
# (id(const), const.co_filename, const.co_name)
|
# (id(const), const.co_filename, const.co_name)
|
||||||
pattr = '<code_object ' + const.co_name + '>'
|
pattr = '<code_object ' + const.co_name + '>'
|
||||||
else:
|
else:
|
||||||
pattr = const
|
pattr = const
|
||||||
@@ -229,7 +229,7 @@ class Scanner26(scan.Scanner):
|
|||||||
if opcode == EXTENDED_ARG:
|
if opcode == EXTENDED_ARG:
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
# modification of some jump structure
|
# modification of some jump structure
|
||||||
if opcode in (PJIF,PJIT,JA,JF,RETURN_VALUE):
|
if opcode in (PJIF, PJIT, JA, JF, RETURN_VALUE):
|
||||||
toDel = []
|
toDel = []
|
||||||
# del POP_TOP
|
# del POP_TOP
|
||||||
if self.code[i+opsize] == POP_TOP:
|
if self.code[i+opsize] == POP_TOP:
|
||||||
@@ -495,7 +495,7 @@ class Scanner26(scan.Scanner):
|
|||||||
|
|
||||||
def build_stmt_indices(self):
|
def build_stmt_indices(self):
|
||||||
code = self.code
|
code = self.code
|
||||||
start = 0;
|
start = 0
|
||||||
end = len(code)
|
end = len(code)
|
||||||
|
|
||||||
stmt_opcodes = {
|
stmt_opcodes = {
|
||||||
@@ -553,7 +553,7 @@ class Scanner26(scan.Scanner):
|
|||||||
j = self.prev[s]
|
j = self.prev[s]
|
||||||
while code[j] == JA:
|
while code[j] == JA:
|
||||||
j = self.prev[j]
|
j = self.prev[j]
|
||||||
if code[j] == LIST_APPEND: #list comprehension
|
if code[j] == LIST_APPEND: # list comprehension
|
||||||
stmts.remove(s)
|
stmts.remove(s)
|
||||||
continue
|
continue
|
||||||
elif code[s] == POP_TOP and code[self.prev[s]] == ROT_TWO:
|
elif code[s] == POP_TOP and code[self.prev[s]] == ROT_TWO:
|
||||||
@@ -599,7 +599,7 @@ class Scanner26(scan.Scanner):
|
|||||||
count_END_FINALLY += 1
|
count_END_FINALLY += 1
|
||||||
elif op in (SETUP_EXCEPT, SETUP_FINALLY):
|
elif op in (SETUP_EXCEPT, SETUP_FINALLY):
|
||||||
count_SETUP_ += 1
|
count_SETUP_ += 1
|
||||||
#return self.lines[start].next
|
# return self.lines[start].next
|
||||||
|
|
||||||
def detect_structure(self, pos, op=None):
|
def detect_structure(self, pos, op=None):
|
||||||
'''
|
'''
|
||||||
@@ -614,7 +614,7 @@ class Scanner26(scan.Scanner):
|
|||||||
if op is None:
|
if op is None:
|
||||||
op = code[pos]
|
op = code[pos]
|
||||||
|
|
||||||
## Detect parent structure
|
# Detect parent structure
|
||||||
parent = self.structs[0]
|
parent = self.structs[0]
|
||||||
start = parent['start']
|
start = parent['start']
|
||||||
end = parent['end']
|
end = parent['end']
|
||||||
@@ -625,8 +625,6 @@ class Scanner26(scan.Scanner):
|
|||||||
start = _start
|
start = _start
|
||||||
end = _end
|
end = _end
|
||||||
parent = s
|
parent = s
|
||||||
## We need to know how many new structures were added in this run
|
|
||||||
origStructCount = len(self.structs)
|
|
||||||
|
|
||||||
if op == SETUP_LOOP:
|
if op == SETUP_LOOP:
|
||||||
start = pos+3
|
start = pos+3
|
||||||
@@ -697,15 +695,15 @@ class Scanner26(scan.Scanner):
 end = self.restrict_to_parent(target, parent)
 if target != end:
 self.fixed_jumps[pos] = end
-#print target, end, parent
+# print target, end, parent
-## Add the try block
+# Add the try block
 self.structs.append({'type': 'try',
 'start': start,
 'end': end-4})
-## Now isolate the except and else blocks
+# Now isolate the except and else blocks
 end_else = start_else = self.get_target(self.prev[end])

-## Add the except blocks
+# Add the except blocks
 i = end
 while i < len(self.code) and self.code[i] != END_FINALLY:
 jmp = self.next_except_jump(i)
@@ -721,14 +719,13 @@ class Scanner26(scan.Scanner):
 if self.get_target(jmp) != start_else:
 end_else = self.get_target(jmp)
 if self.code[jmp] == JF:
-#self.fixed_jumps[i] = jmp
 self.fixed_jumps[jmp] = -1
 self.structs.append({'type': 'except',
 'start': i,
 'end': jmp})
 i = jmp + 3

-## Add the try-else block
+# Add the try-else block
 if end_else != start_else:
 r_end_else = self.restrict_to_parent(end_else, parent)
 self.structs.append({'type': 'try-else',
@@ -747,7 +744,7 @@ class Scanner26(scan.Scanner):
 if target != rtarget and parent['type'] == 'and/or':
 self.fixed_jumps[pos] = rtarget
 return
-#does this jump to right after another cond jump?
+# does this jump to right after another cond jump?
 # if so, it's part of a larger conditional
 if (code[pre[target]] in (PJIF, PJIT)) and (target > pos):
 self.fixed_jumps[pos] = pre[target]
@@ -814,7 +811,7 @@ class Scanner26(scan.Scanner):
 and self.get_target(target) == self.get_target(next):
 self.fixed_jumps[pos] = pre[next]
 return
-#don't add a struct for a while test, it's already taken care of
+# don't add a struct for a while test, it's already taken care of
 if pos in self.ignore_if:
 return

@@ -822,11 +819,11 @@ class Scanner26(scan.Scanner):
 and pre[rtarget] != pos and pre[pre[rtarget]] != pos \
 and not (code[rtarget] == JA and code[rtarget+3] == POP_BLOCK and code[pre[pre[rtarget]]] != JA):
 rtarget = pre[rtarget]
-#does the if jump just beyond a jump op, then this is probably an if statement
+# does the if jump just beyond a jump op, then this is probably an if statement
 if code[pre[rtarget]] in (JA, JF):
 if_end = self.get_target(pre[rtarget])

-#is this a loop not an if?
+# is this a loop not an if?
 if (if_end < pre[rtarget]) and (code[pre[if_end]] == SETUP_LOOP):
 if(if_end > start):
 return
@@ -865,8 +862,8 @@ class Scanner26(scan.Scanner):
 self.structs = [{'type': 'root',
 'start': 0,
 'end': n-1}]
-self.loops = [] ## All loop entry points
+self.loops = [] # All loop entry points
-self.fixed_jumps = {} ## Map fixed jumps to their real destination
+self.fixed_jumps = {} # Map fixed jumps to their real destination
 self.ignore_if = set()
 self.build_stmt_indices()
 self.not_continue = set()
@@ -876,7 +873,7 @@ class Scanner26(scan.Scanner):
 for i in self.op_range(0, n):
 op = code[i]

-## Determine structures and fix jumps for 2.3+
+# Determine structures and fix jumps for 2.3+
 self.detect_structure(i, op)

 if self.op_hasArgument(op):
@@ -885,10 +882,6 @@ class Scanner26(scan.Scanner):
 if label is None:
 if op in hasjrel and op != FOR_ITER:
 label = i + 3 + oparg
-#elif op in hasjabs: Pas de gestion des jump abslt
-#if op in (PJIF, PJIT): Or pop a faire
-#if (oparg > i):
-#label = oparg
 if label is not None and label != -1:
 targets[label] = targets.get(label, []) + [i]
 elif op == END_FINALLY and i in self.fixed_jumps:

@@ -126,7 +126,7 @@ class Scanner27(scan.Scanner):
 continue
 if op in hasconst:
 const = co.co_consts[oparg]
-if type(const) == types.CodeType:
+if isinstance(const, types.CodeType):
 oparg = const
 if const.co_name == '<lambda>':
 assert op_name == 'LOAD_CONST'
@@ -348,8 +348,6 @@ class Scanner27(scan.Scanner):
 start = _start
 end = _end
 parent = s
-# We need to know how many new structures were added in this run
-origStructCount = len(self.structs)

 if op == SETUP_LOOP:
 start = pos+3

@@ -253,7 +253,7 @@ class GenericParser:
 raise SystemExit

 def parse(self, tokens):
-sets = [ [(1,0), (2,0)] ]
+sets = [ [(1, 0), (2, 0)] ]
 self.links = {}

 if self.ruleschanged:
@@ -470,30 +470,30 @@ class GenericParser:
 if ttype is not None:
 k = self.edges.get((state, ttype), None)
 if k is not None:
-#self.add(next, (k, parent), i+1, ptr)
+# self.add(next, (k, parent), i+1, ptr)
-#INLINED --v
+# INLINED --------v
 new = (k, parent)
 key = (new, i+1)
 if new not in next:
 self.links[key] = []
 next.append(new)
 self.links[key].append((ptr, None))
-#INLINED --^
+# INLINED --------^
-#nk = self.goto(k, None)
+# nk = self.goto(k, None)
 nk = self.edges.get((k, None), None)
 if nk is not None:
-#self.add(next, (nk, i+1))
+# self.add(next, (nk, i+1))
-#INLINED --v
+# INLINED -------------v
 new = (nk, i+1)
 if new not in next:
 next.append(new)
-#INLINED --^
+# INLINED ---------------^
 else:
 add = self.gotoST(state, token)
 for k in add:
 if k is not None:
 self.add(next, (k, parent), i+1, ptr)
-#nk = self.goto(k, None)
+# nk = self.goto(k, None)
 nk = self.edges.get((k, None), None)
 if nk is not None:
 self.add(next, (nk, i+1))
@@ -510,25 +510,24 @@ class GenericParser:
 if k is not None:
 why = (item, i, rule)
 pptr = (pitem, parent)
-#self.add(cur, (k, pparent),
-# i, pptr, why)
-#INLINED --v
+# self.add(cur, (k, pparent), i, pptr, why)
+# INLINED ---------v
 new = (k, pparent)
 key = (new, i)
 if new not in cur:
 self.links[key] = []
 cur.append(new)
 self.links[key].append((pptr, why))
-#INLINED --^
+# INLINED ----------^
 #nk = self.goto(k, None)
 nk = self.edges.get((k, None), None)
 if nk is not None:
 #self.add(cur, (nk, i))
-#INLINED --v
+# INLINED ---------v
 new = (nk, i)
 if new not in cur:
 cur.append(new)
-#INLINED --^
+# INLINED ----------^

 def predecessor(self, key, causal):
 for p, c in self.links[key]:

@@ -81,14 +81,14 @@ class CmpErrorCode(VerifyCmpError):
 self.tokens = [tokens1, tokens2]

 def __str__(self):
-s = reduce(lambda s,t: "%s%-37s\t%-37s\n" % (s, t[0], t[1]),
+s = reduce(lambda s, t: "%s%-37s\t%-37s\n" % (s, t[0], t[1]),
-list(map(lambda a,b: (a,b),
+list(map(lambda a,b: (a, b),
 self.tokens[0],
 self.tokens[1])),
 'Code differs in %s\n' % str(self.name))
-return ('Code differs in %s at offset %s [%s] != [%s]\n\n' % \
+return ('Code differs in %s at offset %s [%s] != [%s]\n\n' %
 (repr(self.name), self.index,
 repr(self.token1), repr(self.token2))) + s

 class CmpErrorCodeLen(VerifyCmpError):
 """Exception to be raised when code length differs."""
@@ -97,8 +97,8 @@ class CmpErrorCodeLen(VerifyCmpError):
 self.tokens = [tokens1, tokens2]

 def __str__(self):
-return reduce(lambda s,t: "%s%-37s\t%-37s\n" % (s, t[0], t[1]),
+return reduce(lambda s, t: "%s%-37s\t%-37s\n" % (s, t[0], t[1]),
-list(map(lambda a,b: (a,b),
+list(map(lambda a, b: (a, b),
 self.tokens[0],
 self.tokens[1])),
 'Code len differs in %s\n' % str(self.name))
@@ -115,7 +115,7 @@ class CmpErrorMember(VerifyCmpError):
 (repr(self.member), repr(self.name),
 repr(self.data[0]), repr(self.data[1]))

-#--- compare ---
+# --- compare ---

 # these members are ignored
 __IGNORE_CODE_MEMBERS__ = ['co_filename', 'co_firstlineno', 'co_lnotab', 'co_stacksize', 'co_names']
@@ -126,10 +126,10 @@ def cmp_code_objects(version, code_obj1, code_obj2, name=''):

 This is the main part of this module.
 """
-#print code_obj1, type(code_obj2)
+# print code_obj1, type(code_obj2)
-assert type(code_obj1) == types.CodeType
+assert isinstance(code_obj1, types.CodeType)
-assert type(code_obj2) == types.CodeType
+assert isinstance(code_obj2, types.CodeType)
-#print dir(code_obj1)
+# print dir(code_obj1)
 if isinstance(code_obj1, object):
 # new style classes (Python 2.2)
 # assume _both_ code objects to be new stle classes
@@ -152,14 +152,14 @@ def cmp_code_objects(version, code_obj1, code_obj2, name=''):
 # if this compare fails, we use the old routine to
 # find out, what exactly is nor equal
 # if this compare succeds, simply return
-#return
+# return
 pass

 if isinstance(code_obj1, object):
 members = [x for x in dir(code_obj1) if x.startswith('co_')]
 else:
-members = dir(code_obj1);
+members = dir(code_obj1)
-members.sort(); #members.reverse()
+members.sort() # ; members.reverse()

 tokens1 = None
 for member in members:
@@ -185,9 +185,9 @@ def cmp_code_objects(version, code_obj1, code_obj2, name=''):
 scanner.setTokenClass(Token)
 try:
 # disassemble both code-objects
-tokens1,customize = scanner.disassemble(code_obj1)
+tokens1, customize = scanner.disassemble(code_obj1)
 del customize # save memory
-tokens2,customize = scanner.disassemble(code_obj2)
+tokens2, customize = scanner.disassemble(code_obj2)
 del customize # save memory
 finally:
 scanner.resetTokenClass() # restore Token class
@@ -203,7 +203,7 @@ def cmp_code_objects(version, code_obj1, code_obj2, name=''):
 if len(tokens1) == len(tokens2) + 2 \
 and tokens1[-1].type == 'RETURN_VALUE' \
 and tokens1[-2].type == 'LOAD_CONST' \
-and tokens1[-2].pattr == None \
+and tokens1[-2].pattr is None \
 and tokens1[-3].type == 'RETURN_VALUE':
 break
 else:
@@ -275,11 +275,11 @@ def cmp_code_objects(version, code_obj1, code_obj2, name=''):
 raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1],
 tokens2[i2], tokens1, tokens2)
 else:
-#import pdb; pdb.set_trace()
+# import pdb; pdb.set_trace()
 if dest1 in check_jumps:
-check_jumps[dest1].append((i1,i2,dest2))
+check_jumps[dest1].append((i1, i2, dest2))
 else:
-check_jumps[dest1] = [(i1,i2,dest2)]
+check_jumps[dest1] = [(i1, i2, dest2)]

 i1 += 1
 i2 += 1