Further conversion to Python3.
parent a87453f84a
commit 088bdd3cfd
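Most hunks in this commit apply the same handful of Python 2 to Python 3 idioms by hand: print becomes a function (a trailing comma becomes end=' '), "except E, err" becomes "except E as err", and filter()/map()/dict views are wrapped in list() where a real list is still needed. The snippet below is an illustrative, self-contained sketch of those idioms only — it is not part of the patch, and the sample data is made up:

# Python 3 idioms used throughout this commit (illustrative only).
facts = {"before": None, "after": "fact"}

# print is a function; a trailing comma becomes end=' '.
print("Checking", len(facts), "facts", end=' ')
print()

# dict views and filter() return iterators; wrap in list() when a list is needed.
missing = list(filter(lambda k: facts[k] is None, facts.keys()))
print("missing:", missing)

# Exceptions are bound with 'as' instead of a comma.
try:
    raise ValueError("no principal fact")
except ValueError as err:
    print("caught:", err)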
@@ -318,8 +318,8 @@ class MessageRule(Rule):
             self.after = fact
 
         if self.before == None or self.after == None:
-            print "Warning: rule does not have both principal facts."
-            print self
+            print("Warning: rule does not have both principal facts.")
+            print(self)
 
     def __str__(self):
         return "Message " + Rule.__str__(self)
@@ -275,14 +275,14 @@ def typeSwitch(line):
     elif res[3] == "typed":
         typeversion = True
     else:
-        print "Cannot determine whether typed or untyped."
+        print("Cannot determine whether typed or untyped.")
         raise ParseException
 
     str = "Detected "
     if not typedversion:
         str += "un"
     str += "typed version."
-    print str
+    print(str)
 
 # Parse a number of lines, including the first line with the type switch
 def linesParse(lines):
@@ -303,9 +303,9 @@ def fileParse(filename):
 
 # Main code
 def main():
-    print "Testing Ifparser module"
-    print
-    print fileParse("NSPK_LOWE.if")
+    print("Testing Ifparser module")
+    print()
+    print(fileParse("NSPK_LOWE.if"))
 
 if __name__ == '__main__':
     main()
@@ -199,7 +199,7 @@ def sanitizeRole(protocol, role):
                 msgto = knowbefore[i]
                 if msgfrom != msgto:
                     ### TEST
-                    print "Substituting %s by %s" % (str(msgfrom), str(msgto))
+                    print("Substituting %s by %s" % (str(msgfrom), str(msgto)))
                     # In all subsequent terms... TODO or
                     # just the next one?
                     for j in range(n+1, len(rules)):
@@ -237,7 +237,7 @@ def sanitizeRole(protocol, role):
             replacelist.append( (t,msg) )
             role.constants.append(msg)
             ### TEST
-            print "Substituting %s by %s" % (str(t), str(msg))
+            print("Substituting %s by %s" % (str(t), str(msg)))
     # Apply replacelist
     if len(replacelist) > 0:
         for ev in role.events:
@@ -280,7 +280,7 @@ def extractRoles(protocol):
     # hrule has been picked. Work back from here
     # first make up a name
     if len(hrule.getActors()) != 1:
-        print "Warning: weird actor list for hrule:", hrule.getActors()
+        print("Warning: weird actor list for hrule:", hrule.getActors())
         name = "X"
         actor = None
     else:
@@ -316,7 +316,7 @@ def extractRoles(protocol):
             # Loop detection
             if rule in role.rules:
                 # This is a loop TODO
-                print "Warning: loop detected for role", role.name
+                print("Warning: loop detected for role", role.name)
                 scan = False # Current setting: stop scan
             else:
                 # No loop, prepend
@@ -60,7 +60,7 @@ class SemiTrace(object):
     def getPrecedingEvents(self,event,previous=[]):
         # If it is cached return cached version
         if event.preceding != None:
-            return filter(lambda x: x not in previous,event.preceding)
+            return [x for x in event.preceding if x not in previous]
         preceding = []
         for prec in event.getBefore():
             preceding.append(prec)
@@ -71,7 +71,7 @@ class SemiTrace(object):
                 preceding.extend(self.getPrecedingEvents(fol))
         preceding = uniq(preceding)
         event.preceding = preceding
-        preceding = filter(lambda x: x not in previous,preceding)
+        preceding = [x for x in preceding if x not in previous]
         return preceding
 
     # Returns -1 if the first event has to be before the second one
@@ -150,7 +150,7 @@ class ProtocolDescription(object):
 
     # Find event by label
     def findEvent(self,eventlabel,eventType=None):
-        for (role,descr) in self.roledescr.items():
+        for (role,descr) in list(self.roledescr.items()):
            for event in descr:
                if event.label == eventlabel:
                    if eventType == None or isinstance(event,eventType):
@@ -181,7 +181,7 @@ class ProtocolDescription(object):
     # that are in the precedingEvents of a certain event
     def getPrecedingLabelSet(self,eventlabel):
         events = self.getPrecedingEvents(eventlabel)
-        events = filter(lambda x: isinstance(x,EventRead),events)
+        events = [x for x in events if isinstance(x,EventRead)]
         return [x.label for x in events]
 
     # Calculate the roles in preceding labelset that is all roles that
@@ -194,7 +194,7 @@ class ProtocolDescription(object):
 
     def __str__(self):
         s = ''
-        for x in self.roledescr.values():
+        for x in list(self.roledescr.values()):
             for e in x:
                 s += str(e) + "\n"
         return s
@@ -279,7 +279,7 @@ class EventClaim(Event):
     # agents
     def ignore(self):
         for untrusted in self.run.attack.untrusted:
-            if untrusted in self.run.roleAgents.values():
+            if untrusted in list(self.run.roleAgents.values()):
                 return True
         return False
 
@@ -6,7 +6,7 @@ import Spdl
 
 def main():
     protocol = Ifparser.fileParse("NSPK_LOWE.if")
-    print Spdl.generator(protocol)
+    print(Spdl.generator(protocol))
 
 if __name__ == "__main__":
     main()
@@ -5,15 +5,15 @@
 def confirm(question):
     answer = ''
     while answer not in ('y','n'):
-        print question,
-        answer = raw_input().lower()
+        print(question, end=' ')
+        answer = input().lower()
     return answer == 'y'
 
 def exists(func,list):
-    return len(filter(func,list)) > 0
+    return len(list(filter(func,list))) > 0
 
 def forall(func,list):
-    return len(filter(func,list)) == len(list)
+    return len(list(filter(func,list))) == len(list)
 
 def uniq(li):
     result = []
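The helpers above change because raw_input() was renamed to input() and filter() now returns a lazy iterator with no len(), so it is materialized with list(). A small sketch of the converted helpers under those assumptions; note that the hunk keeps the parameter name list, which shadows the builtin and would make the outer list(...) call fail, so the sketch renames it to seq:

def exists(func, seq):
    # filter() is lazy in Python 3; materialize it before taking len().
    return len(list(filter(func, seq))) > 0

def confirm(question):
    # raw_input() no longer exists; input() returns a str in Python 3.
    answer = ''
    while answer not in ('y', 'n'):
        print(question, end=' ')
        answer = input().lower()
    return answer == 'y'

print(exists(lambda x: x > 2, [1, 2, 3]))   # True
print(exists(lambda x: x > 9, [1, 2, 3]))   # False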
@@ -205,13 +205,13 @@ def ifParser():
 
     def labeledruleAction(s,l,t):
         if t[0][3] == "Protocol_Rules":
-            print "-----------------"
-            print "- Detected rule -"
-            print "-----------------"
+            print("-----------------")
+            print("- Detected rule -")
+            print("-----------------")
 
-            print t[0]
-            print t[1]
-            print
+            print(t[0])
+            print(t[1])
+            print()
 
     labeledrule.setParseAction(labeledruleAction)
 
@@ -233,17 +233,17 @@ def typeSwitch(line):
         elif res[3] == "typed":
             typeversion = True
         else:
-            print "Cannot determine whether typed or untyped."
+            print("Cannot determine whether typed or untyped.")
             raise ParseException
 
     except:
-        print "Unexpected error while determining (un)typedness of the line", line
+        print("Unexpected error while determining (un)typedness of the line", line)
 
     str = "Detected "
     if not typedversion:
         str += "un"
     str += "typed version."
-    print str
+    print(str)
 
 # Parse an entire file, including the first one
 def linesParse(lines):
@@ -24,7 +24,7 @@
 # Todo:
 #  - add pprint() - pretty-print output of defined BNF
 #
-from __future__ import generators
+
 
 __doc__ = \
 """
@@ -79,13 +79,13 @@ def _ustr(obj):
         # it won't break any existing code.
         return str(obj)
 
-    except UnicodeEncodeError, e:
+    except UnicodeEncodeError as e:
         # The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
         # state that "The return value must be a string object". However, does a
         # unicode object (being a subclass of basestring) count as a "string
         # object"?
         # If so, then return a unicode object:
-        return unicode(obj)
+        return str(obj)
         # Else encode it... but how? There are many choices... :)
         # Replace unprintables with escape codes?
         #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
@@ -125,7 +125,7 @@ class ParseBaseException(Exception):
         elif( aname == "line" ):
             return line( self.loc, self.pstr )
         else:
-            raise AttributeError, aname
+            raise AttributeError(aname)
 
     def __str__( self ):
         return "%s (at char %d), (line:%d, col:%d)" % ( self.msg, self.loc, self.lineno, self.column )
@@ -198,7 +198,7 @@ class ParseResults(object):
                 name = _ustr(name) # will always return a str, but use _ustr for consistency
             self.__name = name
             if toklist:
-                if isinstance(toklist,basestring):
+                if isinstance(toklist,str):
                     toklist = [ toklist ]
                 if asList:
                     if isinstance(toklist,ParseResults):
@@ -235,25 +235,25 @@ class ParseResults(object):
             del self.__toklist[i]
 
     def __contains__( self, k ):
-        return self.__tokdict.has_key(k)
+        return k in self.__tokdict
 
     def __len__( self ): return len( self.__toklist )
     def __iter__( self ): return iter( self.__toklist )
     def keys( self ):
         """Returns all named result keys."""
-        return self.__tokdict.keys()
+        return list(self.__tokdict.keys())
 
     def items( self ):
         """Returns all named result keys and values as a list of tuples."""
-        return [(k,v[-1][0]) for k,v in self.__tokdict.items()]
+        return [(k,v[-1][0]) for k,v in list(self.__tokdict.items())]
 
     def values( self ):
         """Returns all named result values."""
-        return [ v[-1][0] for v in self.__tokdict.values() ]
+        return [ v[-1][0] for v in list(self.__tokdict.values()) ]
 
     def __getattr__( self, name ):
         if name not in self.__slots__:
-            if self.__tokdict.has_key( name ):
+            if name in self.__tokdict:
                 if self.__modal:
                     return self.__tokdict[name][-1][0]
                 else:
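The ParseResults changes above follow one rule: in Python 3, dict.keys()/values()/items() return dynamic views rather than lists, and has_key() is gone, so membership tests use "in" and list() is wrapped around a view wherever Python 2 list behaviour (indexing, a stable snapshot) is still expected. A minimal sketch of the difference, with a made-up dictionary:

d = {"a": 1, "b": 2}

print("a" in d)              # replaces d.has_key("a")

keys = d.keys()              # a view: reflects later changes, cannot be indexed
first = list(d.keys())[0]    # list() restores Python 2 style indexing
d["c"] = 3
print(first, len(keys), list(d.items()))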
@@ -266,7 +266,7 @@ class ParseResults(object):
         if other.__tokdict:
             offset = len(self.__toklist)
             addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
-            otherdictitems = [(k,(v[0],addoffset(v[1])) ) for (k,vlist) in other.__tokdict.items() for v in vlist]
+            otherdictitems = [(k,(v[0],addoffset(v[1])) ) for (k,vlist) in list(other.__tokdict.items()) for v in vlist]
             for k,v in otherdictitems:
                 self[k] = v
                 if isinstance(v[0],ParseResults):
@@ -313,7 +313,7 @@ class ParseResults(object):
 
     def asDict( self ):
         """Returns the named parse results as dictionary."""
-        return dict( self.items() )
+        return dict( list(self.items()) )
 
     def copy( self ):
         """Returns a new copy of a ParseResults object."""
@@ -328,7 +328,7 @@ class ParseResults(object):
         """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
         nl = "\n"
         out = []
-        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items() for v in vlist ] )
+        namedItems = dict( [ (v[1],k) for (k,vlist) in list(self.__tokdict.items()) for v in vlist ] )
         nextLevelIndent = indent + "  "
 
         # collapse out indents if formatting is not desired
@@ -376,7 +376,7 @@ class ParseResults(object):
 
 
     def __lookup(self,sub):
-        for k,vlist in self.__tokdict.items():
+        for k,vlist in list(self.__tokdict.items()):
             for v,loc in vlist:
                 if sub is v:
                     return k
@@ -394,8 +394,8 @@ class ParseResults(object):
             return None
         elif (len(self) == 1 and
                len(self.__tokdict) == 1 and
-               self.__tokdict.values()[0][0][1] in (0,-1)):
-            return self.__tokdict.keys()[0]
+               list(self.__tokdict.values())[0][0][1] in (0,-1)):
+            return list(self.__tokdict.keys())[0]
         else:
             return None
 
@@ -423,13 +423,13 @@ def line( loc, strg ):
         return strg[lastCR+1:]
 
 def _defaultStartDebugAction( instring, loc, expr ):
-    print "Match",expr,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
+    print("Match",expr,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
 
 def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
-    print "Matched",expr,"->",toks.asList()
+    print("Matched",expr,"->",toks.asList())
 
 def _defaultExceptionDebugAction( instring, loc, expr, exc ):
-    print "Exception raised:", exc
+    print("Exception raised:", exc)
 
 def nullDebugAction(*args):
     """'Do-nothing' debug action, to suppress debugging output during parsing."""
@@ -547,8 +547,8 @@ class ParserElement(object):
             try:
                 loc,tokens = self.parseImpl( instring, loc, doActions )
             except IndexError:
-                raise ParseException, ( instring, len(instring), self.errmsg, self )
-        except ParseException, err:
+                raise ParseException( instring, len(instring), self.errmsg, self)
+        except ParseException as err:
             #~ print "Exception raised:", err
             if (self.debugActions[2] ):
                 self.debugActions[2]( instring, tokensStart, self, err )
@@ -561,7 +561,7 @@ class ParserElement(object):
                 try:
                     loc,tokens = self.parseImpl( instring, loc, doActions )
                 except IndexError:
-                    raise ParseException, ( instring, len(instring), self.errmsg, self )
+                    raise ParseException( instring, len(instring), self.errmsg, self)
             else:
                 loc,tokens = self.parseImpl( instring, loc, doActions )
 
@@ -579,7 +579,7 @@ class ParserElement(object):
                                                   self.resultsName,
                                                   asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                   modal=self.modalResults )
-            except ParseException, err:
+            except ParseException as err:
                 #~ print "Exception raised in user parse action:", err
                 if (self.debugActions[2] ):
                     self.debugActions[2]( instring, tokensStart, self, err )
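The pyparsing hunks above also switch the exception syntax: Python 3 only accepts raise E(args) and "except E as err", not the Python 2 comma forms. A tiny sketch, with a made-up exception class:

class ParseFailure(Exception):
    pass

try:
    # Python 2 allowed: raise ParseFailure, "expected identifier"
    raise ParseFailure("expected identifier")
except ParseFailure as err:   # Python 2 allowed: except ParseFailure, err:
    print("caught:", err)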
@@ -670,49 +670,49 @@ class ParserElement(object):
 
     def __add__(self, other ):
         """Implementation of + operator - returns And"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return And( [ self, other ] )
 
     def __radd__(self, other ):
         """Implementation of += operator"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return other + self
 
     def __or__(self, other ):
         """Implementation of | operator - returns MatchFirst"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return MatchFirst( [ self, other ] )
 
     def __ror__(self, other ):
         """Implementation of |= operator"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return other | self
 
     def __xor__(self, other ):
         """Implementation of ^ operator - returns Or"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return Or( [ self, other ] )
 
     def __rxor__(self, other ):
         """Implementation of ^= operator"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return other ^ self
 
     def __and__(self, other ):
         """Implementation of & operator - returns Each"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return Each( [ self, other ] )
 
     def __rand__(self, other ):
         """Implementation of right-& operator"""
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return other & self
 
@@ -985,7 +985,7 @@ class Word(Token):
         if max > 0:
             self.maxLen = max
         else:
-            self.maxLen = sys.maxint
+            self.maxLen = sys.maxsize
 
         if exact > 0:
             self.maxLen = exact
@@ -1064,7 +1064,7 @@ class CharsNotIn(Token):
         if max > 0:
             self.maxLen = max
         else:
-            self.maxLen = sys.maxint
+            self.maxLen = sys.maxsize
 
         if exact > 0:
             self.maxLen = exact
@@ -1143,7 +1143,7 @@ class White(Token):
         if max > 0:
             self.maxLen = max
         else:
-            self.maxLen = sys.maxint
+            self.maxLen = sys.maxsize
 
         if exact > 0:
             self.maxLen = exact
@@ -1197,7 +1197,7 @@ class GoToColumn(PositionToken):
     def parseImpl( self, instring, loc, doActions=True ):
         thiscol = col( loc, instring )
         if thiscol > self.col:
-            raise ParseException, ( instring, loc, "Text not in expected column", self )
+            raise ParseException( instring, loc, "Text not in expected column", self)
         newloc = loc + self.col - thiscol
         ret = instring[ loc: newloc ]
         return newloc, ret
@@ -1287,7 +1287,7 @@ class ParseExpression(ParserElement):
         super(ParseExpression,self).__init__(savelist)
         if isinstance( exprs, list ):
             self.exprs = exprs
-        elif isinstance( exprs, basestring ):
+        elif isinstance( exprs, str ):
             self.exprs = [ Literal( exprs ) ]
         else:
             self.exprs = [ exprs ]
@@ -1390,12 +1390,12 @@ class And(ParseExpression):
         loc, resultlist = self.exprs[0].parse( instring, loc, doActions )
         for e in self.exprs[1:]:
             loc, exprtokens = e.parse( instring, loc, doActions )
-            if exprtokens or exprtokens.keys():
+            if exprtokens or list(exprtokens.keys()):
                 resultlist += exprtokens
         return loc, resultlist
 
     def __iadd__(self, other ):
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return self.append( other ) #And( [ self, other ] )
 
@@ -1435,11 +1435,11 @@ class Or(ParseExpression):
         for e in self.exprs:
             try:
                 loc2 = e.tryParse( instring, loc )
-            except ParseException, err:
+            except ParseException as err:
                 if err.loc > maxExcLoc:
                     maxException = err
                     maxExcLoc = err.loc
-            except IndexError, err:
+            except IndexError as err:
                 if len(instring) > maxExcLoc:
                     maxException = ParseException(instring,len(instring),e.errmsg,self)
                     maxExcLoc = len(instring)
@@ -1454,7 +1454,7 @@ class Or(ParseExpression):
         return maxMatchExp.parse( instring, loc, doActions )
 
     def __ixor__(self, other ):
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return self.append( other ) #Or( [ self, other ] )
 
@@ -1492,11 +1492,11 @@ class MatchFirst(ParseExpression):
             try:
                 ret = e.parse( instring, loc, doActions )
                 return ret
-            except ParseException, err:
+            except ParseException as err:
                 if err.loc > maxExcLoc:
                     maxException = err
                     maxExcLoc = err.loc
-            except IndexError, err:
+            except IndexError as err:
                 if len(instring) > maxExcLoc:
                     maxException = ParseException(instring,len(instring),e.errmsg,self)
                     maxExcLoc = len(instring)
@@ -1506,7 +1506,7 @@ class MatchFirst(ParseExpression):
             raise maxException
 
     def __ior__(self, other ):
-        if isinstance( other, basestring ):
+        if isinstance( other, str ):
             other = Literal( other )
         return self.append( other ) #MatchFirst( [ self, other ] )
 
@@ -1580,13 +1580,13 @@ class Each(ParseExpression):
         finalResults = ParseResults([])
         for r in resultlist:
             dups = {}
-            for k in r.keys():
-                if k in finalResults.keys():
+            for k in list(r.keys()):
+                if k in list(finalResults.keys()):
                     tmp = ParseResults(finalResults[k])
                     tmp += ParseResults(r[k])
                     dups[k] = tmp
             finalResults += ParseResults(r)
-            for k,v in dups.items():
+            for k,v in list(dups.items()):
                 finalResults[k] = v
         return loc, finalResults
 
@@ -1609,7 +1609,7 @@ class ParseElementEnhance(ParserElement):
     """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
     def __init__( self, expr, savelist=False ):
         super(ParseElementEnhance,self).__init__(savelist)
-        if isinstance( expr, basestring ):
+        if isinstance( expr, str ):
             expr = Literal(expr)
         self.expr = expr
         self.strRepr = None
@@ -1739,7 +1739,7 @@ class ZeroOrMore(ParseElementEnhance):
                 if hasIgnoreExprs:
                     loc = self.skipIgnorables( instring, loc )
                 loc, tmptokens = self.expr.parse( instring, loc, doActions )
-                if tmptokens or tmptokens.keys():
+                if tmptokens or list(tmptokens.keys()):
                     tokens += tmptokens
         except (ParseException,IndexError):
             pass
@@ -1772,7 +1772,7 @@ class OneOrMore(ParseElementEnhance):
                 if hasIgnoreExprs:
                     loc = self.skipIgnorables( instring, loc )
                 loc, tmptokens = self.expr.parse( instring, loc, doActions )
-                if tmptokens or tmptokens.keys():
+                if tmptokens or list(tmptokens.keys()):
                     tokens += tmptokens
         except (ParseException,IndexError):
             pass
@@ -1928,7 +1928,7 @@ class Upcase(TokenConverter):
                        DeprecationWarning,stacklevel=2)
 
     def postParse( self, instring, loc, tokenlist ):
-        return map( string.upper, tokenlist )
+        return list(map( string.upper, tokenlist ))
 
 
 class Combine(TokenConverter):
@@ -1957,7 +1957,7 @@ class Combine(TokenConverter):
             del retToks[:]
             retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
 
-        if self.resultsName and len(retToks.keys())>0:
+        if self.resultsName and len(list(retToks.keys()))>0:
             return [ retToks ]
         else:
             return retToks
@@ -1991,7 +1991,7 @@ class Dict(TokenConverter):
             else:
                 dictvalue = tok.copy() #ParseResults(i)
                 del dictvalue[0]
-                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
+                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and list(dictvalue.keys())):
                     tokenlist[ikey] = (dictvalue,i)
                 else:
                     tokenlist[ikey] = (dictvalue[0],i)
@@ -2074,13 +2074,13 @@ empty = Empty().setName("empty")
 
 _escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
 _printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
-_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
-_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
+_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:chr(int(t[0],16)))
+_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:chr(int(t[0],8)))
 _singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
 _charRange = Group(_singleChar + Suppress("-") + _singleChar)
 _reBracketExpr = "[" + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
 
-_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
+_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ chr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
 
 def srange(s):
     r"""Helper to easily define string ranges for use in Word construction. Borrows
@@ -2120,11 +2120,11 @@ def removeQuotes(s,l,t):
 
 def upcaseTokens(s,l,t):
     """Helper parse action to convert tokens to upper case."""
-    return map( str.upper, t )
+    return list(map( str.upper, t ))
 
 def downcaseTokens(s,l,t):
     """Helper parse action to convert tokens to lower case."""
-    return map( str.lower, t )
+    return list(map( str.lower, t ))
 
 def _makeTags(tagStr, xml):
     """Internal helper to construct opening and closing tag expressions, given a tag name"""
@@ -2185,20 +2185,20 @@ commaSeparatedList = delimitedList( Optional( quotedString | _commasepitem, defa
 if __name__ == "__main__":
 
     def test( teststring ):
-        print teststring,"->",
+        print(teststring,"->", end=' ')
         try:
             tokens = simpleSQL.parseString( teststring )
             tokenlist = tokens.asList()
-            print tokenlist
-            print "tokens = ", tokens
-            print "tokens.columns =", tokens.columns
-            print "tokens.tables =", tokens.tables
-            print tokens.asXML("SQL",True)
-        except ParseException, err:
-            print err.line
-            print " "*(err.column-1) + "^"
-            print err
-        print
+            print(tokenlist)
+            print("tokens = ", tokens)
+            print("tokens.columns =", tokens.columns)
+            print("tokens.tables =", tokens.tables)
+            print(tokens.asXML("SQL",True))
+        except ParseException as err:
+            print(err.line)
+            print(" "*(err.column-1) + "^")
+            print(err)
+        print()
 
     selectToken = CaselessLiteral( "select" )
     fromToken = CaselessLiteral( "from" )
@@ -48,6 +48,6 @@ def getDescription():
 if __name__ == '__main__':
     tag = getDescription()
     writeTag(tag)
-    print tag
+    print(tag)
 
 
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-import commands
+import subprocess
 import sys
 
 
@@ -33,26 +33,26 @@ def main():
 
     """ Force indent """
    cmd = "indent *.c *.h"
-    output = commands.getoutput(cmd)
+    output = subprocess.getoutput(cmd)
 
     """ Force ctags """
     cmd = "ctags *.c *.h"
-    output = commands.getoutput(cmd)
+    output = subprocess.getoutput(cmd)
 
     excludes = ['scanner.c','scanner.h','parser.c','parser.h']
     fnames = findfunctions(excludes)
-    for fname in fnames.keys():
+    for fname in list(fnames.keys()):
         """
         The ..* construct makes sure that function definitions are
         skipped (based on the indent settings
         """
         cmd = "grep '..*%s' *.c" % (fname)
         #print cmd
-        output = commands.getoutput(cmd).splitlines()
+        output = subprocess.getoutput(cmd).splitlines()
         if len(output) <= mincount:
-            print "%s\t%s" % (fnames[fname],fname)
+            print("%s\t%s" % (fnames[fname],fname))
             if len(output) > 0:
-                print output
+                print(output)
 
 if __name__ == '__main__':
     main()
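The commands module was removed in Python 3; subprocess.getoutput() is the drop-in replacement used above. It runs the command through a shell and returns the combined output as a string. A minimal sketch, assuming a POSIX shell and the external tools are available:

import subprocess

# Equivalent of commands.getoutput(cmd) in Python 2.
listing = subprocess.getoutput("ls *.c *.h")
for line in listing.splitlines()[:5]:
    print(line)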
@@ -1,5 +1,5 @@
 
 results:
-	./regression-test.py
+	python3 ./regression-test.py
 
 .PHONY: results
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 """
 	Scyther : An automatic verifier for security protocols.
 	Copyright (C) 2007-2013 Cas Cremers
@@ -99,7 +99,7 @@ def runTests(fn,destdir="."):
     fp = open(fn,'r')
     tests = []
     clen = 0
-    for l in fp.xreadlines():
+    for l in fp:
         if l.startswith("#") or l.startswith("%"):
             continue
         d = l.strip()
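fp.xreadlines() no longer exists in Python 3; iterating over the open file object yields lines lazily in the same way. A short equivalent sketch (the file name is illustrative):

with open("tests.txt") as fp:
    for line in fp:          # replaces fp.xreadlines()
        line = line.strip()
        if line.startswith("#") or line.startswith("%"):
            continue
        print(line)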
@@ -110,8 +110,8 @@
             clen = clen + 1
     fp.close()
 
-    print "Running %i tests." % (clen)
-    print "Destination: %s" % (destdir)
+    print(("Running %i tests." % (clen)))
+    print(("Destination: %s" % (destdir)))
     cnt = 1
     setting = ""
     for l in tests:
@@ -121,10 +121,10 @@
             if len(setting.strip()) == 0:
                 setting = ""
 
-            print "Changing global setting to \"%s\"" % (setting)
+            print(("Changing global setting to \"%s\"" % (setting)))
 
         else:
-            print "%i/%i: Evaluating %s" % (cnt,clen,l+setting)
+            print(("%i/%i: Evaluating %s" % (cnt,clen,l+setting)))
             runTest(l+setting,destdir)
         cnt = cnt + 1
 
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-import commands
+import subprocess
 import sys
 
 class Tag(object):
@@ -70,11 +70,11 @@ def tagoccurs(problems,tag,filter=[]):
     """
 
     cmd = "grep \"\\<%s\\>\" *.[chly]" % tag
-    (reslist,count) = outToRes(commands.getoutput(cmd),[tag.filename])
+    (reslist,count) = outToRes(subprocess.getoutput(cmd),[tag.filename])
     if (len(reslist) == 0) and (count < 2):
         if tag.filename not in filter:
             # this might be a problem, store it
-            if tag.filename not in problems.keys():
+            if tag.filename not in list(problems.keys()):
                 problems[tag.filename] = {}
             problems[tag.filename][tag.id] = count
 
@@ -82,36 +82,36 @@
 
 
 def tagreport(problems):
-    for fn in problems.keys():
-        print "file: %s" % fn
-        for t in problems[fn].keys():
-            print "\t%i\t%s" % (problems[fn][t],t)
+    for fn in list(problems.keys()):
+        print("file: %s" % fn)
+        for t in list(problems[fn].keys()):
+            print("\t%i\t%s" % (problems[fn][t],t))
 
 
 def main():
     # Generate tags
-    print "Generating tags using 'ctags'"
+    print("Generating tags using 'ctags'")
     cmd = "ctags *.c *.h *.l *.y"
-    commands.getoutput(cmd)
+    subprocess.getoutput(cmd)
 
     # Analyze results
-    print "Analyzing results"
+    print("Analyzing results")
     filter = ["scanner.c","parser.c"]
     tags = gettags()
     problems = {}
     total = len(tags)
     count = 0
     steps = 20
-    print "_ " * (steps)
+    print("_ " * (steps))
 
     for t in tags:
         problems = tagoccurs(problems,t,filter)
         count = count + 1
         if count % (total / steps) == 0:
-            print "^",
+            print("^", end=' ')
             sys.stdout.flush()
-    print
-    print
+    print()
+    print()
 
     tagreport (problems)
 
@@ -88,7 +88,7 @@ protocol ffgg%i(A,B)
 
 if __name__ == '__main__':
     if len(sys.argv) > 1:
-        print ffgg(int(sys.argv[1]))
+        print(ffgg(int(sys.argv[1])))
     else:
-        print "Please provide a number n to generate ffgg_n"
+        print("Please provide a number n to generate ffgg_n")
 