#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file

import re, string, traceback
from waflib import Logs, Utils, Errors
from waflib.Logs import debug, error

class PreprocError(Errors.WafError):
	pass

POPFILE = '-'
recursion_limit = 150
go_absolute = False

standard_includes = ['/usr/include']
if Utils.is_win32:
	standard_includes = []

use_trigraphs = 0
strict_quotes = 0

# keyword aliases such as "and" are rewritten to the corresponding operators
g_optrans = {'not':'!', 'and':'&&', 'bitand':'&', 'and_eq':'&=', 'or':'||', 'bitor':'|', 'or_eq':'|=', 'xor':'^', 'xor_eq':'^=', 'compl':'~'}

re_lines = re.compile('^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE)
re_mac = re.compile("^[a-zA-Z_]\w*")
re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE)
re_nl = re.compile('\\\\\r*\n', re.MULTILINE)
re_cpp = re.compile(r"""(/\*[^*]*\*+([^/*][^*]*\*+)*/)|//[^\n]*|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^/"'\\]*)""", re.MULTILINE)
trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')]
# ASCII values of the C escape characters ('\v' is 11 and '\f' is 12)
chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'v':11, 'f':12, 'r':13, '\\':92, "'":39}

NUM = 'i'
OP = 'O'
IDENT = 'T'
STR = 's'
CHAR = 'c'

tok_types = [NUM, STR, IDENT, OP]
exp_types = [
	r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""",
	r'L?"([^"\\]|\\.)*"',
	r'[a-zA-Z_]\w*',
	r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]',
]
re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M)

accepted = 'a'
ignored = 'i'
undefined = 'u'
skipped = 's'

def repl(m):
	# replace a comment by a space, keep string and char literals unchanged
	s = m.group(1)
	if s:
		return ' '
	return m.group(3) or ''

def filter_comments(filename):
	# return the preprocessor directives of a file as (directive, argument) tuples
	code = Utils.readf(filename)
	if use_trigraphs:
		for (a, b) in trig_def:
			code = b.join(code.split(a)) # str.split gives a list, so join on the replacement
	code = re_nl.sub('', code)
	code = re_cpp.sub(repl, code)
	return [(m.group(2), m.group(3)) for m in re.finditer(re_lines, code)]

# operator precedence table: a lower index binds tighter
prec = {}
ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ',']
for x in range(len(ops)):
	syms = ops[x]
	for u in syms.split():
		prec[u] = x

def trimquotes(s):
	if not s: return ''
	s = s.rstrip()
	if s[0] == "'" and s[-1] == "'": return s[1:-1]
	return s

def reduce_nums(val_1, val_2, val_op):
	# apply a binary operator to two values, coercing strings to integers
	try: a = 0 + val_1
	except TypeError: a = int(val_1)
	try: b = 0 + val_2
	except TypeError: b = int(val_2)

	d = val_op
	if d == '%': c = a % b
	elif d == '+': c = a + b
	elif d == '-': c = a - b
	elif d == '*': c = a * b
	elif d == '/': c = a / b
	elif d == '^': c = a ^ b
	elif d == '|': c = a | b
	elif d == '||': c = int(a or b)
	elif d == '&': c = a & b
	elif d == '&&': c = int(a and b)
	elif d == '==': c = int(a == b)
	elif d == '!=': c = int(a != b)
	elif d == '<=': c = int(a <= b)
	elif d == '<': c = int(a < b)
	elif d == '>': c = int(a > b)
	elif d == '>=': c = int(a >= b)
	elif d == '<<': c = a << b
	elif d == '>>': c = a >> b
	else: c = 0
	return c

def get_num(lst):
	# parse the token list for a value: number, unary or parenthesized expression
	if not lst: raise PreprocError("empty list for get_num")
	(p, v) = lst[0]
	if p == OP:
		if v == '(':
			count_par = 1
			i = 1
			while i < len(lst):
				(p, v) = lst[i]
				if p == OP:
					if v == ')':
						count_par -= 1
						if count_par == 0:
							break
					elif v == '(':
						count_par += 1
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)
			num, _ = get_term(lst[1:i])
			return num, lst[i+1:]
		elif v == '+':
			return get_num(lst[1:])
		elif v == '-':
			num, lst = get_num(lst[1:])
			return reduce_nums('-1', num, '*'), lst
		elif v == '!':
			num, lst = get_num(lst[1:])
			return int(not int(num)), lst
		elif v == '~':
			num, lst = get_num(lst[1:])
			return ~ int(num), lst
		else:
			raise PreprocError("Invalid op token %r for get_num" % lst)
	elif p == NUM:
		return v, lst[1:]
	elif p == IDENT:
		# identifiers that survived macro expansion evaluate to 0
		return 0, lst[1:]
	else:
		raise PreprocError("Invalid token %r for get_num" % lst)

def get_term(lst):
	# evaluate an expression, reducing operators by precedence
	if not lst: raise PreprocError("empty list for get_term")
	num, lst = get_num(lst)
	if not lst:
		return (num, [])
	(p, v) = lst[0]
	if p == OP:
		if v == ',':
			# skip
			return get_term(lst[1:])
		elif v == '?':
			# ternary operator: find the matching ':' and evaluate one branch
			count_par = 0
			i = 1
			while i < len(lst):
				(p, v) = lst[i]
				if p == OP:
					if v == ')':
						count_par -= 1
					elif v == '(':
						count_par += 1
					elif v == ':':
						if count_par == 0:
							break
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)

			if int(num):
				return get_term(lst[1:i])
			else:
				return get_term(lst[i+1:])
		else:
			num2, lst = get_num(lst[1:])
			if not lst:
				# no more tokens to process
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)

			# operator precedence
			p2, v2 = lst[0]
			if p2 != OP:
				raise PreprocError("op expected %r" % lst)

			if prec[v2] >= prec[v]:
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)
			else:
				num3, lst = get_num(lst[1:])
				num3 = reduce_nums(num2, num3, v2)
				return get_term([(NUM, num), (p, v), (NUM, num3)] + lst)

	raise PreprocError("cannot reduce %r" % lst)

def reduce_eval(lst):
	# evaluate a list of tokens, return a (NUM, value) token
	num, lst = get_term(lst)
	return (NUM, num)

def stringize(lst):
	# turn a list of tokens back into a string (used by '#' and include parsing)
	lst = [str(v2) for (p2, v2) in lst]
	return "".join(lst)

def paste_tokens(t1, t2):
	# token pasting for '##': the result must itself be a valid token
	p1 = None
	if t1[0] == OP and t2[0] == OP:
		p1 = OP
	elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM):
		p1 = IDENT
	elif t1[0] == NUM and t2[0] == NUM:
		p1 = NUM
	if not p1:
		raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2))
	return (p1, t1[1] + t2[1])
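
# Illustrative note (an editor's sketch, not part of the waf distribution):
# the evaluator above works on (type, value) tuples produced by tokenize(),
# which is defined further down. get_term() folds the operator with the
# smaller prec[] index first, so for instance:
#
#	reduce_eval(tokenize('1 + 2 * 3'))	# ('i', 7): '*' binds tighter than '+'
#	reduce_eval(tokenize('(1 + 2) * 3'))	# ('i', 9): parentheses are handled by get_num()
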
def reduce_tokens(lst, defs, ban=[]):
	# replace the tokens in lst using the macros in defs; ban lists the macros
	# that must not be expanded again (recursion guard)
	i = 0
	while i < len(lst):
		(p, v) = lst[i]

		if p == IDENT and v == "defined":
			del lst[i]
			if i < len(lst):
				(p2, v2) = lst[i]
				if p2 == IDENT:
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				elif p2 == OP and v2 == '(':
					del lst[i]
					(p2, v2) = lst[i]
					del lst[i] # remove the ident, and change the ) for the value
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				else:
					raise PreprocError("Invalid define expression %r" % lst)

		elif p == IDENT and v in defs:
			if isinstance(defs[v], str):
				a, b = extract_macro(defs[v])
				defs[v] = b
			macro_def = defs[v]
			to_add = macro_def[1]

			if isinstance(macro_def[0], list):
				# macro without arguments
				del lst[i]
				accu = to_add[:]
				reduce_tokens(accu, defs, ban + [v])
				for x in range(len(accu)):
					lst.insert(i, accu[x])
					i += 1
			else:
				# collect the arguments for the function-like macro call
				args = []
				del lst[i]

				if i >= len(lst):
					raise PreprocError("expected '(' after %r (got nothing)" % v)

				(p2, v2) = lst[i]
				if p2 != OP or v2 != '(':
					raise PreprocError("expected '(' after %r" % v)

				del lst[i]

				one_param = []
				count_paren = 0
				while i < len(lst):
					p2, v2 = lst[i]

					del lst[i]
					if p2 == OP and count_paren == 0:
						if v2 == '(':
							one_param.append((p2, v2))
							count_paren += 1
						elif v2 == ')':
							if one_param: args.append(one_param)
							break
						elif v2 == ',':
							if not one_param: raise PreprocError("empty param in funcall %s" % v)
							args.append(one_param)
							one_param = []
						else:
							one_param.append((p2, v2))
					else:
						one_param.append((p2, v2))
						if v2 == '(': count_paren += 1
						elif v2 == ')': count_paren -= 1

				# substitute the arguments within the define expression
				accu = []
				arg_table = macro_def[0]
				j = 0
				while j < len(to_add):
					(p2, v2) = to_add[j]

					if p2 == OP and v2 == '#':
						# stringize is for arguments only
						if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
							toks = args[arg_table[to_add[j+1][1]]]
							accu.append((STR, stringize(toks)))
							j += 1
						else:
							accu.append((p2, v2))
					elif p2 == OP and v2 == '##':
						# token pasting
						if accu and j+1 < len(to_add):
							# we have at least two tokens
							t1 = accu[-1]
							if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
								toks = args[arg_table[to_add[j+1][1]]]
								if toks:
									accu[-1] = paste_tokens(t1, toks[0])
									accu.extend(toks[1:])
								else:
									# error, case "a##"
									accu.append((p2, v2))
									accu.extend(toks)
							elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__':
								# first collect the tokens
								va_toks = []
								st = len(macro_def[0])
								pt = len(args)
								for x in args[pt-st+1:]:
									va_toks.extend(x)
									va_toks.append((OP, ','))
								if va_toks: va_toks.pop() # extra comma
								if len(accu) > 1:
									(p3, v3) = accu[-1]
									(p4, v4) = accu[-2]
									if v3 == '##':
										# remove the token paste
										accu.pop()
										if v4 == ',' and pt < st:
											# remove the comma
											accu.pop()
								accu += va_toks
							else:
								accu[-1] = paste_tokens(t1, to_add[j+1])

							j += 1
						else:
							# invalid paste, case "##a" or "b##"
							accu.append((p2, v2))

					elif p2 == IDENT and v2 in arg_table:
						toks = args[arg_table[v2]]
						reduce_tokens(toks, defs, ban + [v])
						accu.extend(toks)
					else:
						accu.append((p2, v2))

					j += 1

				reduce_tokens(accu, defs, ban + [v])

				for x in range(len(accu)):
					lst.insert(i, accu[x])
					i += 1

		i += 1

def eval_macro(lst, defs):
	# evaluate an #if/#elif expression, return a boolean
	reduce_tokens(lst, defs, [])
	if not lst: raise PreprocError("missing tokens to evaluate")
	(p, v) = reduce_eval(lst)
	return int(v) != 0

def extract_macro(txt):
	# parse a macro definition such as "f(x, y) x * y" into its name and a
	# [parameter table, token list] body
	t = tokenize(txt)
	if re_fun.search(txt):
		p, name = t[0]

		p, v = t[1]
		if p != OP: raise PreprocError("expected open parenthesis")

		i = 1
		pindex = 0
		params = {}
		prev = '('

		while 1:
			i += 1
			p, v = t[i]

			if prev == '(':
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("unexpected token (3)")
			elif prev == IDENT:
				if p == OP and v == ',':
					prev = v
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("comma or ... expected")
			elif prev == ',':
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == '...':
					raise PreprocError("not implemented (1)")
				else:
					raise PreprocError("comma or ... expected (2)")
			elif prev == '...':
				raise PreprocError("not implemented (2)")
			else:
				raise PreprocError("unexpected else")

		return (name, [params, t[i+1:]])
	else:
		(p, v) = t[0]
		return (v, [[], t[1:]])

re_include = re.compile('^\s*(<(?P<a>.*)>|"(?P<b>.*)")')

def extract_include(txt, defs):
	# process a line in the form "#include foo" to return the file name
	m = re_include.search(txt)
	if m:
		if m.group('a'): return '<', m.group('a')
		if m.group('b'): return '"', m.group('b')

	# perform preprocessing and look at the result, it must match an include
	toks = tokenize(txt)
	reduce_tokens(toks, defs, ['waf_include'])

	if not toks:
		raise PreprocError("could not parse include %s" % txt)

	if len(toks) == 1:
		if toks[0][0] == STR:
			return '"', toks[0][1]
	else:
		if toks[0][1] == '<' and toks[-1][1] == '>':
			# return a (kind, file name) pair like the other branches
			return '<', stringize(toks).lstrip('<').rstrip('>')

	raise PreprocError("could not parse include %s." % txt)

def parse_char(txt):
	# parse a C character literal and return its value
	if not txt: raise PreprocError("attempted to parse a null char")
	if txt[0] != '\\':
		return ord(txt)
	c = txt[1]
	if c == 'x':
		return int(txt[2:], 16)
	elif c.isdigit():
		if c == '0' and len(txt) == 2: return 0
		for i in 3, 2, 1:
			if len(txt) > i and txt[1:1+i].isdigit():
				return (1+i, int(txt[1:1+i], 8))
	else:
		try: return chr_esc[c]
		except KeyError: raise PreprocError("could not parse char literal '%s'" % txt)

def tokenize(s):
	# return a fresh copy: token lists are mutated during macro expansion
	return tokenize_private(s)[:]

@Utils.run_once
def tokenize_private(s):
	# convert a line into a list of (type, value) tokens
	ret = []
	for match in re_clexer.finditer(s):
		m = match.group
		for name in tok_types:
			v = m(name)
			if v:
				if name == IDENT:
					try: v = g_optrans[v]; name = OP
					except KeyError:
						# c++ may treat 'true' and 'false' as keywords
						if v.lower() == "true":
							v = 1
							name = NUM
						elif v.lower() == "false":
							v = 0
							name = NUM
				elif name == NUM:
					if m('oct'): v = int(v, 8)
					elif m('hex'): v = int(m('hex'), 16)
					elif m('n0'): v = m('n0')
					else:
						v = m('char')
						if v: v = parse_char(v)
						else: v = m('n2') or m('n4')
				elif name == OP:
					if v == '%:': v = '#'
					elif v == '%:%:': v = '##'
				elif name == STR:
					# remove the quotes around the string
					v = v[1:-1]
				ret.append((name, v))
				break
	return ret

@Utils.run_once
def define_name(line):
	# return the macro name from a "#define" argument
	return re_mac.match(line).group(0)
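
# Illustrative note (an editor's sketch, not part of the waf distribution):
# a '#define' is stored as raw text and parsed lazily; reduce_tokens() calls
# extract_macro() the first time the macro is actually used. For example:
#
#	name, body = extract_macro('ADD(a, b) a + b')
#	# name == 'ADD' and body == [{'a': 0, 'b': 1}, tokenize('a + b')]
#	toks = tokenize('ADD(1, 2) == 3')
#	reduce_tokens(toks, {name: body})
#	# toks now equals tokenize('1 + 2 == 3'), so eval_macro() on the
#	# original line would return True
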
class c_parser(object):
	def __init__(self, nodepaths=None, defines=None):
		self.lines = []
		if defines is None:
			self.defs = {}
		else:
			self.defs = dict(defines)
		self.state = []
		self.count_files = 0
		self.currentnode_stack = []
		self.nodepaths = nodepaths or []
		self.nodes = []
		self.names = []
		self.curfile = ''
		self.ban_includes = set()

	def cached_find_resource(self, node, filename):
		# find a file from the input directory or from an include path, cached
		try:
			nd = node.ctx.cache_nd
		except AttributeError:
			nd = node.ctx.cache_nd = {}
		tup = (node, filename)
		try:
			return nd[tup]
		except KeyError:
			ret = node.find_resource(filename)
			if ret:
				if getattr(ret, 'children', None):
					ret = None
				elif ret.is_child_of(node.ctx.bldnode):
					tmp = node.ctx.srcnode.search_node(ret.path_from(node.ctx.bldnode))
					if tmp and getattr(tmp, 'children', None):
						ret = None
			nd[tup] = ret
			return ret

	def tryfind(self, filename):
		# try to obtain a node from the filename based on the include paths
		self.curfile = filename
		found = self.cached_find_resource(self.currentnode_stack[-1], filename)
		for n in self.nodepaths:
			if found:
				break
			found = self.cached_find_resource(n, filename)
		if found:
			self.nodes.append(found)
			if filename[-4:] != '.moc': # .moc files are generated, do not parse them
				self.addlines(found)
		else:
			if filename not in self.names:
				self.names.append(filename)
		return found

	def addlines(self, node):
		# add the lines of a header to the list of lines to process
		self.currentnode_stack.append(node.parent)
		filepath = node.abspath()

		self.count_files += 1
		if self.count_files > recursion_limit:
			raise PreprocError("recursion limit exceeded")

		pc = self.parse_cache
		debug('preproc: reading file %r', filepath)
		try:
			lns = pc[filepath]
		except KeyError:
			pass
		else:
			self.lines.extend(lns)
			return

		try:
			lines = filter_comments(filepath)
			lines.append((POPFILE, ''))
			lines.reverse()
			pc[filepath] = lines # cache the filtered lines
			self.lines.extend(lines)
		except IOError:
			raise PreprocError("could not read the file %s" % filepath)
		except Exception:
			if Logs.verbose > 0:
				error("parsing %s failed" % filepath)
				traceback.print_exc()

	def start(self, node, env):
		# preprocess a source file to obtain its dependencies
		debug('preproc: scanning %s (in %s)', node.name, node.parent.name)

		bld = node.ctx
		try:
			self.parse_cache = bld.parse_cache
		except AttributeError:
			bld.parse_cache = {}
			self.parse_cache = bld.parse_cache

		self.addlines(node)

		if env['DEFINES']:
			try:
				lst = ['%s %s' % (x[0], trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in env['DEFINES']]]
				lst.reverse()
				self.lines.extend([('define', x) for x in lst])
			except AttributeError:
				# if the defines are invalid, the compiler will report it anyway
				pass

		while self.lines:
			(token, line) = self.lines.pop()
			if token == POPFILE:
				self.count_files -= 1
				self.currentnode_stack.pop()
				continue

			try:
				ve = Logs.verbose
				if ve: debug('preproc: line is %s - %s state is %s', token, line, self.state)
				state = self.state

				# make certain we define the state if we are about to enter an if block
				if token[:2] == 'if':
					state.append(undefined)
				elif token == 'endif':
					state.pop()

				# skip lines when in a dead 'if' branch, wait for the endif
				if token[0] != 'e':
					if skipped in self.state or ignored in self.state:
						continue

				if token == 'if':
					ret = eval_macro(tokenize(line), self.defs)
					if ret: state[-1] = accepted
					else: state[-1] = ignored
				elif token == 'ifdef':
					m = re_mac.match(line)
					if m and m.group(0) in self.defs: state[-1] = accepted
					else: state[-1] = ignored
				elif token == 'ifndef':
					m = re_mac.match(line)
					if m and m.group(0) in self.defs: state[-1] = ignored
					else: state[-1] = accepted
				elif token == 'include' or token == 'import':
					(kind, inc) = extract_include(line, self.defs)
					if inc in self.ban_includes:
						continue
					if token == 'import': self.ban_includes.add(inc)
					if ve: debug('preproc: include found %s (%s) ', inc, kind)
					if kind == '"' or not strict_quotes:
						self.tryfind(inc)
				elif token == 'elif':
					if state[-1] == accepted:
						state[-1] = skipped
					elif state[-1] == ignored:
						if eval_macro(tokenize(line), self.defs):
							state[-1] = accepted
				elif token == 'else':
					if state[-1] == accepted: state[-1] = skipped
					elif state[-1] == ignored: state[-1] = accepted
				elif token == 'define':
					try:
						self.defs[define_name(line)] = line
					except Exception:
						raise PreprocError("Invalid define line %s" % line)
				elif token == 'undef':
					m = re_mac.match(line)
					if m and m.group(0) in self.defs:
						del self.defs[m.group(0)]
				elif token == 'pragma':
					if re_pragma_once.match(line.lower()):
						self.ban_includes.add(self.curfile)
			except Exception as e:
				if Logs.verbose:
					debug('preproc: line parsing failed (%s): %s %s', e, line, Utils.ex_stack())

def scan(task):
	# c/cxx task scanner: return the dependency nodes and the unresolved names
	global go_absolute
	try:
		incn = task.generator.includes_nodes
	except AttributeError:
		raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % task.generator)

	if go_absolute:
		nodepaths = incn + [task.generator.bld.root.find_dir(x) for x in standard_includes]
	else:
		nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)]

	tmp = c_parser(nodepaths)
	tmp.start(task.inputs[0], task.env)
	if Logs.verbose:
		debug('deps: deps for %r: %r; unresolved %r' % (task.inputs, tmp.nodes, tmp.names))
	return (tmp.nodes, tmp.names)
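
if __name__ == '__main__':
	# Editor's smoke test, a sketch rather than part of waf proper: it assumes
	# this module is executed where the waflib package is importable. Only the
	# pure helpers are exercised, so no build context is required.
	print(tokenize('1 + 2 * 3'))
	# [('i', '1'), ('O', '+'), ('i', '2'), ('O', '*'), ('i', '3')]
	print(eval_macro(tokenize('1 + 2 * 3 == 7'), {}))	# True
	print(extract_include('<stdio.h>', {}))	# ('<', 'stdio.h')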