Include waf as an extracted source directory, rather than as a one-in-a-file script.
[samba.git] / buildtools / wafadmin / Tools / preproc.py
1 #!/usr/bin/env python
2 # encoding: utf-8
3 # Thomas Nagy, 2006-2009 (ita)
4
5 """
6 C/C++ preprocessor for finding dependencies
7
8 Reasons for using the Waf preprocessor by default
9 1. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files)
10 2. Not all compilers provide .d files for obtaining the dependencies (portability)
11 3. A naive file scanner will not catch the constructs such as "#include foo()"
12 4. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything)
13
14 Regarding the speed concerns:
15 a. the preprocessing is performed only when files must be compiled
16 b. the macros are evaluated only for #if/#elif/#include
17 c. the time penalty is about 10%
18 d. system headers are not scanned
19
20 Now if you do not want the Waf preprocessor, the tool "gccdeps" uses the .d files produced
21 during the compilation to track the dependencies (useful when used with the boost libraries).
22 It only works with gcc though, and it cannot be used with Qt builds. A dumb
file scanner will be added in the future, so we will have most behaviours.
24 """
25 # TODO: more varargs, pragma once
26 # TODO: dumb file scanner tracking all includes
27
28 import re, sys, os, string
29 import Logs, Build, Utils
30 from Logs import debug, error
31 import traceback
32
class PreprocError(Utils.WafError):
	"""Error raised when a preprocessor construct cannot be parsed or evaluated."""
	pass
35
# marker pushed on the line stack to signal the end of an included file
POPFILE = '-'


recursion_limit = 5000
"do not loop too much on header inclusion"

go_absolute = 0
"set to 1 to track headers on files in /usr/include - else absolute paths are ignored"

# directories holding "system" headers, which are not scanned by default
standard_includes = ['/usr/include']
if sys.platform == "win32":
	standard_includes = []

use_trigraphs = 0
'apply the trigraph rules first'

strict_quotes = 0
"Keep <> for system includes (do not search for those includes)"

# mapping of the c++ alternative operator spellings to their symbolic form
g_optrans = {
'not':'!',
'and':'&&',
'bitand':'&',
'and_eq':'&=',
'or':'||',
'bitor':'|',
'or_eq':'|=',
'xor':'^',
'xor_eq':'^=',
'compl':'~',
}
"these ops are for c++, to reset, set an empty dict"
68
# ignore #warning and #error
# group 2 is the directive keyword, group 3 the rest of the line
re_lines = re.compile(\
	'^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$',
	re.IGNORECASE | re.MULTILINE)

# macro name at the start of a #define body
re_mac = re.compile("^[a-zA-Z_]\w*")
# function-like macro: a name immediately followed by an open parenthesis
re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
# "#pragma once" detection
re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE)
# backslash line continuations, stripped before scanning
re_nl = re.compile('\\\\\r*\n', re.MULTILINE)
# group 1: comments (replaced by a space), group 2: strings/chars/code (kept)
re_cpp = re.compile(
	r"""(/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)|//[^\n]*|("(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|.[^/"'\\]*)""",
	re.MULTILINE)
# trigraph sequences, e.g. '??=' -> '#'
trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')]
# simple character escapes -> their ascii codes
chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39}

# token type identifiers used throughout this module
NUM   = 'i'
OP    = 'O'
IDENT = 'T'
STR   = 's'
CHAR  = 'c'

tok_types = [NUM, STR, IDENT, OP]
# one regular expression per token type, combined into re_clexer below
exp_types = [
	r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""",
	r'L?"([^"\\]|\\.)*"',
	r'[a-zA-Z_]\w*',
	r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]',
]
re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M)

# possible states of the #if/#endif evaluation stack
accepted  = 'a'
ignored   = 'i'
undefined = 'u'
skipped   = 's'
103
def repl(m):
	"""re_cpp substitution callback: comments collapse to a single space, code and string literals are kept"""
	if m.group(1):
		return ' '
	return m.group(2) or ''
111
def filter_comments(filename):
	"""
	Read the file and return its preprocessor directives as a list of
	(keyword, line) tuples, after removing comments and line continuations.
	"""
	code = Utils.readf(filename)
	if use_trigraphs:
		# bug fix: str.split() returns a list, which has no 'join' method -
		# the trigraph substitution must be a plain string replacement
		for (a, b) in trig_def: code = code.replace(a, b)
	code = re_nl.sub('', code)
	code = re_cpp.sub(repl, code)
	return [(m.group(2), m.group(3)) for m in re.finditer(re_lines, code)]
120
# operator precedence table: op -> level (lower binds tighter),
# needed for expressions such as:   #if 1 && 2 != 0
ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ',']
prec = dict((op, level) for level, syms in enumerate(ops) for op in syms.split())
128
def reduce_nums(val_1, val_2, val_op):
	"""
	Apply the arithmetic operator val_op to val_1 and val_2 and return an
	integer result.  The operands may be numbers or strings holding numbers
	(decimal tokens are kept as strings by the lexer).
	"""
	# coerce the operands to numbers ('0 + x' keeps real numbers unchanged,
	# raises TypeError for strings which are then converted explicitly)
	try:    a = 0 + val_1
	except TypeError: a = int(val_1)
	try:    b = 0 + val_2
	except TypeError: b = int(val_2)

	d = val_op
	if d == '%':  c = a%b
	elif d=='+':  c = a+b
	elif d=='-':  c = a-b
	elif d=='*':  c = a*b
	elif d=='/':  c = a//b # integer division, as in c (same as '/' under python 2)
	elif d=='^':  c = a^b
	elif d=='|':  c = a|b
	elif d=='||': c = int(a or b)
	elif d=='&':  c = a&b
	elif d=='&&': c = int(a and b)
	elif d=='==': c = int(a == b)
	elif d=='!=': c = int(a != b)
	elif d=='<=': c = int(a <= b)
	elif d=='<':  c = int(a < b)
	elif d=='>':  c = int(a > b)
	elif d=='>=': c = int(a >= b)
	elif d=='<<': c = a<<b
	elif d=='>>': c = a>>b
	else: c = 0 # unknown operator: evaluate to 0 rather than fail
	# note: the original code had a second, unreachable "elif d=='^'" branch here
	return c
161
def get_num(lst):
	"""
	Parse the first operand (number, identifier, parenthesized or unary
	expression) from a token list and return (value, remaining_tokens).
	"""
	if not lst: raise PreprocError("empty list for get_num")
	(p, v) = lst[0]
	if p == OP:
		if v == '(':
			# find the matching closing parenthesis
			count_par = 1
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
						if count_par == 0:
							break
					elif v == '(':
						count_par += 1
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)

			(num, _) = get_term(lst[1:i])
			return (num, lst[i+1:])

		elif v == '+':
			return get_num(lst[1:])
		elif v == '-':
			num, lst = get_num(lst[1:])
			return (reduce_nums('-1', num, '*'), lst)
		elif v == '!':
			num, lst = get_num(lst[1:])
			return (int(not int(num)), lst)
		elif v == '~':
			# bug fix: the operand must be parsed first ('num' was unbound here,
			# so '#if ~x' raised a NameError)
			num, lst = get_num(lst[1:])
			return (~ int(num), lst)
		else:
			raise PreprocError("invalid op token %r for get_num" % lst)
	elif p == NUM:
		return v, lst[1:]
	elif p == IDENT:
		# all macros should have been replaced, remaining identifiers eval to 0
		return 0, lst[1:]
	else:
		raise PreprocError("invalid token %r for get_num" % lst)
205
def get_term(lst):
	"""
	Evaluate a token list representing a (partial) #if expression and return
	(value, remaining_tokens).  Handles short-circuit evaluation, the comma
	and ternary operators, and binary operator precedence (via 'prec',
	where a lower level means tighter binding).
	"""
	if not lst: raise PreprocError("empty list for get_term")
	num, lst = get_num(lst)
	if not lst:
		return (num, [])
	(p, v) = lst[0]
	if p == OP:
		if v == '&&' and not num:
			# short-circuit: 0 && ... is 0, the rest is not evaluated
			return (num, [])
		elif v == '||' and num:
			# short-circuit: non-zero || ... is true
			return (num, [])
		elif v == ',':
			# skip
			return get_term(lst[1:])
		elif v == '?':
			# ternary operator: locate the ':' at the current nesting level
			count_par = 0
			i = 1
			while i < len(lst):
				(p, v) = lst[i]

				if p == OP:
					if v == ')':
						count_par -= 1
					elif v == '(':
						count_par += 1
					elif v == ':':
						if count_par == 0:
							break
				i += 1
			else:
				raise PreprocError("rparen expected %r" % lst)

			# evaluate only the selected branch
			if int(num):
				return get_term(lst[1:i])
			else:
				return get_term(lst[i+1:])

		else:
			num2, lst = get_num(lst[1:])

			if not lst:
				# no more tokens to process
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)

			# operator precedence
			p2, v2 = lst[0]
			if p2 != OP:
				raise PreprocError("op expected %r" % lst)

			if prec[v2] >= prec[v]:
				# v binds at least as tightly as v2: fold 'num v num2' now
				num2 = reduce_nums(num, num2, v)
				return get_term([(NUM, num2)] + lst)
			else:
				# v2 binds more tightly: fold 'num2 v2 num3' first
				num3, lst = get_num(lst[1:])
				num3 = reduce_nums(num2, num3, v2)
				return get_term([(NUM, num), (p, v), (NUM, num3)] + lst)


	raise PreprocError("cannot reduce %r" % lst)
266
def reduce_eval(lst):
	"""evaluate a token list (#if/#elif condition) and return the result as a NUM token"""
	return (NUM, get_term(lst)[0])
271
def stringize(lst):
	"""concatenate the values of a token list into a single string"""
	return "".join(str(v) for (_, v) in lst)
276
def paste_tokens(t1, t2):
	"""
	Token pasting (the ## operator), for example:
	 a ## b  ->  ab
	 > ## =  ->  >=
	 a ## 2  ->  a2
	"""
	k1, k2 = t1[0], t2[0]
	if k1 == OP and k2 == OP:
		kind = OP
	elif k1 == IDENT and (k2 == IDENT or k2 == NUM):
		kind = IDENT
	elif k1 == NUM and k2 == NUM:
		kind = NUM
	else:
		kind = None
	if not kind:
		raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2))
	return (kind, t1[1] + t2[1])
294
def reduce_tokens(lst, defs, ban=[]):
	"""
	Replace the tokens in lst (modified in place), using the macros provided
	in defs.  ban lists macro names that cannot be re-applied; it is only
	read and copied (ban+[v]), never mutated, so the mutable default is
	harmless here.
	"""
	i = 0

	while i < len(lst):
		(p, v) = lst[i]

		if p == IDENT and v == "defined":
			# handle 'defined X' and 'defined(X)': replace by 0 or 1
			del lst[i]
			if i < len(lst):
				(p2, v2) = lst[i]
				if p2 == IDENT:
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				elif p2 == OP and v2 == '(':
					del lst[i]
					(p2, v2) = lst[i]
					del lst[i] # remove the ident, and change the ) for the value
					if v2 in defs:
						lst[i] = (NUM, 1)
					else:
						lst[i] = (NUM, 0)
				else:
					raise PreprocError("invalid define expression %r" % lst)

		elif p == IDENT and v in defs:

			if isinstance(defs[v], str):
				# the macro is stored as raw '#define' text: parse it once and cache
				a, b = extract_macro(defs[v])
				defs[v] = b
			macro_def = defs[v]
			to_add = macro_def[1]

			if isinstance(macro_def[0], list):
				# macro without arguments
				del lst[i]
				for x in xrange(len(to_add)):
					lst.insert(i, to_add[x])
					i += 1
			else:
				# collect the arguments for the funcall

				args = []
				del lst[i]

				if i >= len(lst):
					raise PreprocError("expected '(' after %r (got nothing)" % v)

				(p2, v2) = lst[i]
				if p2 != OP or v2 != '(':
					raise PreprocError("expected '(' after %r" % v)

				del lst[i]

				one_param = []
				count_paren = 0
				while i < len(lst):
					p2, v2 = lst[i]

					del lst[i]
					if p2 == OP and count_paren == 0:
						if v2 == '(':
							one_param.append((p2, v2))
							count_paren += 1
						elif v2 == ')':
							# end of the argument list
							if one_param: args.append(one_param)
							break
						elif v2 == ',':
							if not one_param: raise PreprocError("empty param in funcall %s" % p)
							args.append(one_param)
							one_param = []
						else:
							one_param.append((p2, v2))
					else:
						# inside nested parentheses: commas do not split arguments
						one_param.append((p2, v2))
						if   v2 == '(': count_paren += 1
						elif v2 == ')': count_paren -= 1
				else:
					raise PreprocError('malformed macro')

				# substitute the arguments within the define expression
				accu = []
				arg_table = macro_def[0]
				j = 0
				while j < len(to_add):
					(p2, v2) = to_add[j]

					if p2 == OP and v2 == '#':
						# stringize is for arguments only
						if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
							toks = args[arg_table[to_add[j+1][1]]]
							accu.append((STR, stringize(toks)))
							j += 1
						else:
							accu.append((p2, v2))
					elif p2 == OP and v2 == '##':
						# token pasting, how can man invent such a complicated system?
						if accu and j+1 < len(to_add):
							# we have at least two tokens

							t1 = accu[-1]

							if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
								# paste with a macro argument
								toks = args[arg_table[to_add[j+1][1]]]

								if toks:
									accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1])
									accu.extend(toks[1:])
								else:
									# error, case "a##"
									accu.append((p2, v2))
									accu.extend(toks)
							elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__':
								# TODO not sure
								# first collect the tokens
								va_toks = []
								st = len(macro_def[0])
								pt = len(args)
								for x in args[pt-st+1:]:
									va_toks.extend(x)
									va_toks.append((OP, ','))
								if va_toks: va_toks.pop() # extra comma
								if len(accu)>1:
									(p3, v3) = accu[-1]
									(p4, v4) = accu[-2]
									if v3 == '##':
										# remove the token paste
										accu.pop()
										if v4 == ',' and pt < st:
											# remove the comma
											accu.pop()
								accu += va_toks
							else:
								accu[-1] = paste_tokens(t1, to_add[j+1])

							j += 1
						else:
							# invalid paste, case    "##a" or "b##"
							accu.append((p2, v2))

					elif p2 == IDENT and v2 in arg_table:
						# formal parameter: expand the corresponding argument and inline it
						toks = args[arg_table[v2]]
						reduce_tokens(toks, defs, ban+[v])
						accu.extend(toks)
					else:
						accu.append((p2, v2))

					j += 1


				# rescan the expansion, then splice it back into lst at position i
				reduce_tokens(accu, defs, ban+[v])

				for x in xrange(len(accu)-1, -1, -1):
					lst.insert(i, accu[x])

		i += 1
453
454
def eval_macro(lst, adefs):
	"""expand the macros in the token list lst, then evaluate it as a #if/#elif condition (True/False)"""
	reduce_tokens(lst, adefs, [])
	if not lst:
		raise PreprocError("missing tokens to evaluate")
	tok = reduce_eval(lst)
	return int(tok[1]) != 0
461
def extract_macro(txt):
	"""
	Process a macro definition from "#define f(x, y) x * y" into a function
	or a simple macro without arguments.  Returns (name, [params, tokens])
	where params maps parameter names to their position for function-like
	macros, or is an empty list for object-like macros.
	"""
	t = tokenize(txt)
	if re_fun.search(txt):
		# function-like macro: parse the parameter list
		p, name = t[0]

		p, v = t[1]
		if p != OP: raise PreprocError("expected open parenthesis")

		i = 1
		pindex = 0
		params = {}
		prev = '('

		# small state machine over the parameter list; prev is one of
		# '(' (just opened), IDENT (after a parameter name), ',' (after a comma)
		while 1:
			i += 1
			p, v = t[i]

			if prev == '(':
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("unexpected token (3)")
			elif prev == IDENT:
				if p == OP and v == ',':
					prev = v
				elif p == OP and v == ')':
					break
				else:
					raise PreprocError("comma or ... expected")
			elif prev == ',':
				if p == IDENT:
					params[v] = pindex
					pindex += 1
					prev = p
				elif p == OP and v == '...':
					# varargs are not supported yet (see the TODO at the top of the file)
					raise PreprocError("not implemented (1)")
				else:
					raise PreprocError("comma or ... expected (2)")
			elif prev == '...':
				raise PreprocError("not implemented (2)")
			else:
				raise PreprocError("unexpected else")

		#~ print (name, [params, t[i+1:]])
		return (name, [params, t[i+1:]])
	else:
		(p, v) = t[0]
		# object-like macro: no parameter table, the body is the remaining tokens
		return (v, [[], t[1:]])
515
re_include = re.compile('^\s*(<(?P<a>.*)>|"(?P<b>.*)")')
def extract_include(txt, defs):
	"""
	Process a line in the form "#include foo" and return a tuple
	(delimiter, filename), where the delimiter is '<' for system
	includes and '"' for local ones.
	"""
	m = re_include.search(txt)
	if m:
		if m.group('a'): return '<', m.group('a')
		if m.group('b'): return '"', m.group('b')

	# perform preprocessing and look at the result, it must match an include
	toks = tokenize(txt)
	reduce_tokens(toks, defs, ['waf_include'])

	if not toks:
		raise PreprocError("could not parse include %s" % txt)

	if len(toks) == 1:
		if toks[0][0] == STR:
			return '"', toks[0][1]
	else:
		if toks[0][1] == '<' and toks[-1][1] == '>':
			# bug fix: return the same (delimiter, name) tuple shape as the
			# other branches instead of a bare string
			return '<', stringize(toks).lstrip('<').rstrip('>')

	raise PreprocError("could not parse include %s." % txt)
539
def parse_char(txt):
	"""
	Parse the content of a c character literal (without the surrounding
	quotes) and return its integer value.
	"""
	if not txt: raise PreprocError("attempted to parse a null char")
	if txt[0] != '\\':
		return ord(txt)
	c = txt[1]
	if c == 'x':
		# hexadecimal escape \xNN (the original code had the same return
		# duplicated behind a redundant length check)
		return int(txt[2:], 16)
	elif c.isdigit():
		# octal escape \N, \NN or \NNN
		# bug fix: return the value itself, not a (length, value) tuple
		if c == '0' and len(txt) == 2: return 0
		for i in 3, 2, 1:
			if len(txt) > i and txt[1:1+i].isdigit():
				return int(txt[1:1+i], 8)
		raise PreprocError("could not parse char literal '%s'" % txt)
	else:
		# named escape such as \n or \t
		try: return chr_esc[c]
		except KeyError: raise PreprocError("could not parse char literal '%s'" % txt)
556
@Utils.run_once
def tokenize(s):
	"""convert a string into a list of (type, value) tokens (shlex.split does not apply to c/c++/d)"""
	ret = []
	for match in re_clexer.finditer(s):
		m = match.group
		for name in tok_types:
			v = m(name)
			if v:
				if name == IDENT:
					# c++ alternative operator spellings ('and', 'or', ...)
					try: v = g_optrans[v]; name = OP
					except KeyError:
						# c++ specific
						if v.lower() == "true":
							v = 1
							name = NUM
						elif v.lower() == "false":
							v = 0
							name = NUM
				elif name == NUM:
					if m('oct'): v = int(v, 8)
					elif m('hex'): v = int(m('hex'), 16)
					elif m('n0'): v = m('n0') # decimal: kept as a string, converted lazily by reduce_nums
					else:
						v = m('char')
						if v: v = parse_char(v)
						else: v = m('n2') or m('n4')
				elif name == OP:
					# normalize the digraphs %: and %:%: to # and ##
					if v == '%:': v = '#'
					elif v == '%:%:': v = '##'
				elif name == STR:
					# remove the quotes around the string
					v = v[1:-1]
				ret.append((name, v))
				break
	return ret
593
@Utils.run_once
def define_name(line):
	"""return the macro name at the start of a '#define' line body"""
	m = re_mac.match(line)
	return m.group(0)
597
598 class c_parser(object):
599         def __init__(self, nodepaths=None, defines=None):
600                 #self.lines = txt.split('\n')
601                 self.lines = []
602
603                 if defines is None:
604                         self.defs  = {}
605                 else:
606                         self.defs  = dict(defines) # make a copy
607                 self.state = []
608
609                 self.env   = None # needed for the variant when searching for files
610
611                 self.count_files = 0
612                 self.currentnode_stack = []
613
614                 self.nodepaths = nodepaths or []
615
616                 self.nodes = []
617                 self.names = []
618
619                 # file added
620                 self.curfile = ''
621                 self.ban_includes = set([])
622
623         def cached_find_resource(self, node, filename):
624                 try:
625                         nd = node.bld.cache_nd
626                 except:
627                         nd = node.bld.cache_nd = {}
628
629                 tup = (node.id, filename)
630                 try:
631                         return nd[tup]
632                 except KeyError:
633                         ret = node.find_resource(filename)
634                         nd[tup] = ret
635                         return ret
636
637         def tryfind(self, filename):
638                 self.curfile = filename
639
640                 # for msvc it should be a for loop on the whole stack
641                 found = self.cached_find_resource(self.currentnode_stack[-1], filename)
642
643                 for n in self.nodepaths:
644                         if found:
645                                 break
646                         found = self.cached_find_resource(n, filename)
647
648                 if found:
649                         self.nodes.append(found)
650                         if filename[-4:] != '.moc':
651                                 self.addlines(found)
652                 else:
653                         if not filename in self.names:
654                                 self.names.append(filename)
655                 return found
656
	def addlines(self, node):
		"""
		Push the preprocessor directives of the file behind 'node' onto
		self.lines (reversed, so they can be popped in order), using the
		per-build parse cache when possible.
		"""

		self.currentnode_stack.append(node.parent)
		filepath = node.abspath(self.env)

		self.count_files += 1
		# guard against include cycles and pathological nesting
		if self.count_files > recursion_limit: raise PreprocError("recursion limit exceeded")
		pc = self.parse_cache
		debug('preproc: reading file %r', filepath)
		try:
			lns = pc[filepath]
		except KeyError:
			pass
		else:
			# cache hit: reuse the previously filtered lines
			self.lines.extend(lns)
			return

		try:
			lines = filter_comments(filepath)
			lines.append((POPFILE, ''))
			lines.reverse()
			pc[filepath] = lines # cache the lines filtered
			self.lines.extend(lines)
		except IOError:
			raise PreprocError("could not read the file %s" % filepath)
		except Exception:
			# scanning is best-effort: log the failure but do not stop the build
			if Logs.verbose > 0:
				error("parsing %s failed" % filepath)
				traceback.print_exc()
686
687         def start(self, node, env):
688                 debug('preproc: scanning %s (in %s)', node.name, node.parent.name)
689
690                 self.env = env
691                 variant = node.variant(env)
692                 bld = node.__class__.bld
693                 try:
694                         self.parse_cache = bld.parse_cache
695                 except AttributeError:
696                         bld.parse_cache = {}
697                         self.parse_cache = bld.parse_cache
698
699                 self.addlines(node)
700                 if env['DEFLINES']:
701                         lst = [('define', x) for x in env['DEFLINES']]
702                         lst.reverse()
703                         self.lines.extend(lst)
704
705                 while self.lines:
706                         (kind, line) = self.lines.pop()
707                         if kind == POPFILE:
708                                 self.currentnode_stack.pop()
709                                 continue
710                         try:
711                                 self.process_line(kind, line)
712                         except Exception, e:
713                                 if Logs.verbose:
714                                         debug('preproc: line parsing failed (%s): %s %s', e, line, Utils.ex_stack())
715
	def process_line(self, token, line):
		"""
		Process one preprocessor directive.

		token -- the directive keyword ('if', 'ifdef', 'include', 'define', ...)
		line  -- the rest of the directive line

		WARNING: a new state must be added for if* because the endif
		"""
		ve = Logs.verbose
		if ve: debug('preproc: line is %s - %s state is %s', token, line, self.state)
		state = self.state

		# make certain we define the state if we are about to enter in an if block
		if token in ['ifdef', 'ifndef', 'if']:
			state.append(undefined)
		elif token == 'endif':
			state.pop()

		# skip lines when in a dead 'if' branch, wait for the endif
		if not token in ['else', 'elif', 'endif']:
			if skipped in self.state or ignored in self.state:
				return

		if token == 'if':
			# evaluate the condition with the macros seen so far
			ret = eval_macro(tokenize(line), self.defs)
			if ret: state[-1] = accepted
			else: state[-1] = ignored
		elif token == 'ifdef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs: state[-1] = accepted
			else: state[-1] = ignored
		elif token == 'ifndef':
			# inverse of ifdef: accept the branch when the macro is unknown
			m = re_mac.match(line)
			if m and m.group(0) in self.defs: state[-1] = ignored
			else: state[-1] = accepted
		elif token == 'include' or token == 'import':
			(kind, inc) = extract_include(line, self.defs)
			if inc in self.ban_includes: return
			# '#import' implies include-once semantics
			if token == 'import': self.ban_includes.add(inc)
			if ve: debug('preproc: include found %s    (%s) ', inc, kind)
			# system includes (<...>) are skipped when strict_quotes is set
			if kind == '"' or not strict_quotes:
				self.tryfind(inc)
		elif token == 'elif':
			if state[-1] == accepted:
				# a previous branch was taken: skip until endif
				state[-1] = skipped
			elif state[-1] == ignored:
				if eval_macro(tokenize(line), self.defs):
					state[-1] = accepted
		elif token == 'else':
			if state[-1] == accepted: state[-1] = skipped
			elif state[-1] == ignored: state[-1] = accepted
		elif token == 'define':
			try:
				# store the whole define line, keyed by the macro name
				self.defs[define_name(line)] = line
			except:
				raise PreprocError("invalid define line %s" % line)
		elif token == 'undef':
			m = re_mac.match(line)
			if m and m.group(0) in self.defs:
				self.defs.__delitem__(m.group(0))
				#print "undef %s" % name
		elif token == 'pragma':
			# '#pragma once': ban the current file from further inclusion
			if re_pragma_once.match(line.lower()):
				self.ban_includes.add(self.curfile)
776
def get_deps(node, env, nodepaths=[]):
	"""
	Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind
	#include some_macro()

	Returns a tuple (nodes, names): the resolved dependency nodes and
	the include names that could not be found on the paths.
	"""
	parser = c_parser(nodepaths)
	parser.start(node, env)
	return (parser.nodes, parser.names)
786
787 #################### dumb dependency scanner
788
# matches a whole '#include ...' line (also the '%:' digraph form);
# group(2) is the 'include' keyword and group(3) the remainder of the line
re_inc = re.compile(\
	'^[ \t]*(#|%:)[ \t]*(include)[ \t]*(.*)\r*$',
	re.IGNORECASE | re.MULTILINE)
792
def lines_includes(filename):
	"""
	Read *filename* and return a list of (keyword, rest) pairs, one per
	'#include' line found after comment removal and line-continuation joining.
	"""
	txt = Utils.readf(filename)
	if use_trigraphs:
		# apply the trigraph substitutions before anything else
		for (seq, ch) in trig_def:
			txt = txt.split(seq).join(ch)
	txt = re_nl.sub('', txt)
	txt = re_cpp.sub(repl, txt)
	return [(m.group(2), m.group(3)) for m in re_inc.finditer(txt)]
800
def get_deps_simple(node, env, nodepaths=[], defines={}):
	"""
	Get the dependencies by just looking recursively at the #include statements

	node      -- node of the file to scan
	env       -- environment used to compute the file paths
	nodepaths -- include path nodes searched with find_resource
	defines   -- macro table passed to extract_include

	Returns a tuple (nodes, names): the dependency nodes resolved on the
	include paths, and the include names that could not be resolved.
	"""

	nodes = []
	names = []

	def find_deps(node):
		# scan one file, then descend into every include resolved to a node
		lst = lines_includes(node.abspath(env))

		for (_, line) in lst:
			(t, filename) = extract_include(line, defines)
			if filename in names:
				continue

			# .moc files are generated later, record them by name only
			if filename.endswith('.moc'):
				names.append(filename)

			found = None
			for n in nodepaths:
				if found:
					break
				found = n.find_resource(filename)

			if not found:
				if not filename in names:
					names.append(filename)
			elif not found in nodes:
				nodes.append(found)
				# recurse into the *included* file - the previous code
				# called find_deps(node), rescanning the current file and
				# never following the includes of the headers it found
				find_deps(found)

	find_deps(node)
	return (nodes, names)
835
836