3 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
7 # Carlos Rafael Giani, 2006
8 # Thomas Nagy, 2010-2018 (ita)
11 Unit testing system for C/C++/D and interpreted languages providing test execution:
13 * in parallel, by using ``waf -j``
14 * partial (only the tests that have changed) or full (by using ``waf --alltests``)
16 The tests are declared by adding the **test** feature to programs::
19 opt.load('compiler_cxx waf_unit_test')
21 conf.load('compiler_cxx waf_unit_test')
23 bld(features='cxx cxxprogram test', source='main.cpp', target='app')
25 bld.program(features='test', source='main2.cpp', target='app2')
27 When the build is executed, the program 'test' will be built and executed without arguments.
28 The success/failure is detected by looking at the return code. The status and the standard output/error
29 are stored on the build context.
31 The results can be displayed by registering a callback function. Here is how to call
32 the predefined callback::
35 bld(features='cxx cxxprogram test', source='main.c', target='app')
36 from waflib.Tools import waf_unit_test
37 bld.add_post_fun(waf_unit_test.summary)
39 By passing --dump-test-scripts the build outputs corresponding python files
40 (with extension _run.py) that are useful for debugging purposes.
44 from waflib.TaskGen import feature, after_method, taskgen_method
45 from waflib import Utils, Task, Logs, Options
46 from waflib.Tools import ccroot
# Lock serialising access to the shared test-result lists: unit tests may run
# in parallel worker threads (``waf -j``), so result registration must be atomic.
testlock = Utils.threading.Lock()
# Template for the stand-alone debug scripts written by ``--dump-test-scripts``.
# Substituted keys: ``python`` (interpreter path), ``cmd`` (argv list or shell
# string), ``env`` (environment dict) and ``cwd`` (working directory).
# The ``cmd``/``env`` assignments and the final ``sys.exit(status)`` are
# required: without them the generated script references undefined names and
# always exits 0, hiding test failures.
SCRIPT_TEMPLATE = """#! %(python)s
import subprocess, sys

cmd = %(cmd)r
# if you want to debug with gdb:
#cmd = ['gdb', '-args'] + cmd
env = %(env)r
status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
sys.exit(status)
"""
def handle_ut_cwd(self, key):
	"""
	Task generator method, used internally to limit code duplication.
	This method may disappear anytime.

	Resolve the attribute named *key* into ``self.ut_cwd``:

	* a string is turned into a Node (absolute paths are anchored at the
	  filesystem root, relative ones below the task generator path)
	* any other truthy value is stored as-is (assumed to be a Node already)
	* a missing or falsy value leaves ``self.ut_cwd`` untouched
	"""
	cwd = getattr(self, key, None)
	if cwd:
		if isinstance(cwd, str):
			# we want a Node instance
			if os.path.isabs(cwd):
				self.ut_cwd = self.bld.root.make_node(cwd)
			else:
				self.ut_cwd = self.path.make_node(cwd)
		else:
			# presumably already a Node object; store unchanged
			self.ut_cwd = cwd
@feature('test_scripts')
def make_interpreted_test(self):
	"""
	Create interpreted unit tests: one ``utest`` task per node in
	``test_scripts_source``, executed through the command template
	``test_scripts_template``. Optional attributes: ``test_scripts_cwd``,
	``test_scripts_env``, ``test_scripts_paths``, ``test_scripts_shell``.
	"""
	for x in ['test_scripts_source', 'test_scripts_template']:
		if not hasattr(self, x):
			# fixed message typo ("taskgen i missing"); bail out, nothing can run
			Logs.warn('a test_scripts taskgen is missing %s' % x)
			return

	self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False))

	script_nodes = self.to_nodes(self.test_scripts_source)
	for script_node in script_nodes:
		tsk = self.create_task('utest', [script_node])
		tsk.vars = lst + tsk.vars
		tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd())

	self.handle_ut_cwd('test_scripts_cwd')

	env = getattr(self, 'test_scripts_env', None)
	if env:
		self.ut_env = env
	else:
		self.ut_env = dict(os.environ)

	# extend environment path-like variables (PATH, PYTHONPATH, ...) for the tests
	paths = getattr(self, 'test_scripts_paths', {})
	for (k, v) in paths.items():
		p = self.ut_env.get(k, '').split(os.pathsep)
		if isinstance(v, str):
			v = v.split(os.pathsep)
		self.ut_env[k] = os.pathsep.join(p + v)
@feature('test')
@after_method('apply_link', 'process_use')
def make_test(self):
	"""Create the unit test task. There can be only one unit test task by task generator."""
	if not getattr(self, 'link_task', None):
		# no binary was produced, so there is nothing to execute
		return

	tsk = self.create_task('utest', self.link_task.outputs)
	if getattr(self, 'ut_str', None):
		self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
		tsk.vars = lst + tsk.vars

	self.handle_ut_cwd('ut_cwd')

	if not hasattr(self, 'ut_paths'):
		# collect the output folders of shared libraries used by this test
		paths = []
		for x in self.tmp_use_sorted:
			try:
				y = self.bld.get_tgen_by_name(x).link_task
			except AttributeError:
				pass
			else:
				if not isinstance(y, ccroot.stlink_task):
					paths.append(y.outputs[0].parent.abspath())
		self.ut_paths = os.pathsep.join(paths) + os.pathsep

	if not hasattr(self, 'ut_env'):
		self.ut_env = dct = dict(os.environ)
		def add_path(var):
			# prepend the library paths to an environment variable
			dct[var] = self.ut_paths + dct.get(var, '')
		if Utils.unversioned_sys_platform() == 'win32':
			add_path('PATH')
		elif Utils.unversioned_sys_platform() == 'darwin':
			add_path('DYLD_LIBRARY_PATH')
			add_path('LD_LIBRARY_PATH')
		else:
			add_path('LD_LIBRARY_PATH')

	if not hasattr(self, 'ut_cmd'):
		self.ut_cmd = getattr(Options.options, 'testcmd', False)
147 def add_test_results(self, tup):
148 """Override and return tup[1] to interrupt the build immediately if a test does not run"""
149 Logs.debug("ut: %r", tup)
151 self.utest_results.append(tup)
152 except AttributeError:
153 self.utest_results = [tup]
155 self.bld.utest_results.append(tup)
156 except AttributeError:
157 self.bld.utest_results = [tup]
class utest(Task.Task):
	"""
	Execute a unit test
	"""
	after = ['vnum', 'inst']

	def runnable_status(self):
		"""
		Always execute the task if `waf --alltests` was used or no
		tests if ``waf --notests`` was used
		"""
		if getattr(Options.options, 'no_tests', False):
			return Task.SKIP_ME

		ret = super(utest, self).runnable_status()
		if ret == Task.SKIP_ME:
			if getattr(Options.options, 'all_tests', False):
				return Task.RUN_ME
		return ret

	def get_test_env(self):
		"""
		In general, tests may require any library built anywhere in the project.
		Override this method if fewer paths are needed
		"""
		return self.generator.ut_env

	def post_run(self):
		# With --clear-failed, invalidate the signature of a failed test so it
		# is re-executed on the next build
		super(utest, self).post_run()
		if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]:
			self.generator.bld.task_sigs[self.uid()] = None

	def run(self):
		"""
		Execute the test. The execution is always successful, and the results
		are stored on ``self.generator.bld.utest_results`` for postprocessing.

		Override ``add_test_results`` to interrupt the build
		"""
		if hasattr(self.generator, 'ut_run'):
			return self.generator.ut_run(self)

		self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()])
		ut_cmd = getattr(self.generator, 'ut_cmd', False)
		if ut_cmd:
			# e.g. --testcmd="valgrind --error-exitcode=1 %s"
			self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec))

		return self.exec_command(self.ut_exec)

	def exec_command(self, cmd, **kw):
		# Run the test, capture stdout/stderr, and record (path, returncode, out, err)
		Logs.debug('runner: %r', cmd)
		if getattr(Options.options, 'dump_test_scripts', False):
			script_code = SCRIPT_TEMPLATE % {
				'python': sys.executable,
				'env': self.get_test_env(),
				'cwd': self.get_cwd().abspath(),
				'cmd': cmd,
			}
			script_file = self.inputs[0].abspath() + '_run.py'
			Utils.writef(script_file, script_code)
			os.chmod(script_file, Utils.O755)
			Logs.info('Test debug file written as %r' % script_file)

		proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(),
			stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd, str))
		(stdout, stderr) = proc.communicate()
		self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr)

		# the result lists are shared between parallel test tasks
		testlock.acquire()
		try:
			return self.generator.add_test_results(tup)
		finally:
			testlock.release()

	def get_cwd(self):
		# Run from ut_cwd when the task generator set one, else next to the binary
		return getattr(self.generator, 'ut_cwd', self.inputs[0].parent)

	def sig_explicit_deps(self):
		# Mix the input file timestamps into the task signature so that a
		# rebuilt test binary is always re-executed
		lst = [os.stat(node.abspath()).st_mtime for node in self.inputs]
		# hashlib update() requires bytes on Python 3; a bare str raised TypeError
		self.m.update(str(lst).encode())
		return super(utest, self).sig_explicit_deps()
def summary(bld):
	"""
	Display an execution summary::

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.c', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.summary)

	:param bld: build context holding ``utest_results`` tuples
	"""
	lst = getattr(bld, 'utest_results', [])
	if lst:
		Logs.pprint('CYAN', 'execution summary')

		total = len(lst)
		# a non-zero return code marks a failed test
		tfail = len([x for x in lst if x[1]])

		Logs.pprint('GREEN', ' tests that pass %d/%d' % (total-tfail, total))
		for (f, code, out, err) in lst:
			if not code:
				Logs.pprint('GREEN', ' %s' % f)

		Logs.pprint('GREEN' if tfail == 0 else 'RED', ' tests that fail %d/%d' % (tfail, total))
		for (f, code, out, err) in lst:
			if code:
				Logs.pprint('RED', ' %s' % f)
def set_exit_code(bld):
	"""
	If any of the tests fail waf will exit with that exit code.
	This is useful if you have an automated build system which need
	to report on errors from the tests.
	You may use it like this:

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.c', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.set_exit_code)

	:param bld: build context holding ``utest_results`` tuples
	"""
	lst = getattr(bld, 'utest_results', [])
	for (f, code, out, err) in lst:
		if code:
			# aggregate captured output, then abort the build with it
			msg = []
			if out:
				msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8')))
			if err:
				msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8')))
			bld.fatal(os.linesep.join(msg))
def options(opt):
	"""
	Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options.
	"""
	opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests')
	opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
	opt.add_option('--clear-failed', action='store_true', default=False,
		help='Force failed unit tests to run again next time', dest='clear_failed_tests')
	opt.add_option('--testcmd', action='store', default=False, dest='testcmd',
		help='Run the unit tests using the test-cmd string example "--testcmd="valgrind --error-exitcode=1 %s" to run under valgrind')
	opt.add_option('--dump-test-scripts', action='store_true', default=False,
		help='Create python scripts to help debug tests', dest='dump_test_scripts')