blob: 1c48c4ba54570e45f887298ee9e4673315687b4d [file] [log] [blame]
[email protected]cb155a82011-11-29 17:25:341#!/usr/bin/env python
[email protected]81814bce2011-09-10 03:03:002# Copyright (c) 2011 The Chromium Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Implements a simple "negative compile" test for C++ on linux.
7
8Sometimes a C++ API needs to ensure that various usages cannot compile. To
9enable unittesting of these assertions, we use this python script to
10invoke gcc on a source file and assert that compilation fails.
11
12For more info, see:
13 http://dev.chromium.org/developers/testing/no-compile-tests
14"""
15
tzikb5f8f4b2016-02-16 02:34:4516import StringIO
[email protected]81814bce2011-09-10 03:03:0017import ast
18import locale
19import os
20import re
21import select
22import shlex
23import subprocess
24import sys
25import time
26
27
# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
#   #ifdef NCTEST_NAME_OF_TEST   // [r'expected output']
#   #if defined(NCTEST_NAME_OF_TEST)   // [r'expected output']
#   #if NCTEST_NAME_OF_TEST   // [r'expected output']
#   #elif NCTEST_NAME_OF_TEST   // [r'expected output']
#   #elif DISABLED_NCTEST_NAME_OF_TEST   // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')


# Matches and removes the defined() preprocessor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
#   #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')


# Used to grab the expectation from comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')


# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
//   %s

#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"

"""


# The GUnit test function to output on a successful test completion.
SUCCESS_GUNIT_TEMPLATE = """
TEST(%s, %s) {
  LOG(INFO) << "Took %f secs. Started at %f, ended at %f";
}
"""

# The GUnit test function to output for a disabled test.
DISABLED_GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""


# Timeout constants.
# A compile is sent SIGTERM after this many seconds...
NCTEST_TERMINATE_TIMEOUT_SEC = 60
# ...and SIGKILL shortly after that if it is still alive.
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
# If the poll loop makes no progress for this long, assume the driver is hung.
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
86
87
def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
  """Sanity-checks the command-line arguments.

  Args:
    parallelism: Maximum number of concurrent compiles; must be at least 1.
    sourcefile_path: Path to the .nc source file.
    cflags: Compiler flags as a single string.
    resultfile_path: Path the generated result .cc file is written to.
  """
  assert parallelism >= 1
  for string_arg in (sourcefile_path, cflags, resultfile_path):
    assert type(string_arg) is str
94
95
def ParseExpectation(expectation_string):
  """Extracts the expectation regexes from an ifdef's trailing comment.

  See the comment on NCTEST_CONFIG_RE for examples of the format being parsed.

  Args:
    expectation_string: A string like "// [r'some_regex']"

  Returns:
    A list of compiled regular expressions describing every acceptable
    compiler output. An empty list means any output is acceptable.
  """
  assert expectation_string is not None

  extraction = EXTRACT_EXPECTATION_RE.match(expectation_string)
  assert extraction

  # The comment payload must be a python list literal of strings.
  raw_patterns = ast.literal_eval(extraction.group(1))
  assert type(raw_patterns) is list
  for pattern in raw_patterns:
    assert type(pattern) is str

  return [re.compile(pattern) for pattern in raw_patterns]
121
122
def ExtractTestConfigs(sourcefile_path, suite_name):
  """Parses the source file for test configurations.

  Each no-compile test in the file is separated by an ifdef macro. We scan
  the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
  they demark one no-compile test and try to extract the test configuration
  from that.

  Args:
    sourcefile_path: The path to the source file.
    suite_name: The name of the test suite.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { name: 'NCTEST_NAME'
        suite_name: 'SOURCE_FILE_NAME'
        expectations: [re.Pattern, re.Pattern] }

    The |suite_name| is used to generate a pretty gtest output on successful
    completion of the no compile test.

    The compiled regexps in |expectations| define the valid outputs of the
    compiler.  If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded.  If the list is empty, than we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is actually None, then this specifies a compiler sanity check test, which
    should expect a SUCCESSFUL compilation.
  """
  test_configs = []

  # Use a context manager so the file handle is released even if parsing a
  # malformed expectation comment raises inside ParseExpectation(); the old
  # open()/close() pair leaked the handle on that path.
  with open(sourcefile_path, 'r') as sourcefile:
    for line in sourcefile:
      match_result = NCTEST_CONFIG_RE.match(line)
      if not match_result:
        continue

      groups = match_result.groups()

      # Grab the name and remove the defined() predicate if there is one.
      name = groups[0]
      strip_result = STRIP_DEFINED_RE.match(name)
      if strip_result:
        name = strip_result.group(1)

      # Read expectations if there are any.
      test_configs.append({'name': name,
                           'suite_name': suite_name,
                           'expectations': ParseExpectation(groups[1])})
  return test_configs
181
182
def StartTest(sourcefile_path, cflags, config):
  """Start one negative compile test.

  Args:
    sourcefile_path: The path to the source file.
    cflags: A string with all the CFLAGS to give to gcc. This string will be
        split by shlex so be careful with escaping.
    config: A dictionary describing the test. See ExtractTestConfigs
        for a description of the config format.

  Returns:
    A dictionary containing all the information about the started test. The
    fields in the dictionary are as follows:
      { 'proc': A subprocess object representing the compiler run.
        'cmdline': The executed command line.
        'name': The name of the test.
        'suite_name': The suite name to use when generating the gunit test
                      result.
        'terminate_timeout': The timestamp in seconds since the epoch after
                             which the test should be terminated.
        'kill_timeout': The timestamp in seconds since the epoch after which
                        the test should be given a hard kill signal.
        'started_at': A timestamp in seconds since the epoch for when this test
                      was started.
        'aborted_at': A timestamp in seconds since the epoch for when this test
                      was aborted.  If the test completed successfully,
                      this value is 0.
        'finished_at': A timestamp in seconds since the epoch for when this
                       test was successfully complete.  If the test is aborted,
                       or running, this value is 0.
        'expectations': A dictionary with the test expectations. See
                        ParseExpectation() for the structure.
        }
  """
  # TODO(ajwong): Get the compiler from gyp.
  compiler_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '../third_party/llvm-build/Release+Asserts/bin',
                               'clang++')
  test_name = config['name']
  expectations = config['expectations']

  cmdline = [compiler_path]
  cmdline.extend(shlex.split(cflags))
  # A sanity-check config (expectations is None) compiles without defining
  # the test macro; every real test defines its own NCTEST macro.
  if expectations is not None:
    cmdline.append('-D%s' % test_name)
  cmdline.extend(['-std=c++11', '-o', '/dev/null', '-c', '-x', 'c++',
                  sourcefile_path])

  process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  started_at = time.time()
  return {
      'proc': process,
      'cmdline': ' '.join(cmdline),
      'name': test_name,
      'suite_name': config['suite_name'],
      'terminate_timeout': started_at + NCTEST_TERMINATE_TIMEOUT_SEC,
      'kill_timeout': started_at + NCTEST_KILL_TIMEOUT_SEC,
      'started_at': started_at,
      'aborted_at': 0,
      'finished_at': 0,
      'expectations': expectations,
  }
242
243
def PassTest(resultfile, test):
  """Logs the result of a test started by StartTest(), or a disabled test
  configuration.

  Args:
    resultfile: File object for .cc file that results are written to.
    test: An instance of the dictionary returned by StartTest(), a
        configuration from ExtractTestConfigs().
  """
  # Configurations that were never run (disabled tests) lack the
  # 'started_at' key, so its presence distinguishes the two cases.
  if 'started_at' not in test:
    resultfile.write(DISABLED_GUNIT_TEMPLATE % (test['suite_name'],
                                                test['name']))
    return

  elapsed_secs = test['finished_at'] - test['started_at']
  resultfile.write(SUCCESS_GUNIT_TEMPLATE % (test['suite_name'], test['name'],
                                             elapsed_secs, test['started_at'],
                                             test['finished_at']))
262
263
def FailTest(resultfile, test, error, stdout=None, stderr=None):
  """Logs the result of a test started by StartTest()

  Args:
    resultfile: File object for .cc file that results are written to.
    test: An instance of the dictionary returned by StartTest()
    error: The printable reason for the failure.
    stdout: The test's output to stdout.
    stderr: The test's output to stderr.
  """
  resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
  resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
  if stdout and len(stdout) != 0:
    resultfile.write('#error "%s stdout:"\n' % test['name'])
    for line in stdout.split('\n'):
      # Fixed: this used to emit a stray trailing colon after every echoed
      # stdout line, inconsistent with the stderr branch below.
      resultfile.write('#error "  %s"\n' % line)

  if stderr and len(stderr) != 0:
    resultfile.write('#error "%s stderr:"\n' % test['name'])
    for line in stderr.split('\n'):
      resultfile.write('#error "  %s"\n' % line)
  resultfile.write('\n')
286
287
def WriteStats(resultfile, suite_name, timings):
  """Logs the performance timings for each stage of the script into a fake
  test.

  Args:
    resultfile: File object for .cc file that results are written to.
    suite_name: The name of the GUnit suite this test belongs to.
    timings: Dictionary with timestamps for each stage of the script run.
  """
  started = timings['started']
  ended = timings['results_processed']
  stats_msg = ('Started %f, Ended %f, Total %fs, Extract %fs, '
               'Compile %fs, Process %fs') % (
                   started,
                   ended,
                   ended - started,
                   timings['extract_done'] - started,
                   timings['compile_done'] - timings['extract_done'],
                   ended - timings['compile_done'])
  resultfile.write('TEST(%s, Stats) { LOG(INFO) << "%s"; }\n' % (suite_name,
                                                                 stats_msg))
306
307
def ProcessTestResult(resultfile, test):
  """Interprets and logs the result of a test started by StartTest()

  Args:
    resultfile: File object for .cc file that results are written to.
    test: The dictionary from StartTest() to process.
  """
  # communicate() may only be called once per Popen object, so capture both
  # streams immediately; every branch below needs access to them.
  proc = test['proc']
  (stdout, stderr) = proc.communicate()

  # A non-zero 'aborted_at' means the compile was terminated for timing out.
  if test['aborted_at'] != 0:
    FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
             (test['started_at'], test['aborted_at']))
    return

  # For a no-compile test, a successful compile is the failure mode.
  if proc.poll() == 0:
    FailTest(resultfile, test,
             'Unexpected successful compilation.',
             stdout, stderr)
    return

  # No expectations listed: any failed compile counts as a pass.
  if len(test['expectations']) == 0:
    PassTest(resultfile, test)
    return

  # Otherwise at least one expectation must match the compiler output.
  for regexp in test['expectations']:
    if (regexp.search(stdout) is not None or
        regexp.search(stderr) is not None):
      PassTest(resultfile, test)
      return

  expectation_str = ', '.join(
      ["r'%s'" % regexp.pattern for regexp in test['expectations']])
  FailTest(resultfile, test,
           'Expectations [%s] did not match output.' % expectation_str,
           stdout, stderr)
351
352
def CompleteAtLeastOneTest(resultfile, executing_tests):
  """Blocks until at least one task is removed from executing_tests.

  This function removes completed tests from executing_tests, logging failures
  and output. If no tests can be removed, it will enter a poll-loop until one
  test finishes or times out. On a timeout, this function is responsible for
  terminating the process in the appropriate fashion.

  Args:
    resultfile: File object for .cc file that results are written to.
        (Currently unused here; kept for signature consistency with callers.)
    executing_tests: A dict mapping a string containing the test name to the
        test dict return from StartTest().

  Returns:
    A list of tests that have finished.
  """
  finished_tests = []
  busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
  while len(finished_tests) == 0:
    # If we don't make progress for too long, assume the code is just dead.
    assert busy_loop_timeout > time.time()

    # Select on the output pipes so we sleep until a compile produces output
    # (or the timeout elapses) instead of spinning.
    read_set = []
    for test in executing_tests.values():
      read_set.extend([test['proc'].stderr, test['proc'].stdout])
    select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)

    # Now attempt to process results.
    now = time.time()
    for test in executing_tests.values():
      proc = test['proc']
      if proc.poll() is not None:
        test['finished_at'] = now
        finished_tests.append(test)
      elif test['kill_timeout'] < now:
        # Check the hard-kill deadline FIRST: kill_timeout is always later
        # than terminate_timeout, so testing terminate_timeout first would
        # shadow this branch and the SIGKILL would never be sent to a
        # process that ignores SIGTERM. (That was a bug in the original
        # ordering.)
        proc.kill()
        test['aborted_at'] = now
      elif test['terminate_timeout'] < now:
        proc.terminate()
        test['aborted_at'] = now

  for test in finished_tests:
    del executing_tests[test['name']]
  return finished_tests
397
398
def main():
  """Entry point.

  Usage: nocompile_driver <parallelism> <sourcefile> <cflags> <resultfile>

  Runs every no-compile test found in <sourcefile> (at most <parallelism>
  compiles at once), then writes a gtest-style .cc summary to <resultfile>.
  """
  if len(sys.argv) != 5:
    print ('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' %
           sys.argv[0])
    sys.exit(1)

  # Force us into the "C" locale so the compiler doesn't localize its output.
  # In particular, this stops gcc from using smart quotes when in english UTF-8
  # locales. This makes the expectation writing much easier.
  os.environ['LC_ALL'] = 'C'

  parallelism = int(sys.argv[1])
  sourcefile_path = sys.argv[2]
  cflags = sys.argv[3]
  resultfile_path = sys.argv[4]

  timings = {'started': time.time()}

  ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)

  # Convert filename from underscores to CamelCase.
  words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
  words = [w.capitalize() for w in words]
  suite_name = 'NoCompile' + ''.join(words)

  test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
  timings['extract_done'] = time.time()

  # Results are buffered in memory and only flushed to disk at the end, and
  # only if the sanity compile succeeded (see below).
  resultfile = StringIO.StringIO()
  resultfile.write(RESULT_FILE_HEADER % sourcefile_path)

  # Run the no-compile tests, but ensure we do not run more than |parallelism|
  # tests at once.
  timings['header_written'] = time.time()
  executing_tests = {}
  finished_tests = []

  # Start the compiler sanity-check test first: it compiles the file with no
  # NCTEST macro defined (expectations is None), so it must SUCCEED. This
  # proves the cflags and the shared portions of the .nc file aren't broken,
  # which would otherwise make every no-compile test pass erroneously. The
  # extra -MMD/-MF/-MT flags make this compile also emit the depfile.
  test = StartTest(
      sourcefile_path,
      cflags + ' -MMD -MF %s.d -MT %s' % (resultfile_path, resultfile_path),
      { 'name': 'NCTEST_SANITY',
        'suite_name': suite_name,
        'expectations': None,
      })
  executing_tests[test['name']] = test

  for config in test_configs:
    # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
    # acts as a semaphore. We cannot use threads + a real semaphore because
    # subprocess forks, which can cause all sorts of hilarity with threads.
    if len(executing_tests) >= parallelism:
      finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))

    if config['name'].startswith('DISABLED_'):
      PassTest(resultfile, config)
    else:
      test = StartTest(sourcefile_path, cflags, config)
      assert test['name'] not in executing_tests
      executing_tests[test['name']] = test

  # If there are no more test to start, we still need to drain the running
  # ones.
  while len(executing_tests) > 0:
    finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
  timings['compile_done'] = time.time()

  for test in finished_tests:
    if test['name'] == 'NCTEST_SANITY':
      # The sanity compile must succeed; dump the compiler's stderr for
      # diagnosis if it did not, and never run normal result processing
      # (which expects failure) on it.
      _, stderr = test['proc'].communicate()
      return_code = test['proc'].poll()
      if return_code != 0:
        sys.stderr.write(stderr)
      continue
    ProcessTestResult(resultfile, test)
  timings['results_processed'] = time.time()

  WriteStats(resultfile, suite_name, timings)

  # NOTE(review): |return_code| is assigned only inside the NCTEST_SANITY
  # branch above; this relies on the sanity test always appearing in
  # |finished_tests|, otherwise this raises UnboundLocalError — confirm.
  # The result file is only written when the sanity compile succeeded, so a
  # broken configuration fails the build visibly instead of passing.
  if return_code == 0:
    with open(resultfile_path, 'w') as fd:
      fd.write(resultfile.getvalue())

  resultfile.close()
  sys.exit(return_code)
[email protected]81814bce2011-09-10 03:03:00483
484
# Script entry point: only run the driver when invoked directly.
if __name__ == '__main__':
  main()