#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Implements a simple "negative compile" test for C++ on Linux.

Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unittesting of these assertions, we use this Python script to
invoke the compiler on a source file and assert that compilation fails.

For more info, see:
  http://dev.chromium.org/developers/testing/no-compile-tests
"""

import StringIO
import ast
import os
import re
import select
import subprocess
import sys
import time


# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
#   #ifdef NCTEST_NAME_OF_TEST  // [r'expected output']
#   #if defined(NCTEST_NAME_OF_TEST)  // [r'expected output']
#   #if NCTEST_NAME_OF_TEST  // [r'expected output']
#   #elif NCTEST_NAME_OF_TEST  // [r'expected output']
#   #elif DISABLED_NCTEST_NAME_OF_TEST  // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
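# Illustrative only: on a hypothetical line such as
#   '#ifdef NCTEST_FOO  // [r"no viable conversion"]'
# the regex would capture group(1) == 'NCTEST_FOO' and
# group(2) == '// [r"no viable conversion"]'.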


# Matches and removes the defined() preprocessor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
#   #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
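# Illustrative only: applied to the hypothetical capture 'defined(NCTEST_FOO)',
# group(1) would be 'NCTEST_FOO'.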


# Used to grab the expectation from the comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a Python list of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
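# Illustrative only: for the hypothetical comment "// [r'foo', r'bar']",
# group(1) would be "[r'foo', r'bar']"; ParseExpectation() below turns that
# literal into a list of compiled regular expressions.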


# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s

#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"

"""


# The log message on a test completion.
LOG_TEMPLATE = """
TEST(%s, %s) took %f secs. Started at %f, ended at %f.
"""

# The GUnit test function to output for a successful or disabled test.
GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""


# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 60
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2


def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
  """Make sure the arguments being passed in are sane."""
  assert parallelism >= 1
  assert type(sourcefile_path) is str
  assert type(cflags) is list
  for flag in cflags:
    assert type(flag) is str
  assert type(resultfile_path) is str


def ParseExpectation(expectation_string):
  """Extracts expectation definition from the trailing comment on the ifdef.

  See the comment on NCTEST_CONFIG_RE for examples of the format we are parsing.

  Args:
    expectation_string: A string like "// [r'some_regex']"

  Returns:
    A list of compiled regular expressions indicating all possible valid
    compiler outputs. If the list is empty, all outputs are considered valid.
  """
  assert expectation_string is not None

  match = EXTRACT_EXPECTATION_RE.match(expectation_string)
  assert match

  raw_expectation = ast.literal_eval(match.group(1))
  assert type(raw_expectation) is list

  expectation = []
  for regex_str in raw_expectation:
    assert type(regex_str) is str
    expectation.append(re.compile(regex_str))
  return expectation


def ExtractTestConfigs(sourcefile_path, suite_name):
  """Parses the source file for test configurations.

  Each no-compile test in the file is separated by an ifdef macro. We scan
  the source file with NCTEST_CONFIG_RE to find all ifdefs that look like
  they demarcate one no-compile test and try to extract the test configuration
  from that.

  Args:
    sourcefile_path: The path to the source file.
    suite_name: The name of the test suite.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { name: 'NCTEST_NAME'
        suite_name: 'SOURCE_FILE_NAME'
        expectations: [re.Pattern, re.Pattern] }

    The |suite_name| is used to generate a pretty gtest output on successful
    completion of the no compile test.

    The compiled regexps in |expectations| define the valid outputs of the
    compiler. If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded. If the list is empty, then we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is actually None, then this specifies a compiler sanity check test, which
    should expect a SUCCESSFUL compilation.
  """
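  # Illustrative only (hypothetical line, not from any real .nc file): an input
  # line of '#ifdef NCTEST_BAD_CAST  // [r"static_cast"]' would yield
  #   {'name': 'NCTEST_BAD_CAST',
  #    'suite_name': suite_name,
  #    'expectations': [re.compile(r'static_cast')]}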
  sourcefile = open(sourcefile_path, 'r')

  # A compiler sanity test is always run alongside the configs extracted here
  # (see main()). You need that one sanity test to show that the compiler
  # flags and configuration are not just wrong. Otherwise, having a
  # misconfigured compiler, or an error in the shared portions of the .nc
  # file, would cause all tests to erroneously pass.
  test_configs = []

  for line in sourcefile:
    match_result = NCTEST_CONFIG_RE.match(line)
    if not match_result:
      continue

    groups = match_result.groups()

    # Grab the name and remove the defined() predicate if there is one.
    name = groups[0]
    strip_result = STRIP_DEFINED_RE.match(name)
    if strip_result:
      name = strip_result.group(1)

    # Read expectations if there are any.
    test_configs.append({'name': name,
                         'suite_name': suite_name,
                         'expectations': ParseExpectation(groups[1])})
  sourcefile.close()
  return test_configs


def StartTest(sourcefile_path, cflags, config):
  """Starts one negative compile test.

  Args:
    sourcefile_path: The path to the source file.
    cflags: A list of strings with all the CFLAGS to give to the compiler.
    config: A dictionary describing the test. See ExtractTestConfigs
      for a description of the config format.

  Returns:
    A dictionary containing all the information about the started test. The
    fields in the dictionary are as follows:
      { 'proc': A subprocess object representing the compiler run.
        'cmdline': The executed command line.
        'name': The name of the test.
        'suite_name': The suite name to use when generating the gunit test
                      result.
        'terminate_timeout': The timestamp in seconds since the epoch after
                             which the test should be terminated.
        'kill_timeout': The timestamp in seconds since the epoch after which
                        the test should be given a hard kill signal.
        'started_at': A timestamp in seconds since the epoch for when this test
                      was started.
        'aborted_at': A timestamp in seconds since the epoch for when this test
                      was aborted. If the test completed successfully,
                      this value is 0.
        'finished_at': A timestamp in seconds since the epoch for when this
                       test successfully completed. If the test is aborted,
                       or still running, this value is 0.
        'expectations': The test expectations: a list of compiled regular
                        expressions, or None for the compiler sanity check.
                        See ParseExpectation() for the structure.
      }
  """
  # TODO(ajwong): Get the compiler from gyp.
  cmdline = [os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          '../third_party/llvm-build/Release+Asserts/bin',
                          'clang++')]
  cmdline.extend(cflags)
  name = config['name']
  expectations = config['expectations']
  if expectations is not None:
    cmdline.append('-D%s' % name)
  cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++',
                  sourcefile_path])
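  # Illustrative only: the assembled command line resembles (paths and flags
  # vary with the build configuration)
  #   .../llvm-build/Release+Asserts/bin/clang++ <cflags> -DNCTEST_NAME_OF_TEST
  #       -o /dev/null -c -x c++ path/to/test.nc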

  process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  now = time.time()
  return {'proc': process,
          'cmdline': ' '.join(cmdline),
          'name': name,
          'suite_name': config['suite_name'],
          'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
          'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
          'started_at': now,
          'aborted_at': 0,
          'finished_at': 0,
          'expectations': expectations}


def PassTest(resultfile, resultlog, test):
  """Logs the result of a test started by StartTest(), or of a disabled test
  configuration.

  Args:
    resultfile: File object for the .cc file that results are written to.
    resultlog: File object for the log file.
    test: An instance of the dictionary returned by StartTest(), or a
      configuration from ExtractTestConfigs().
  """
  resultfile.write(GUNIT_TEMPLATE % (
      test['suite_name'], test['name']))

  # The 'started_at' key is only present if the test has actually been started.
  if 'started_at' in test:
    resultlog.write(LOG_TEMPLATE % (
        test['suite_name'], test['name'],
        test['finished_at'] - test['started_at'],
        test['started_at'], test['finished_at']))


def FailTest(resultfile, test, error, stdout=None, stderr=None):
  """Logs the failure of a test started by StartTest().

  Args:
    resultfile: File object for the .cc file that results are written to.
    test: An instance of the dictionary returned by StartTest().
    error: The printable reason for the failure.
    stdout: The test's output to stdout.
    stderr: The test's output to stderr.
  """
  resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
  resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
  if stdout and len(stdout) != 0:
    resultfile.write('#error "%s stdout:"\n' % test['name'])
    for line in stdout.split('\n'):
      resultfile.write('#error " %s"\n' % line)

  if stderr and len(stderr) != 0:
    resultfile.write('#error "%s stderr:"\n' % test['name'])
    for line in stderr.split('\n'):
      resultfile.write('#error " %s"\n' % line)
  resultfile.write('\n')


def WriteStats(resultlog, suite_name, timings):
  """Logs the performance timings for each stage of the script.

  Args:
    resultlog: File object for the log file.
    suite_name: The name of the GUnit suite this test belongs to.
    timings: Dictionary with timestamps for each stage of the script run.
  """
  stats_template = """
TEST(%s): Started %f, Ended %f, Total %fs, Extract %fs, Compile %fs, Process %fs
"""
  total_secs = timings['results_processed'] - timings['started']
  extract_secs = timings['extract_done'] - timings['started']
  compile_secs = timings['compile_done'] - timings['extract_done']
  process_secs = timings['results_processed'] - timings['compile_done']
  resultlog.write(stats_template % (
      suite_name, timings['started'], timings['results_processed'], total_secs,
      extract_secs, compile_secs, process_secs))


def ProcessTestResult(resultfile, resultlog, test):
  """Interprets and logs the result of a test started by StartTest().

  Args:
    resultfile: File object for the .cc file that results are written to.
    resultlog: File object for the log file.
    test: The dictionary from StartTest() to process.
  """
  # Snap a copy of stdout and stderr into the test dictionary immediately,
  # because communicate() can only be called once on the Popen object, and
  # lots of the code below needs access to the output.
  proc = test['proc']
  (stdout, stderr) = proc.communicate()

  if test['aborted_at'] != 0:
    FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
             (test['started_at'], test['aborted_at']))
    return

  if proc.poll() == 0:
    # Handle failure due to successful compile.
    FailTest(resultfile, test,
             'Unexpected successful compilation.',
             stdout, stderr)
    return
  else:
    # Check that the output matches the expectations. If there are no
    # expectations, then we just consider the output "matched" by default.
    if len(test['expectations']) == 0:
      PassTest(resultfile, resultlog, test)
      return

    # Otherwise test against all expectations.
    for regexp in test['expectations']:
      if (regexp.search(stdout) is not None or
          regexp.search(stderr) is not None):
        PassTest(resultfile, resultlog, test)
        return
    expectation_str = ', '.join(
        ["r'%s'" % regexp.pattern for regexp in test['expectations']])
    FailTest(resultfile, test,
             'Expectations [%s] did not match output.' % expectation_str,
             stdout, stderr)
    return


def CompleteAtLeastOneTest(executing_tests):
  """Blocks until at least one test is removed from executing_tests.

  This function removes completed tests from executing_tests, logging failures
  and output. If no tests can be removed, it will enter a poll-loop until one
  test finishes or times out. On a timeout, this function is responsible for
  terminating the process in the appropriate fashion.

  Args:
    executing_tests: A dict mapping a string containing the test name to the
      test dict returned from StartTest().

  Returns:
    A list of tests that have finished.
  """
  finished_tests = []
  busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
  while len(finished_tests) == 0:
    # If we don't make progress for too long, assume the code is just dead.
    assert busy_loop_timeout > time.time()

    # Select on the output pipes.
    read_set = []
    for test in executing_tests.values():
      read_set.extend([test['proc'].stderr, test['proc'].stdout])
    select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)

    # Now attempt to process results.
    now = time.time()
    for test in executing_tests.values():
      proc = test['proc']
      if proc.poll() is not None:
        test['finished_at'] = now
        finished_tests.append(test)
      elif test['kill_timeout'] < now:
        # The hard-kill deadline is later than the terminate deadline, so it
        # must be checked first to ever take effect.
        proc.kill()
        test['aborted_at'] = now
      elif test['terminate_timeout'] < now:
        proc.terminate()
        test['aborted_at'] = now

  for test in finished_tests:
    del executing_tests[test['name']]
  return finished_tests


def main():
  if len(sys.argv) < 5 or sys.argv[4] != '--':
    print ('Usage: %s <parallelism> <sourcefile> <resultfile> -- <cflags...>' %
           sys.argv[0])
    sys.exit(1)

  # Force us into the "C" locale so the compiler doesn't localize its output.
  # In particular, this stops gcc from using smart quotes when in English UTF-8
  # locales. This makes the expectation writing much easier.
  os.environ['LC_ALL'] = 'C'

  parallelism = int(sys.argv[1])
  sourcefile_path = sys.argv[2]
  resultfile_path = sys.argv[3]
  cflags = sys.argv[5:]

  timings = {'started': time.time()}

  ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)

  # Convert the filename from underscores to CamelCase.
  words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
  words = [w.capitalize() for w in words]
  suite_name = 'NoCompile' + ''.join(words)
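  # Illustrative only: a hypothetical source file named foo_bar_unittest.nc
  # would produce the suite name 'NoCompileFooBarUnittest'.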

  test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
  timings['extract_done'] = time.time()

  resultfile = StringIO.StringIO()
  resultlog = StringIO.StringIO()
  resultfile.write(RESULT_FILE_HEADER % sourcefile_path)

  # Run the no-compile tests, but ensure we do not run more than |parallelism|
  # tests at once.
  timings['header_written'] = time.time()
  executing_tests = {}
  finished_tests = []

  cflags.extend(['-MMD', '-MF', resultfile_path + '.d', '-MT', resultfile_path])
  test = StartTest(
      sourcefile_path,
      cflags,
      { 'name': 'NCTEST_SANITY',
        'suite_name': suite_name,
        'expectations': None,
      })
  executing_tests[test['name']] = test

  for config in test_configs:
    # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
    # acts as a semaphore. We cannot use threads + a real semaphore because
    # subprocess forks, which can cause all sorts of hilarity with threads.
    if len(executing_tests) >= parallelism:
      finished_tests.extend(CompleteAtLeastOneTest(executing_tests))

    if config['name'].startswith('DISABLED_'):
      PassTest(resultfile, resultlog, config)
    else:
      test = StartTest(sourcefile_path, cflags, config)
      assert test['name'] not in executing_tests
      executing_tests[test['name']] = test

  # If there are no more tests to start, we still need to drain the running
  # ones.
  while len(executing_tests) > 0:
    finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
  timings['compile_done'] = time.time()

  finished_tests = sorted(finished_tests, key=lambda test: test['name'])
  for test in finished_tests:
    if test['name'] == 'NCTEST_SANITY':
      _, stderr = test['proc'].communicate()
      return_code = test['proc'].poll()
      if return_code != 0:
        sys.stderr.write(stderr)
      continue
    ProcessTestResult(resultfile, resultlog, test)
  timings['results_processed'] = time.time()

  WriteStats(resultlog, suite_name, timings)

  with open(resultfile_path + '.log', 'w') as fd:
    fd.write(resultlog.getvalue())
  if return_code == 0:
    with open(resultfile_path, 'w') as fd:
      fd.write(resultfile.getvalue())

  resultfile.close()
  sys.exit(return_code)


if __name__ == '__main__':
  main()