#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Implements a simple "negative compile" test for C++ on Linux.

Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unit testing of these assertions, this script invokes the compiler on
a source file and asserts that compilation fails.

For more info, see:
  http://dev.chromium.org/developers/testing/no-compile-tests
"""

from __future__ import print_function

import StringIO
import ast
import os
import re
import select
import subprocess
import sys
import tempfile
import time


# Matches lines that start with #if or #elif and have a token containing
# NCTEST in the conditional. Also extracts the trailing comment.  This allows
# us to search for lines like the following:
#
#   #ifdef NCTEST_NAME_OF_TEST  // [r'expected output']
#   #if defined(NCTEST_NAME_OF_TEST)  // [r'expected output']
#   #if NCTEST_NAME_OF_TEST  // [r'expected output']
#   #elif NCTEST_NAME_OF_TEST  // [r'expected output']
#   #elif DISABLED_NCTEST_NAME_OF_TEST  // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')


# Matches and removes the defined() preprocessor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
#   #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
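# For example (illustrative), stripping 'defined(NCTEST_FOO)' yields the
# test name 'NCTEST_FOO'.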
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')


# Used to grab the expectation from the comment at the end of an #ifdef.  See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a Python list of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')


# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
//   %s

#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"

"""


# The log message on a test completion.
LOG_TEMPLATE = """
TEST(%s, %s) took %f secs. Started at %f, ended at %f.
"""

# The GUnit test function to output for a successful or disabled test.
GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""


# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 120
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2


def ValidateInput(compiler, parallelism, sourcefile_path, cflags,
                  resultfile_path):
  """Make sure the arguments being passed in are sane."""
  assert os.path.isfile(compiler)
  assert parallelism >= 1
  assert type(sourcefile_path) is str
  assert type(cflags) is list
  for flag in cflags:
    assert type(flag) is str
  assert type(resultfile_path) is str


def ParseExpectation(expectation_string):
  """Extracts expectation definition from the trailing comment on the ifdef.

  See the comment on NCTEST_CONFIG_RE for examples of the format we are parsing.

  Args:
    expectation_string: A string like "// [r'some_regex']"

  Returns:
    A list of compiled regular expressions indicating all possible valid
    compiler outputs.  If the list is empty, all outputs are considered valid.
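
  Example (illustrative): "// [r'invalid conversion']" yields a one-element
  list containing re.compile(r'invalid conversion'), while "// []" yields [].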
  """
  assert expectation_string is not None

  match = EXTRACT_EXPECTATION_RE.match(expectation_string)
  assert match

  raw_expectation = ast.literal_eval(match.group(1))
  assert type(raw_expectation) is list

  expectation = []
  for regex_str in raw_expectation:
    assert type(regex_str) is str
    expectation.append(re.compile(regex_str))
  return expectation


def ExtractTestConfigs(sourcefile_path, suite_name):
  """Parses the source file for test configurations.

  Each no-compile test in the file is delimited by an #ifdef macro.  We scan
  the source file with NCTEST_CONFIG_RE to find all ifdefs that look like
  they demarcate one no-compile test and try to extract the test configuration
  from each of them.

  Args:
    sourcefile_path: The path to the source file.
    suite_name: The name of the test suite.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { 'name': 'NCTEST_NAME',
        'suite_name': 'SOURCE_FILE_NAME',
        'expectations': [re.Pattern, re.Pattern] }

    The |suite_name| is used to generate a pretty gtest output on successful
    completion of the no compile test.

    The compiled regexps in |expectations| define the valid outputs of the
    compiler.  If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded.  If the list is empty, then we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is None, then this specifies a compiler sanity check test, which should
    expect a SUCCESSFUL compilation.
  """
  sourcefile = open(sourcefile_path, 'r')

  # Note that the compiler sanity test is not included here; main() always
  # starts one explicitly.  A sanity test is required to show that the
  # compiler flags and configuration are not simply wrong; otherwise a
  # misconfigured compiler, or an error in the shared portions of the .nc
  # file, would cause all tests to erroneously pass.
  test_configs = []

  for line in sourcefile:
    match_result = NCTEST_CONFIG_RE.match(line)
    if not match_result:
      continue

    groups = match_result.groups()

    # Grab the name and remove the defined() predicate if there is one.
    name = groups[0]
    strip_result = STRIP_DEFINED_RE.match(name)
    if strip_result:
      name = strip_result.group(1)

    # Read expectations if there are any.
    test_configs.append({'name': name,
                         'suite_name': suite_name,
                         'expectations': ParseExpectation(groups[1])})
  sourcefile.close()
  return test_configs


def StartTest(compiler, sourcefile_path, tempfile_dir, cflags, config):
  """Start one negative compile test.

  Args:
    compiler: The path to the compiler binary to invoke.
    sourcefile_path: The path to the source file.
    tempfile_dir: A directory to store temporary data from tests.
    cflags: An array of strings with all the CFLAGS to give to the compiler.
    config: A dictionary describing the test.  See ExtractTestConfigs
      for a description of the config format.

  Returns:
    A dictionary containing all the information about the started test. The
    fields in the dictionary are as follows:
      { 'proc': A subprocess object representing the compiler run.
        'cmdline': The executed command line.
        'stdout': A temporary file receiving the compiler's stdout.
        'stderr': A temporary file receiving the compiler's stderr.
        'name': The name of the test.
        'suite_name': The suite name to use when generating the gunit test
                      result.
        'terminate_timeout': The timestamp in seconds since the epoch after
                             which the test should be terminated.
        'kill_timeout': The timestamp in seconds since the epoch after which
                        the test should be given a hard kill signal.
        'started_at': A timestamp in seconds since the epoch for when this test
                      was started.
        'aborted_at': A timestamp in seconds since the epoch for when this test
                      was aborted.  If the test completed successfully,
                      this value is 0.
        'finished_at': A timestamp in seconds since the epoch for when this
                       test successfully completed.  If the test was aborted,
                       or is still running, this value is 0.
        'expectations': The list of test expectations. See ParseExpectation()
                        for the structure.
        }
  """
  cmdline = [compiler]
  cmdline.extend(cflags)
  name = config['name']
  expectations = config['expectations']
  if expectations is not None:
    cmdline.append('-D%s' % name)
  cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++',
                  sourcefile_path])
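  # At this point the command line looks roughly like (illustrative):
  #   <compiler> <cflags...> -DNCTEST_FOO -o /dev/null -c -x c++ foo.nc
  # where the -D define is omitted for the compiler sanity test.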
  test_stdout = tempfile.TemporaryFile(dir=tempfile_dir)
  test_stderr = tempfile.TemporaryFile(dir=tempfile_dir)

  process = subprocess.Popen(cmdline, stdout=test_stdout, stderr=test_stderr)
  now = time.time()
  return {'proc': process,
          'cmdline': ' '.join(cmdline),
          'stdout': test_stdout,
          'stderr': test_stderr,
          'name': name,
          'suite_name': config['suite_name'],
          'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
          'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
          'started_at': now,
          'aborted_at': 0,
          'finished_at': 0,
          'expectations': expectations}


def PassTest(resultfile, resultlog, test):
  """Logs the result of a test started by StartTest(), or a disabled test
  configuration.

  Args:
    resultfile: File object for the .cc file that results are written to.
    resultlog: File object for the log file.
    test: An instance of the dictionary returned by StartTest(), or a
          configuration from ExtractTestConfigs().
  """
  resultfile.write(GUNIT_TEMPLATE % (
      test['suite_name'], test['name']))

  # The 'started_at' key is only added if a test has been started.
  if 'started_at' in test:
    resultlog.write(LOG_TEMPLATE % (
        test['suite_name'], test['name'],
        test['finished_at'] - test['started_at'],
        test['started_at'], test['finished_at']))


def FailTest(resultfile, test, error, stdout=None, stderr=None):
  """Logs the result of a failed test started by StartTest().

  Args:
    resultfile: File object for the .cc file that results are written to.
    test: An instance of the dictionary returned by StartTest().
    error: The printable reason for the failure.
    stdout: The test's output to stdout.
    stderr: The test's output to stderr.
  """
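  # Failures are recorded as #error directives so that compiling the generated
  # result file reports them as build errors.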
  resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
  resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
  if stdout and len(stdout) != 0:
    resultfile.write('#error "%s stdout:"\n' % test['name'])
    for line in stdout.split('\n'):
      resultfile.write('#error "  %s"\n' % line)

  if stderr and len(stderr) != 0:
    resultfile.write('#error "%s stderr:"\n' % test['name'])
    for line in stderr.split('\n'):
      resultfile.write('#error "  %s"\n' % line)
  resultfile.write('\n')

def WriteStats(resultlog, suite_name, timings):
  """Logs the performance timings for each stage of the script.

  Args:
    resultlog: File object for the log file.
    suite_name: The name of the GUnit suite this test belongs to.
    timings: Dictionary with timestamps for each stage of the script run.
  """
  stats_template = """
TEST(%s): Started %f, Ended %f, Total %fs, Extract %fs, Compile %fs, Process %fs
"""
  total_secs = timings['results_processed'] - timings['started']
  extract_secs = timings['extract_done'] - timings['started']
  compile_secs = timings['compile_done'] - timings['extract_done']
  process_secs = timings['results_processed'] - timings['compile_done']
  resultlog.write(stats_template % (
      suite_name, timings['started'], timings['results_processed'], total_secs,
      extract_secs, compile_secs, process_secs))

def ExtractTestOutputAndCleanup(test):
  """Reads the test output from its temp files and closes (deletes) them.

  Returns: A (stdout, stderr) pair.
  """
  outputs = [None, None]
  for i, stream_name in ((0, "stdout"), (1, "stderr")):
    stream = test[stream_name]
    stream.seek(0)
    outputs[i] = stream.read()
    stream.close()

  return outputs

def ProcessTestResult(resultfile, resultlog, test):
  """Interprets and logs the result of a test started by StartTest().

  Args:
    resultfile: File object for the .cc file that results are written to.
    resultlog: File object for the log file.
    test: The dictionary from StartTest() to process.
  """
  proc = test['proc']
  proc.wait()
  (stdout, stderr) = ExtractTestOutputAndCleanup(test)

  if test['aborted_at'] != 0:
    FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
             (test['started_at'], test['aborted_at']))
    return

  if proc.poll() == 0:
    # The compile succeeded when it was expected to fail.
    FailTest(resultfile, test,
             'Unexpected successful compilation.',
             stdout, stderr)
    return
  else:
    # Check the output has the right expectations.  If there are no
    # expectations, then we just consider the output "matched" by default.
    if len(test['expectations']) == 0:
      PassTest(resultfile, resultlog, test)
      return

    # Otherwise test against all expectations.
    for regexp in test['expectations']:
      if (regexp.search(stdout) is not None or
          regexp.search(stderr) is not None):
        PassTest(resultfile, resultlog, test)
        return
    expectation_str = ', '.join(
        ["r'%s'" % regexp.pattern for regexp in test['expectations']])
    FailTest(resultfile, test,
             'Expectations [%s] did not match output.' % expectation_str,
             stdout, stderr)
    return


def CompleteAtLeastOneTest(executing_tests):
  """Blocks until at least one test is removed from executing_tests.

  This function removes completed tests from executing_tests, logging failures
  and output.  If no tests can be removed, it will enter a poll-loop until one
  test finishes or times out.  On a timeout, this function is responsible for
  terminating the process in the appropriate fashion.

  Args:
    executing_tests: A dict mapping a string containing the test name to the
                     test dict returned from StartTest().

  Returns:
    A list of tests that have finished.
  """
  finished_tests = []
  busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
  while len(finished_tests) == 0:
    # If we don't make progress for too long, assume the code is just dead.
    assert busy_loop_timeout > time.time()

    # Select on the output files to block until we have something to
    # do. We ignore the return value from select and just poll all
    # processes.
    read_set = []
    for test in executing_tests.values():
      read_set.extend([test['stdout'], test['stderr']])
    select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)

    # Now attempt to process results.
    now = time.time()
    for test in executing_tests.values():
      proc = test['proc']
      if proc.poll() is not None:
        test['finished_at'] = now
        finished_tests.append(test)
      elif test['kill_timeout'] < now:
        # Check the kill timeout before the terminate timeout; since
        # kill_timeout > terminate_timeout, checking terminate first would
        # never let the hard kill fire.
        proc.kill()
        test['aborted_at'] = now
      elif test['terminate_timeout'] < now:
        proc.terminate()
        test['aborted_at'] = now

    if len(finished_tests) == 0:
      # We had output from some process but no process had
      # finished. To avoid busy looping while waiting for a process to
      # finish, insert a small 100 ms delay here.
      time.sleep(0.1)

  for test in finished_tests:
    del executing_tests[test['name']]
  return finished_tests


def main():
  if len(sys.argv) < 6 or sys.argv[5] != '--':
    print('Usage: %s <compiler> <parallelism> <sourcefile> <resultfile> '
          '-- <cflags...>' % sys.argv[0])
    sys.exit(1)

  # Force us into the "C" locale so the compiler doesn't localize its output.
  # In particular, this stops gcc from using smart quotes when in English UTF-8
  # locales.  This makes the expectation writing much easier.
  os.environ['LC_ALL'] = 'C'

  compiler = sys.argv[1]
  parallelism = int(sys.argv[2])
  sourcefile_path = sys.argv[3]
  resultfile_path = sys.argv[4]
  cflags = sys.argv[6:]

  timings = {'started': time.time()}

  ValidateInput(compiler, parallelism, sourcefile_path, cflags, resultfile_path)

  # Convert the filename from underscores to CamelCase.
  words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
  words = [w.capitalize() for w in words]
  suite_name = 'NoCompile' + ''.join(words)
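  # For example (illustrative), a source file named "foo_bar.nc" would
  # produce the suite name "NoCompileFooBar".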

  test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
  timings['extract_done'] = time.time()

  resultfile = StringIO.StringIO()
  resultlog = StringIO.StringIO()
  resultfile.write(RESULT_FILE_HEADER % sourcefile_path)

  # Run the no-compile tests, but ensure we do not run more than |parallelism|
  # tests at once.
  timings['header_written'] = time.time()
  executing_tests = {}
  finished_tests = []

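  # -MMD/-MF/-MT emit a dependency (.d) file next to the result file so the
  # build system can track the headers this no-compile test pulls in.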
  cflags.extend(['-MMD', '-MF', resultfile_path + '.d', '-MT', resultfile_path])
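  # Always start with the compiler sanity test (its 'expectations' value is
  # None): it must compile SUCCESSFULLY, which guards against misconfigured
  # flags or broken shared code in the .nc file making every test pass
  # vacuously.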
  test = StartTest(
      compiler,
      sourcefile_path,
      os.path.dirname(resultfile_path),
      cflags,
      { 'name': 'NCTEST_SANITY',
        'suite_name': suite_name,
        'expectations': None,
      })
  executing_tests[test['name']] = test

  for config in test_configs:
    # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
    # acts as a semaphore.  We cannot use threads + a real semaphore because
    # subprocess forks, which can cause all sorts of hilarity with threads.
    if len(executing_tests) >= parallelism:
      finished_tests.extend(CompleteAtLeastOneTest(executing_tests))

    if config['name'].startswith('DISABLED_'):
      PassTest(resultfile, resultlog, config)
    else:
      test = StartTest(compiler, sourcefile_path,
                       os.path.dirname(resultfile_path), cflags, config)
      assert test['name'] not in executing_tests
      executing_tests[test['name']] = test

  # If there are no more tests to start, we still need to drain the running
  # ones.
  while len(executing_tests) > 0:
    finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
  timings['compile_done'] = time.time()

  finished_tests = sorted(finished_tests, key=lambda test: test['name'])
  return_code = 0
  for test in finished_tests:
    if test['name'] == 'NCTEST_SANITY':
      test['proc'].wait()
      (stdout, stderr) = ExtractTestOutputAndCleanup(test)
      return_code = test['proc'].returncode
      if return_code != 0:
        sys.stdout.write(stdout)
        sys.stderr.write(stderr)
      continue
    ProcessTestResult(resultfile, resultlog, test)
  timings['results_processed'] = time.time()

  WriteStats(resultlog, suite_name, timings)

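  # The log file is always written; the generated result .cc is only written
  # when the sanity compile succeeded, otherwise the log is echoed and the
  # compiler's return code is propagated.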
  with open(resultfile_path + '.log', 'w') as fd:
    fd.write(resultlog.getvalue())
  if return_code == 0:
    with open(resultfile_path, 'w') as fd:
      fd.write(resultfile.getvalue())

  resultfile.close()
  if return_code != 0:
    print("No-compile driver failure with return_code %d. Result log:" %
          return_code)
    print(resultlog.getvalue())
  sys.exit(return_code)


if __name__ == '__main__':
  main()