# -*- coding: utf-8 -*-
#                     The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module implements the 'scan-build' command API.

To run the static analyzer against a build is done in multiple steps:

 -- Intercept: capture the compilation command during the build,
 -- Analyze: run the analyzer against the captured commands,
 -- Report: create a cover report from the analyzer outputs.  """

import re
import os
import os.path
import json
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil
import glob
from collections import defaultdict

from libscanbuild import command_entry_point, compiler_wrapper, \
    wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
    parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
    compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch
from libscanbuild.shell import decode

__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']

# Names of the compiler wrapper executables interposed via CC/CXX.
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'

# File/folder names used by the cross-translation-unit (CTU) collect phase.
CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'


@command_entry_point
def scan_build():
    """ Entry point for scan-build command. """

    args = parse_args_for_scan_build()
    # will re-assign the report directory as new output
    with report_directory(args.output, args.keep_empty) as args.output:
        # Run against a build command. there are cases, when analyzer run
        # is not required. But we need to set up everything for the
        # wrappers, because 'configure' needs to capture the CC/CXX values
        # for the Makefile.
        if args.intercept_first:
            # Run build command with intercept module.
            exit_code = capture(args)
            # Run the analyzer against the captured commands.
            if need_analyzer(args.build):
                govern_analyzer_runs(args)
        else:
            # Run build command and analyzer with compiler wrappers.
            environment = setup_environment(args)
            exit_code = run_build(args.build, env=environment)
        # Cover report generation and bug counting.
        number_of_bugs = document(args)
        # Set exit status as it was requested.
        return number_of_bugs if args.status_bugs else exit_code


@command_entry_point
def analyze_build():
    """ Entry point for analyze-build command. """

    args = parse_args_for_analyze_build()
    # will re-assign the report directory as new output
    with report_directory(args.output, args.keep_empty) as args.output:
        # Run the analyzer against a compilation db.
        govern_analyzer_runs(args)
        # Cover report generation and bug counting.
        number_of_bugs = document(args)
        # Set exit status as it was requested.
        return number_of_bugs if args.status_bugs else 0


def need_analyzer(args):
    """ Check the intent of the build command.

    When static analyzer run against project configure step, it should be
    silent and no need to run the analyzer or generate report.

    To run `scan-build` against the configure step might be necessary,
    when compiler wrappers are used. That's the moment when build setup
    check the compiler and capture the location for the build process. """

    # NOTE(review): matches anywhere in the first word of the build command;
    # e.g. './configure' and 'autogen.sh' both suppress the analyzer run.
    return len(args) and not re.search('configure|autogen', args[0])


def prefix_with(constant, pieces):
    """ From a sequence create another sequence where every second element
    is from the original sequence and the odd elements are the prefix.

    eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """

    return [elem for piece in pieces for elem in [constant, piece]]


def get_ctu_config_from_args(args):
    """ CTU configuration is created from the chosen phases and dir. """

    return (
        CtuConfig(collect=args.ctu_phases.collect,
                  analyze=args.ctu_phases.analyze,
                  dir=args.ctu_dir,
                  extdef_map_cmd=args.extdef_map_cmd)
        if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
        else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))


def get_ctu_config_from_json(ctu_conf_json):
    """ CTU configuration is created from the chosen phases and dir. """

    ctu_config = json.loads(ctu_conf_json)
    # Recover namedtuple from json when coming from analyze-cc or analyze-c++
    # (json.dumps serialized the namedtuple as a plain 4-element list).
    return CtuConfig(collect=ctu_config[0],
                     analyze=ctu_config[1],
                     dir=ctu_config[2],
                     extdef_map_cmd=ctu_config[3])


def create_global_ctu_extdef_map(extdef_map_lines):
    """ Takes iterator of individual external definition maps and creates a
    global map keeping only unique names. We leave conflicting names out of
    CTU.

    :param extdef_map_lines: Contains the id of a definition (mangled name)
    and the originating source (the corresponding AST file) name.
    :type extdef_map_lines: Iterator of str.
    :returns: Mangled name - AST file pairs.
    :rtype: List of (str, str) tuples. """

    mangled_to_asts = defaultdict(set)

    for line in extdef_map_lines:
        mangled_name, ast_file = line.strip().split(' ', 1)
        mangled_to_asts[mangled_name].add(ast_file)

    mangled_ast_pairs = []

    # Keep only names that map to exactly one AST file; ambiguous names
    # would make the CTU import non-deterministic.
    for mangled_name, ast_files in mangled_to_asts.items():
        if len(ast_files) == 1:
            mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))

    return mangled_ast_pairs


def merge_ctu_extdef_maps(ctudir):
    """ Merge individual external definition maps into a global one.

    As the collect phase runs parallel on multiple threads, all compilation
    units are separately mapped into a temporary file in
    CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the mangled names
    and the source (AST generated from the source) which had their
    definition. These files should be merged at the end into a global map
    file: CTU_EXTDEF_MAP_FILENAME. """

    def generate_extdef_map_lines(extdefmap_dir):
        """ Iterate over all lines of input files in a determined order. """

        files = glob.glob(os.path.join(extdefmap_dir, '*'))
        files.sort()
        for filename in files:
            with open(filename, 'r') as in_file:
                for line in in_file:
                    yield line

    def write_global_map(arch, mangled_ast_pairs):
        """ Write (mangled name, ast file) pairs into final file. """

        extern_defs_map_file = os.path.join(ctudir, arch,
                                            CTU_EXTDEF_MAP_FILENAME)
        with open(extern_defs_map_file, 'w') as out_file:
            for mangled_name, ast_file in mangled_ast_pairs:
                out_file.write('%s %s\n' % (mangled_name, ast_file))

    # One subdirectory per triple arch was created by the collect phase.
    triple_arches = glob.glob(os.path.join(ctudir, '*'))
    for triple_path in triple_arches:
        if os.path.isdir(triple_path):
            triple_arch = os.path.basename(triple_path)
            extdefmap_dir = os.path.join(ctudir, triple_arch,
                                         CTU_TEMP_DEFMAP_FOLDER)

            extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
            mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
            write_global_map(triple_arch, mangled_ast_pairs)

            # Remove all temporary files
            shutil.rmtree(extdefmap_dir, ignore_errors=True)


def run_analyzer_parallel(args):
    """ Runs the analyzer against the given compilation database. """

    def exclude(filename):
        """ Return true when any excluded directory prefix the filename. """
        return any(re.match(r'^' + directory, filename)
                   for directory in args.excludes)

    # Parameters that are identical for every compilation database entry.
    consts = {
        'clang': args.clang,
        'output_dir': args.output,
        'output_format': args.output_format,
        'output_failures': args.output_failures,
        'direct_args': analyzer_params(args),
        'force_debug': args.force_debug,
        'ctu': get_ctu_config_from_args(args)
    }

    logging.debug('run analyzer against compilation database')
    with open(args.cdb, 'r') as handle:
        generator = (dict(cmd, **consts)
                     for cmd in json.load(handle) if not exclude(cmd['file']))
        # when verbose output requested execute sequentially
        pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
        for current in pool.imap_unordered(run, generator):
            if current is not None:
                # display error message from the static analyzer
                for line in current['error_output']:
                    logging.info(line.rstrip())
        pool.close()
        pool.join()


def govern_analyzer_runs(args):
    """ Governs multiple runs in CTU mode or runs once in normal mode. """

    ctu_config = get_ctu_config_from_args(args)
    # If we do a CTU collect (1st phase) we remove all previous collection
    # data first.
    if ctu_config.collect:
        shutil.rmtree(ctu_config.dir, ignore_errors=True)

    # If the user asked for a collect (1st) and analyze (2nd) phase, we do an
    # all-in-one run where we deliberately remove collection data before and
    # also after the run. If the user asks only for a single phase data is
    # left so multiple analyze runs can use the same data gathered by a
    # single collection run.
    if ctu_config.collect and ctu_config.analyze:
        # CTU strings are coming from args.ctu_dir and extdef_map_cmd,
        # so we can leave it empty
        args.ctu_phases = CtuConfig(collect=True, analyze=False,
                                    dir='', extdef_map_cmd='')
        run_analyzer_parallel(args)
        merge_ctu_extdef_maps(ctu_config.dir)
        args.ctu_phases = CtuConfig(collect=False, analyze=True,
                                    dir='', extdef_map_cmd='')
        run_analyzer_parallel(args)
        shutil.rmtree(ctu_config.dir, ignore_errors=True)
    else:
        # Single runs (collect or analyze) are launched from here.
        run_analyzer_parallel(args)
        if ctu_config.collect:
            merge_ctu_extdef_maps(ctu_config.dir)


def setup_environment(args):
    """ Set up environment for build command to interpose compiler wrapper. """

    environment = dict(os.environ)
    environment.update(wrapper_environment(args))
    # The ANALYZE_BUILD_* variables are read back by
    # analyze_compiler_wrapper_impl in the wrapper process.
    environment.update({
        'CC': COMPILER_WRAPPER_CC,
        'CXX': COMPILER_WRAPPER_CXX,
        'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
        'ANALYZE_BUILD_REPORT_DIR': args.output,
        'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
        'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
        'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
        'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
        'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
    })
    return environment


@command_entry_point
def analyze_compiler_wrapper():
    """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """

    return compiler_wrapper(analyze_compiler_wrapper_impl)


def analyze_compiler_wrapper_impl(result, execution):
    """ Implements analyzer compiler wrapper functionality.

    result -- exit code of the real compiler invocation.
    execution -- the intercepted compiler command (cmd and cwd). """

    # don't run analyzer when compilation fails. or when it's not requested.
    if result or not os.getenv('ANALYZE_BUILD_CLANG'):
        return

    # check is it a compilation?
    compilation = split_command(execution.cmd)
    if compilation is None:
        return
    # collect the needed parameters from environment, crash when missing
    parameters = {
        'clang': os.getenv('ANALYZE_BUILD_CLANG'),
        'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
        'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
        'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
        'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
                                 '').split(' '),
        'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
        'directory': execution.cwd,
        'command': [execution.cmd[0], '-c'] + compilation.flags,
        'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
    }
    # call static analyzer against the compilation
    for source in compilation.files:
        parameters.update({'file': source})
        logging.debug('analyzer parameters %s', parameters)
        current = run(parameters)
        # display error message from the static analyzer
        if current is not None:
            for line in current['error_output']:
                logging.info(line.rstrip())


@contextlib.contextmanager
def report_directory(hint, keep):
    """ Responsible for the report directory.

    hint -- could specify the parent directory of the output directory.
    keep -- a boolean value to keep or delete the empty report directory. """

    stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
    stamp = datetime.datetime.now().strftime(stamp_format)
    parent_dir = os.path.abspath(hint)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)

    logging.info('Report directory created: %s', name)

    try:
        yield name
    finally:
        # A non-empty directory always survives, regardless of 'keep'.
        if os.listdir(name):
            msg = "Run 'scan-view %s' to examine bug reports."
            keep = True
        else:
            if keep:
                msg = "Report directory '%s' contains no report, but kept."
            else:
                msg = "Removing directory '%s' because it contains no report."
        logging.warning(msg, name)

        if not keep:
            os.rmdir(name)


def analyzer_params(args):
    """ A group of command line arguments can mapped to command
    line arguments of the analyzer. This method generates those. """

    result = []

    if args.store_model:
        result.append('-analyzer-store={0}'.format(args.store_model))
    if args.constraints_model:
        result.append('-analyzer-constraints={0}'.format(
            args.constraints_model))
    if args.internal_stats:
        result.append('-analyzer-stats')
    if args.analyze_headers:
        result.append('-analyzer-opt-analyze-headers')
    if args.stats:
        result.append('-analyzer-checker=debug.Stats')
    if args.maxloop:
        result.extend(['-analyzer-max-loop', str(args.maxloop)])
    if args.output_format:
        result.append('-analyzer-output={0}'.format(args.output_format))
    if args.analyzer_config:
        result.extend(['-analyzer-config', args.analyzer_config])
    if args.verbose >= 4:
        result.append('-analyzer-display-progress')
    if args.plugins:
        result.extend(prefix_with('-load', args.plugins))
    if args.enable_checker:
        checkers = ','.join(args.enable_checker)
        result.extend(['-analyzer-checker', checkers])
    if args.disable_checker:
        checkers = ','.join(args.disable_checker)
        result.extend(['-analyzer-disable-checker', checkers])

    # Every option is an -Xclang frontend argument for the clang driver.
    return prefix_with('-Xclang', result)


def require(required):
    """ Decorator for checking the required values in state.

    It checks the required attributes in the passed state and stop when
    any of those is missing. """

    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            for key in required:
                if key not in args[0]:
                    raise KeyError('{0} not passed to {1}'.format(
                        key, function.__name__))

            return function(*args, **kwargs)

        return wrapper

    return decorator


@require(['command',  # entry from compilation database
          'directory',  # entry from compilation database
          'file',  # entry from compilation database
          'clang',  # clang executable name (and path)
          'direct_args',  # arguments from command line
          'force_debug',  # kill non debug macros
          'output_dir',  # where generated report files shall go
          'output_format',  # it's 'plist', 'html', both or plist-multi-file
          'output_failures',  # generate crash reports or not
          'ctu'])  # ctu control options
def run(opts):
    """ Entry point to run (or not) static analyzer against a single entry
    of the compilation database.

    This complex task is decomposed into smaller methods which are calling
    each other in chain. If the analyzis is not possible the given method
    just return and break the chain.

    The passed parameter is a python dictionary. Each method first check
    that the needed parameters received. (This is done by the 'require'
    decorator. It's like an 'assert' to check the contract between the
    caller and the called method.) """

    try:
        command = opts.pop('command')
        command = command if isinstance(command, list) else decode(command)
        logging.debug("Run analyzer against '%s'", command)
        opts.update(classify_parameters(command))

        # Start of the continuation chain:
        # arch_check -> language_check -> filter_debug_flags ->
        # dispatch_ctu -> run_analyzer (-> report_failure on error).
        return arch_check(opts)
    except Exception:
        logging.error("Problem occurred during analyzis.", exc_info=1)
        return None


@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
          'error_output', 'exit_code'])
def report_failure(opts):
    """ Create report when analyzer failed.

    The major report is the preprocessor output. The output filename
    generated randomly. The compiler output also captured into '.stderr.txt'
    file. And some more execution context also saved into '.info.txt'
    file. """

    def extension():
        """ Generate preprocessor file extension. """

        mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
        return mapping.get(opts['language'], '.i')

    def destination():
        """ Creates failures directory if not exits yet. """

        failures_dir = os.path.join(opts['output_dir'], 'failures')
        if not os.path.isdir(failures_dir):
            os.makedirs(failures_dir)
        return failures_dir

    # Classify error type: when Clang terminated by a signal it's a 'Crash'.
    # (python subprocess Popen.returncode is negative when child terminated
    # by signal.) Everything else is 'Other Error'.
    error = 'crash' if opts['exit_code'] < 0 else 'other_error'
    # Create preprocessor output file name. (This is blindly following the
    # Perl implementation.)
    (handle, name) = tempfile.mkstemp(suffix=extension(),
                                      prefix='clang_' + error + '_',
                                      dir=destination())
    os.close(handle)
    # Execute Clang again, but run the syntax check only.
    cwd = opts['directory']
    cmd = get_arguments(
        [opts['clang'], '-fsyntax-only', '-E'
         ] + opts['flags'] + [opts['file'], '-o', name], cwd)
    run_command(cmd, cwd=cwd)
    # write general information about the crash
    # NOTE(review): os.uname() is POSIX-only — this path would fail on
    # Windows; confirm against supported platforms.
    with open(name + '.info.txt', 'w') as handle:
        handle.write(opts['file'] + os.linesep)
        handle.write(error.title().replace('_', ' ') + os.linesep)
        handle.write(' '.join(cmd) + os.linesep)
        handle.write(' '.join(os.uname()) + os.linesep)
        handle.write(get_version(opts['clang']))
        handle.close()
    # write the captured output too
    with open(name + '.stderr.txt', 'w') as handle:
        handle.writelines(opts['error_output'])
        handle.close()


@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
          'output_format'])
def run_analyzer(opts, continuation=report_failure):
    """ It assembles the analysis command line and executes it. Capture the
    output of the analysis and returns with it. If failure reports are
    requested, it calls the continuation to generate it. """

    def target():
        """ Creates output file name for reports. """
        if opts['output_format'] in {
                'plist',
                'plist-html',
                'plist-multi-file'}:
            (handle, name) = tempfile.mkstemp(prefix='report-',
                                              suffix='.plist',
                                              dir=opts['output_dir'])
            os.close(handle)
            return name
        return opts['output_dir']

    try:
        cwd = opts['directory']
        cmd = get_arguments([opts['clang'], '--analyze'] +
                            opts['direct_args'] + opts['flags'] +
                            [opts['file'], '-o', target()],
                            cwd)
        output = run_command(cmd, cwd=cwd)
        return {'error_output': output, 'exit_code': 0}
    except subprocess.CalledProcessError as ex:
        result = {'error_output': ex.output, 'exit_code': ex.returncode}
        if opts.get('output_failures', False):
            opts.update(result)
            continuation(opts)
        return result


def extdef_map_list_src_to_ast(extdef_src_list):
    """ Turns textual external definition map list with source files into an
    external definition map list with ast files. """

    extdef_ast_list = []
    for extdef_src_txt in extdef_src_list:
        mangled_name, path = extdef_src_txt.split(" ", 1)
        # Normalize path on windows as well
        path = os.path.splitdrive(path)[1]
        # Make relative path out of absolute
        path = path[1:] if path[0] == os.sep else path
        ast_path = os.path.join("ast", path + ".ast")
        extdef_ast_list.append(mangled_name + " " + ast_path)
    return extdef_ast_list


@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
    """ Preprocess source by generating all data needed by CTU analysis. """

    def generate_ast(triple_arch):
        """ Generates ASTs for the current compilation command. """

        args = opts['direct_args'] + opts['flags']
        ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
                                       os.path.realpath(opts['file'])[1:] +
                                       '.ast')
        ast_path = os.path.abspath(ast_joined_path)
        ast_dir = os.path.dirname(ast_path)
        if not os.path.isdir(ast_dir):
            try:
                os.makedirs(ast_dir)
            except OSError:
                # In case an other process already created it.
                pass
        ast_command = [opts['clang'], '-emit-ast']
        ast_command.extend(args)
        ast_command.append('-w')
        ast_command.append(opts['file'])
        ast_command.append('-o')
        ast_command.append(ast_path)
        logging.debug("Generating AST using '%s'", ast_command)
        run_command(ast_command, cwd=opts['directory'])

    def map_extdefs(triple_arch):
        """ Generate external definition map file for the current source. """

        args = opts['direct_args'] + opts['flags']
        extdefmap_command = [opts['ctu'].extdef_map_cmd]
        extdefmap_command.append(opts['file'])
        extdefmap_command.append('--')
        extdefmap_command.extend(args)
        logging.debug("Generating external definition map using '%s'",
                      extdefmap_command)
        extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
        extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
        extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
                                              CTU_TEMP_DEFMAP_FOLDER)
        if not os.path.isdir(extern_defs_map_folder):
            try:
                os.makedirs(extern_defs_map_folder)
            except OSError:
                # In case an other process already created it.
                pass
        if extdef_ast_list:
            with tempfile.NamedTemporaryFile(mode='w',
                                             dir=extern_defs_map_folder,
                                             delete=False) as out_file:
                out_file.write("\n".join(extdef_ast_list) + "\n")

    cwd = opts['directory']
    cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
        + [opts['file']]
    triple_arch = get_triple_arch(cmd, cwd)
    generate_ast(triple_arch)
    map_extdefs(triple_arch)


@require(['ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
    """ Execute only one phase of 2 phases of CTU if needed. """

    ctu_config = opts['ctu']

    if ctu_config.collect or ctu_config.analyze:
        # The orchestration in govern_analyzer_runs guarantees that only
        # one phase is requested per analyzer invocation.
        assert ctu_config.collect != ctu_config.analyze
        if ctu_config.collect:
            return ctu_collect_phase(opts)
        if ctu_config.analyze:
            cwd = opts['directory']
            cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
                + opts['flags'] + [opts['file']]
            triarch = get_triple_arch(cmd, cwd)
            ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
                           'experimental-enable-naive-ctu-analysis=true']
            analyzer_options = prefix_with('-analyzer-config', ctu_options)
            direct_options = prefix_with('-Xanalyzer', analyzer_options)
            opts['direct_args'].extend(direct_options)

    return continuation(opts)


@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
    """ Filter out nondebug macros when requested. """

    if opts.pop('force_debug'):
        # lazy implementation just append an undefine macro at the end
        opts.update({'flags': opts['flags'] + ['-UNDEBUG']})

    return continuation(opts)


@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
    """ Find out the language from command line parameters or file name
    extension. The decision also influenced by the compiler invocation. """

    accepted = frozenset({
        'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
        'c++-cpp-output', 'objective-c-cpp-output'
    })

    # language can be given as a parameter...
    language = opts.pop('language')
    compiler = opts.pop('compiler')
    # ... or find out from source file extension
    if language is None and compiler is not None:
        language = classify_source(opts['file'], compiler == 'c')

    if language is None:
        logging.debug('skip analysis, language not known')
        return None
    elif language not in accepted:
        logging.debug('skip analysis, language not supported')
        return None
    else:
        logging.debug('analysis, language: %s', language)
        opts.update({'language': language,
                     'flags': ['-x', language] + opts['flags']})
        return continuation(opts)


@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
    """ Do run analyzer through one of the given architectures. """

    disabled = frozenset({'ppc', 'ppc64'})

    received_list = opts.pop('arch_list')
    if received_list:
        # filter out disabled architectures and -arch switches
        filtered_list = [a for a in received_list if a not in disabled]
        if filtered_list:
            # There should be only one arch given (or the same multiple
            # times). If there are multiple arch are given and are not
            # the same, those should not change the pre-processing step.
            # But that's the only pass we have before run the analyzer.
            current = filtered_list.pop()
            logging.debug('analysis, on arch: %s', current)

            opts.update({'flags': ['-arch', current] + opts['flags']})
            return continuation(opts)
        else:
            logging.debug('skip analysis, found not supported arch')
            return None
    else:
        logging.debug('analysis, on default arch')
        return continuation(opts)


# To have good results from static analyzer certain compiler options shall be
# omitted.
# The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
    '-c': 0,  # compile option will be overwritten
    '-fsyntax-only': 0,  # static analyzer option will be overwritten
    '-o': 1,  # will set up own output file
    # flags below are inherited from the perl implementation.
    '-g': 0,
    '-save-temps': 0,
    '-install_name': 1,
    '-exported_symbols_list': 1,
    '-current_version': 1,
    '-compatibility_version': 1,
    '-init': 1,
    '-e': 1,
    '-seg1addr': 1,
    '-bundle_loader': 1,
    '-multiply_defined': 1,
    '-sectorder': 3,
    '--param': 1,
    '--serialize-diagnostics': 1
}


def classify_parameters(command):
    """ Prepare compiler flags (filters some and add others) and take out
    language (-x) and architecture (-arch) flags for future processing. """

    state = {
        'flags': [],  # the filtered compiler flags
        'arch_list': [],  # list of architecture flags
        'language': None,  # compilation language, None, if not specified
        'compiler': compiler_language(command)  # 'c' or 'c++'
    }

    # Walk the options after the program name; some options consume the
    # token(s) that follow them.
    option_stream = iter(command[1:])
    for option in option_stream:
        # '-arch' values are collected into their own list.
        if option == '-arch':
            state['arch_list'].append(next(option_stream))
        # an explicit '-x' names the compilation language.
        elif option == '-x':
            state['language'] = next(option_stream)
        # arguments which look like source files are not flags.
        elif re.match(r'^[^-].+', option) and classify_source(option):
            pass
        # drop ignored flags together with the values they consume.
        elif option in IGNORED_FLAGS:
            for _ in range(IGNORED_FLAGS[option]):
                next(option_stream)
        # drop warning requests, but keep suppressions ('-Wno-...') since
        # those silence output we do not want to see.
        elif re.match(r'^-W.+', option) and not re.match(r'^-Wno-.+', option):
            pass
        # everything else is forwarded as a compilation flag.
        else:
            state['flags'].append(option)

    return state