1# -*- coding: utf-8 -*-
2# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3# See https://llvm.org/LICENSE.txt for license information.
4# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5""" This module implements the 'scan-build' command API.
6
7To run the static analyzer against a build is done in multiple steps:
8
9 -- Intercept: capture the compilation command during the build,
10 -- Analyze:   run the analyzer against the captured commands,
11 -- Report:    create a cover report from the analyzer outputs.  """
12
13import re
14import os
15import os.path
16import json
17import logging
18import multiprocessing
19import tempfile
20import functools
21import subprocess
22import contextlib
23import datetime
24import shutil
25import glob
26from collections import defaultdict
27
28from libscanbuild import command_entry_point, compiler_wrapper, \
29    wrapper_environment, run_build, run_command, CtuConfig
30from libscanbuild.arguments import parse_args_for_scan_build, \
31    parse_args_for_analyze_build
32from libscanbuild.intercept import capture
33from libscanbuild.report import document
34from libscanbuild.compilation import split_command, classify_source, \
35    compiler_language
36from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
37    ClangErrorException
38from libscanbuild.shell import decode
39
40__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
41
42COMPILER_WRAPPER_CC = 'analyze-cc'
43COMPILER_WRAPPER_CXX = 'analyze-c++'
44
45CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
46CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
47
48
@command_entry_point
def scan_build():
    """ Entry point for scan-build command.

    Runs the build either with compilation-command interception or with
    compiler wrappers, analyzes the discovered compilations, then
    generates the cover report. Returns the bug count when
    '--status-bugs' was requested, otherwise the build's exit code. """

    args = parse_args_for_scan_build()
    # will re-assign the report directory as new output
    # ('report_directory' creates a fresh timestamped directory under
    # args.output; the 'as' target rebinds args.output to it)
    with report_directory(args.output, args.keep_empty) as args.output:
        # Run against a build command. there are cases, when analyzer run
        # is not required. But we need to set up everything for the
        # wrappers, because 'configure' needs to capture the CC/CXX values
        # for the Makefile.
        if args.intercept_first:
            # Run build command with intercept module.
            exit_code = capture(args)
            # Run the analyzer against the captured commands.
            if need_analyzer(args.build):
                govern_analyzer_runs(args)
        else:
            # Run build command and analyzer with compiler wrappers.
            environment = setup_environment(args)
            exit_code = run_build(args.build, env=environment)
        # Cover report generation and bug counting.
        number_of_bugs = document(args)
        # Set exit status as it was requested.
        return number_of_bugs if args.status_bugs else exit_code
74
75
@command_entry_point
def analyze_build():
    """ Entry point for analyze-build command. """

    args = parse_args_for_analyze_build()
    # The report directory is created now and re-assigned as the output.
    with report_directory(args.output, args.keep_empty) as args.output:
        # Analyze every entry found in the compilation database.
        govern_analyzer_runs(args)
        # Generate the cover report and count the bugs found.
        bug_count = document(args)
        # Exit status follows the '--status-bugs' request.
        if args.status_bugs:
            return bug_count
        return 0
89
90
def need_analyzer(args):
    """ Check the intent of the build command.

    When static analyzer run against project configure step, it should be
    silent and no need to run the analyzer or generate report.

    To run `scan-build` against the configure step might be necessary,
    when compiler wrappers are used. That's the moment when build setup
    check the compiler and capture the location for the build process.

    :param args: the build command as a list of strings.
    :returns: True when the analyzer should run against this build. """

    # Return a real bool instead of the bare int 0 that the previous
    # `len(args) and ...` expression produced for an empty command.
    # Callers only test truthiness, so this is backward-compatible.
    # Note: only the first token (the program name) is inspected.
    return bool(args) and not re.search(r'configure|autogen', args[0])
102
103
def prefix_with(constant, pieces):
    """ From a sequence create another sequence where every second element
    is from the original sequence and the odd elements are the prefix.

    eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """

    result = []
    for piece in pieces:
        result.append(constant)
        result.append(piece)
    return result
111
112
def get_ctu_config_from_args(args):
    """ CTU configuration is created from the chosen phases and dir. """

    # A fully populated 'ctu_phases' (it has a 'dir' field) means CTU was
    # requested on the command line; otherwise CTU is switched off.
    if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir'):
        return CtuConfig(collect=args.ctu_phases.collect,
                         analyze=args.ctu_phases.analyze,
                         dir=args.ctu_dir,
                         extdef_map_cmd=args.extdef_map_cmd)
    return CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')
123
124
def get_ctu_config_from_json(ctu_conf_json):
    """ CTU configuration is created from the chosen phases and dir. """

    values = json.loads(ctu_conf_json)
    # Recover namedtuple from json when coming from analyze-cc or analyze-c++
    # (json serializes the namedtuple as a plain positional list).
    return CtuConfig(collect=values[0], analyze=values[1],
                     dir=values[2], extdef_map_cmd=values[3])
134
135
def create_global_ctu_extdef_map(extdef_map_lines):
    """ Takes iterator of individual external definition maps and creates a
    global map keeping only unique names. We leave conflicting names out of
    CTU.

    :param extdef_map_lines: Contains the id of a definition (mangled name) and
    the originating source (the corresponding AST file) name.
    :type extdef_map_lines: Iterator of str.
    :returns: Mangled name - AST file pairs.
    :rtype: List of (str, str) tuples.
    """

    # group every AST file seen for a given mangled name
    asts_by_name = defaultdict(set)
    for entry in extdef_map_lines:
        name, ast_file = entry.strip().split(' ', 1)
        asts_by_name[name].add(ast_file)

    # keep only the unambiguous names (exactly one defining AST file)
    return [(name, next(iter(ast_files)))
            for name, ast_files in asts_by_name.items()
            if len(ast_files) == 1]
161
162
def merge_ctu_extdef_maps(ctudir):
    """ Merge individual external definition maps into a global one.

    As the collect phase runs parallel on multiple threads, all compilation
    units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.
    These definition maps contain the mangled names and the source
    (AST generated from the source) which had their definition.
    These files should be merged at the end into a global map file:
    CTU_EXTDEF_MAP_FILENAME."""

    def generate_extdef_map_lines(extdefmap_dir):
        """ Iterate over all lines of input files in a determined order. """

        # sorting makes the merged output deterministic regardless of the
        # order the parallel collect jobs created the files
        files = glob.glob(os.path.join(extdefmap_dir, '*'))
        files.sort()
        for filename in files:
            with open(filename, 'r') as in_file:
                for line in in_file:
                    yield line

    def write_global_map(arch, mangled_ast_pairs):
        """ Write (mangled name, ast file) pairs into final file. """

        extern_defs_map_file = os.path.join(ctudir, arch,
                                           CTU_EXTDEF_MAP_FILENAME)
        with open(extern_defs_map_file, 'w') as out_file:
            for mangled_name, ast_file in mangled_ast_pairs:
                out_file.write('%s %s\n' % (mangled_name, ast_file))

    # one subdirectory exists per target triple; merge each independently
    triple_arches = glob.glob(os.path.join(ctudir, '*'))
    for triple_path in triple_arches:
        if os.path.isdir(triple_path):
            triple_arch = os.path.basename(triple_path)
            extdefmap_dir = os.path.join(ctudir, triple_arch,
                                     CTU_TEMP_DEFMAP_FOLDER)

            extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
            mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
            write_global_map(triple_arch, mangled_ast_pairs)

            # Remove all temporary files
            shutil.rmtree(extdefmap_dir, ignore_errors=True)
205
206
def run_analyzer_parallel(args):
    """ Runs the analyzer against the given compilation database.

    Reads the compilation database (args.cdb), filters out excluded
    entries, then runs the analyzer over each remaining entry on a
    process pool. """

    def exclude(filename):
        """ Return true when any excluded directory prefix the filename. """
        # NOTE(review): the excluded directory is used directly as a regex
        # prefix; metacharacters in the path would change the match —
        # assumes the excludes are plain paths, confirm against callers.
        return any(re.match(r'^' + directory, filename)
                   for directory in args.excludes)

    # options shared by every analyzer invocation, merged into each entry
    consts = {
        'clang': args.clang,
        'output_dir': args.output,
        'output_format': args.output_format,
        'output_failures': args.output_failures,
        'direct_args': analyzer_params(args),
        'force_debug': args.force_debug,
        'ctu': get_ctu_config_from_args(args)
    }

    logging.debug('run analyzer against compilation database')
    with open(args.cdb, 'r') as handle:
        # each job is a compilation db entry merged with the shared options
        generator = (dict(cmd, **consts)
                     for cmd in json.load(handle) if not exclude(cmd['file']))
        # when verbose output requested execute sequentially
        pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
        for current in pool.imap_unordered(run, generator):
            if current is not None:
                # display error message from the static analyzer
                for line in current['error_output']:
                    logging.info(line.rstrip())
        pool.close()
        pool.join()
238
239
def govern_analyzer_runs(args):
    """ Governs multiple runs in CTU mode or runs once in normal mode.

    In all-in-one CTU mode (collect and analyze both requested) the
    analyzer runs twice — a collect pass, a map merge, then an analyze
    pass — and the collected data is removed afterwards. Otherwise a
    single pass runs with the requested configuration. """

    ctu_config = get_ctu_config_from_args(args)
    # If we do a CTU collect (1st phase) we remove all previous collection
    # data first.
    if ctu_config.collect:
        shutil.rmtree(ctu_config.dir, ignore_errors=True)

    # If the user asked for a collect (1st) and analyze (2nd) phase, we do an
    # all-in-one run where we deliberately remove collection data before and
    # also after the run. If the user asks only for a single phase data is
    # left so multiple analyze runs can use the same data gathered by a single
    # collection run.
    if ctu_config.collect and ctu_config.analyze:
        # CTU strings are coming from args.ctu_dir and extdef_map_cmd,
        # so we can leave it empty
        args.ctu_phases = CtuConfig(collect=True, analyze=False,
                                    dir='', extdef_map_cmd='')
        run_analyzer_parallel(args)
        merge_ctu_extdef_maps(ctu_config.dir)
        args.ctu_phases = CtuConfig(collect=False, analyze=True,
                                    dir='', extdef_map_cmd='')
        run_analyzer_parallel(args)
        shutil.rmtree(ctu_config.dir, ignore_errors=True)
    else:
        # Single runs (collect or analyze) are launched from here.
        run_analyzer_parallel(args)
        if ctu_config.collect:
            merge_ctu_extdef_maps(ctu_config.dir)
270
271
def setup_environment(args):
    """ Set up environment for build command to interpose compiler wrapper. """

    # analysis is disabled (empty clang entry) for configure-like builds
    clang = args.clang if need_analyzer(args.build) else ''
    wrapper_settings = {
        'CC': COMPILER_WRAPPER_CC,
        'CXX': COMPILER_WRAPPER_CXX,
        'ANALYZE_BUILD_CLANG': clang,
        'ANALYZE_BUILD_REPORT_DIR': args.output,
        'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
        'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
        'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
        'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
        'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
    }
    environment = dict(os.environ)
    environment.update(wrapper_environment(args))
    environment.update(wrapper_settings)
    return environment
289
290
@command_entry_point
def analyze_compiler_wrapper():
    """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers.

    Delegates to 'compiler_wrapper', which runs the real compiler and then
    calls 'analyze_compiler_wrapper_impl' with its result. """

    return compiler_wrapper(analyze_compiler_wrapper_impl)
296
297
def analyze_compiler_wrapper_impl(result, execution):
    """ Implements analyzer compiler wrapper functionality.

    result -- exit code of the real compiler run.
    execution -- the intercepted compiler invocation ('cmd' and 'cwd'). """

    # don't run analyzer when compilation fails. or when it's not requested.
    if result or not os.getenv('ANALYZE_BUILD_CLANG'):
        return

    # check is it a compilation?
    compilation = split_command(execution.cmd)
    if compilation is None:
        return
    # collect the needed parameters from environment, crash when missing
    parameters = {
        'clang': os.getenv('ANALYZE_BUILD_CLANG'),
        'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
        'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
        'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
        'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
                                 '').split(' '),
        'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
        'directory': execution.cwd,
        # rebuild a compile command from the original program name, '-c'
        # and the split-out flags (the source files are iterated below,
        # so presumably they are no longer part of 'compilation.flags')
        'command': [execution.cmd[0], '-c'] + compilation.flags,
        'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
    }
    # call static analyzer against the compilation
    for source in compilation.files:
        parameters.update({'file': source})
        logging.debug('analyzer parameters %s', parameters)
        current = run(parameters)
        # display error message from the static analyzer
        if current is not None:
            for line in current['error_output']:
                logging.info(line.rstrip())
331
332
@contextlib.contextmanager
def report_directory(hint, keep):
    """ Responsible for the report directory.

    hint -- could specify the parent directory of the output directory.
    keep -- a boolean value to keep or delete the empty report directory. """

    parent_dir = os.path.abspath(hint)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    # a timestamped prefix keeps successive runs apart and sortable
    prefix = datetime.datetime.now().strftime(
        'scan-build-%Y-%m-%d-%H-%M-%S-%f-')
    name = tempfile.mkdtemp(prefix=prefix, dir=parent_dir)

    logging.info('Report directory created: %s', name)

    try:
        yield name
    finally:
        # a non-empty directory holds reports, so it is always kept
        if os.listdir(name):
            keep = True
            msg = "Run 'scan-view %s' to examine bug reports."
        elif keep:
            msg = "Report directory '%s' contains no report, but kept."
        else:
            msg = "Removing directory '%s' because it contains no report."
        logging.warning(msg, name)

        if not keep:
            os.rmdir(name)
364
365
def analyzer_params(args):
    """ A group of command line arguments can mapped to command
    line arguments of the analyzer. This method generates those. """

    flags = []

    if args.store_model:
        flags.append('-analyzer-store={0}'.format(args.store_model))
    if args.constraints_model:
        flags.append(
            '-analyzer-constraints={0}'.format(args.constraints_model))
    if args.internal_stats:
        flags.append('-analyzer-stats')
    if args.analyze_headers:
        flags.append('-analyzer-opt-analyze-headers')
    if args.stats:
        flags.append('-analyzer-checker=debug.Stats')
    if args.maxloop:
        flags.extend(['-analyzer-max-loop', str(args.maxloop)])
    if args.output_format:
        flags.append('-analyzer-output={0}'.format(args.output_format))
    if args.analyzer_config:
        flags.extend(['-analyzer-config', args.analyzer_config])
    if args.verbose >= 4:
        flags.append('-analyzer-display-progress')
    if args.plugins:
        flags.extend(prefix_with('-load', args.plugins))
    if args.enable_checker:
        flags.extend(['-analyzer-checker', ','.join(args.enable_checker)])
    if args.disable_checker:
        flags.extend(
            ['-analyzer-disable-checker', ','.join(args.disable_checker)])

    # every analyzer option must ride behind a '-Xclang' on the driver line
    return prefix_with('-Xclang', flags)
401
402
def require(required):
    """ Decorator for checking the required values in state.

    It checks the required attributes in the passed state and stop when
    any of those is missing. """

    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            state = args[0]
            missing = [key for key in required if key not in state]
            if missing:
                raise KeyError('{0} not passed to {1}'.format(
                    missing[0], function.__name__))
            return function(*args, **kwargs)

        return wrapper

    return decorator
422
423
@require(['command',  # entry from compilation database
          'directory',  # entry from compilation database
          'file',  # entry from compilation database
          'clang',  # clang executable name (and path)
          'direct_args',  # arguments from command line
          'force_debug',  # kill non debug macros
          'output_dir',  # where generated report files shall go
          'output_format',  # it's 'plist', 'html', both or plist-multi-file
          'output_failures',  # generate crash reports or not
          'ctu'])  # ctu control options
def run(opts):
    """ Entry point to run (or not) static analyzer against a single entry
    of the compilation database.

    This complex task is decomposed into smaller methods which are calling
    each other in chain. If the analysis is not possible the given method
    just return and break the chain.

    The passed parameter is a python dictionary. Each method first check
    that the needed parameters received. (This is done by the 'require'
    decorator. It's like an 'assert' to check the contract between the
    caller and the called method.)

    Returns the analyzer result dict, or None when the analysis was
    skipped or a problem occurred. """

    try:
        # 'command' may arrive as a shell-quoted string (compilation
        # database) or as an already tokenized list (compiler wrappers)
        command = opts.pop('command')
        command = command if isinstance(command, list) else decode(command)
        logging.debug("Run analyzer against '%s'", command)
        # split out flags, architecture and language for the chain below
        opts.update(classify_parameters(command))

        # head of the chain: arch_check -> language_check ->
        # filter_debug_flags -> dispatch_ctu -> run_analyzer
        return arch_check(opts)
    except Exception:
        # a single failed entry must not abort the whole run
        logging.error("Problem occurred during analysis.", exc_info=1)
        return None
457
458
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
          'error_output', 'exit_code'])
def report_failure(opts):
    """ Create report when analyzer failed.

    The major report is the preprocessor output. The output filename generated
    randomly. The compiler output also captured into '.stderr.txt' file.
    And some more execution context also saved into '.info.txt' file. """

    def extension():
        """ Generate preprocessor file extension. """

        mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
        return mapping.get(opts['language'], '.i')

    def destination():
        """ Creates failures directory if not exits yet. """

        failures_dir = os.path.join(opts['output_dir'], 'failures')
        if not os.path.isdir(failures_dir):
            os.makedirs(failures_dir)
        return failures_dir

    # Classify error type: when Clang terminated by a signal it's a 'Crash'.
    # (python subprocess Popen.returncode is negative when child terminated
    # by signal.) Everything else is 'Other Error'.
    error = 'crash' if opts['exit_code'] < 0 else 'other_error'
    # Create preprocessor output file name. (This is blindly following the
    # Perl implementation.)
    (handle, name) = tempfile.mkstemp(suffix=extension(),
                                      prefix='clang_' + error + '_',
                                      dir=destination())
    os.close(handle)
    # Execute Clang again, but run the syntax check only.
    cwd = opts['directory']
    cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \
        [opts['file'], '-o', name]
    try:
        cmd = get_arguments(cmd, cwd)
        run_command(cmd, cwd=cwd)
    except (subprocess.CalledProcessError, ClangErrorException):
        # preprocessing is best effort: the crash report is still useful
        # without it, so failures here are deliberately ignored
        pass
    # write general information about the crash
    # (the redundant handle.close() calls inside the 'with' blocks were
    # removed; the context manager closes the file on exit)
    with open(name + '.info.txt', 'w') as handle:
        handle.write(opts['file'] + os.linesep)
        handle.write(error.title().replace('_', ' ') + os.linesep)
        handle.write(' '.join(cmd) + os.linesep)
        handle.write(' '.join(os.uname()) + os.linesep)
        handle.write(get_version(opts['clang']))
    # write the captured output too
    with open(name + '.stderr.txt', 'w') as handle:
        handle.writelines(opts['error_output'])
515
516
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
          'output_format'])
def run_analyzer(opts, continuation=report_failure):
    """ It assembles the analysis command line and executes it. Capture the
    output of the analysis and returns with it. If failure reports are
    requested, it calls the continuation to generate it.

    Returns a dict with 'error_output' (captured analyzer output lines)
    and 'exit_code' keys. """

    def target():
        """ Creates output file name for reports.

        For plist based formats a fresh report file is created inside the
        output directory; otherwise the output directory itself is handed
        to clang. """
        if opts['output_format'] in {
                'plist',
                'plist-html',
                'plist-multi-file'}:
            (handle, name) = tempfile.mkstemp(prefix='report-',
                                              suffix='.plist',
                                              dir=opts['output_dir'])
            os.close(handle)
            return name
        return opts['output_dir']

    try:
        cwd = opts['directory']
        # finalize the command line with the help of clang itself
        # (see get_arguments in libscanbuild.clang)
        cmd = get_arguments([opts['clang'], '--analyze'] +
                            opts['direct_args'] + opts['flags'] +
                            [opts['file'], '-o', target()],
                            cwd)
        output = run_command(cmd, cwd=cwd)
        return {'error_output': output, 'exit_code': 0}
    except subprocess.CalledProcessError as ex:
        # the analyzer failed; optionally create a crash report for it
        result = {'error_output': ex.output, 'exit_code': ex.returncode}
        if opts.get('output_failures', False):
            opts.update(result)
            continuation(opts)
        return result
    except ClangErrorException as ex:
        # clang reported errors without a failing exit code
        result = {'error_output': ex.error, 'exit_code': 0}
        if opts.get('output_failures', False):
            opts.update(result)
            continuation(opts)
        return result
557
558
def extdef_map_list_src_to_ast(extdef_src_list):
    """ Turns textual external definition map list with source files into an
    external definition map list with ast files. """

    result = []
    for line in extdef_src_list:
        mangled_name, source_path = line.split(" ", 1)
        # Normalize path on windows as well
        source_path = os.path.splitdrive(source_path)[1]
        # Make relative path out of absolute
        if source_path[0] == os.sep:
            source_path = source_path[1:]
        ast_path = os.path.join("ast", source_path + ".ast")
        result.append(mangled_name + " " + ast_path)
    return result
573
574
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
    """ Preprocess source by generating all data needed by CTU analysis.

    Produces the AST dump of the translation unit and its external
    definition map under '<ctu-dir>/<triple-arch>/'. """

    def generate_ast(triple_arch):
        """ Generates ASTs for the current compilation command. """

        args = opts['direct_args'] + opts['flags']
        # realpath()[1:] strips the leading separator so the absolute
        # source path nests under the '<ctu-dir>/<arch>/ast/' root
        ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
                                       os.path.realpath(opts['file'])[1:] +
                                       '.ast')
        ast_path = os.path.abspath(ast_joined_path)
        ast_dir = os.path.dirname(ast_path)
        if not os.path.isdir(ast_dir):
            try:
                os.makedirs(ast_dir)
            except OSError:
                # In case an other process already created it.
                pass
        # '-w' silences warnings: only the AST matters in this phase
        ast_command = [opts['clang'], '-emit-ast']
        ast_command.extend(args)
        ast_command.append('-w')
        ast_command.append(opts['file'])
        ast_command.append('-o')
        ast_command.append(ast_path)
        logging.debug("Generating AST using '%s'", ast_command)
        run_command(ast_command, cwd=opts['directory'])

    def map_extdefs(triple_arch):
        """ Generate external definition map file for the current source. """

        args = opts['direct_args'] + opts['flags']
        extdefmap_command = [opts['ctu'].extdef_map_cmd]
        extdefmap_command.append(opts['file'])
        extdefmap_command.append('--')
        extdefmap_command.extend(args)
        logging.debug("Generating external definition map using '%s'",
                      extdefmap_command)
        extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
        extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
        extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
                                             CTU_TEMP_DEFMAP_FOLDER)
        if not os.path.isdir(extern_defs_map_folder):
            try:
                os.makedirs(extern_defs_map_folder)
            except OSError:
                # In case an other process already created it.
                pass
        if extdef_ast_list:
            # a unique temporary file per compilation unit avoids write
            # races between parallel collect jobs; merge_ctu_extdef_maps()
            # combines them later
            with tempfile.NamedTemporaryFile(mode='w',
                                             dir=extern_defs_map_folder,
                                             delete=False) as out_file:
                out_file.write("\n".join(extdef_ast_list) + "\n")

    cwd = opts['directory']
    cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
        + [opts['file']]
    triple_arch = get_triple_arch(cmd, cwd)
    generate_ast(triple_arch)
    map_extdefs(triple_arch)
635
636
@require(['ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
    """ Execute only one phase of 2 phases of CTU if needed.

    In collect mode the normal analysis is replaced by the CTU data
    generation; in analyze mode the CTU options are appended to the
    analyzer arguments and the chain continues. With CTU off the options
    pass through unchanged. """

    ctu_config = opts['ctu']

    if ctu_config.collect or ctu_config.analyze:
        # govern_analyzer_runs() splits an all-in-one request into two
        # separate runs, so exactly one phase can be active here
        assert ctu_config.collect != ctu_config.analyze
        if ctu_config.collect:
            return ctu_collect_phase(opts)
        if ctu_config.analyze:
            cwd = opts['directory']
            cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
                + opts['flags'] + [opts['file']]
            triarch = get_triple_arch(cmd, cwd)
            ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
                           'experimental-enable-naive-ctu-analysis=true']
            # each '-analyzer-config' entry needs its own '-Xanalyzer'
            analyzer_options = prefix_with('-analyzer-config', ctu_options)
            direct_options = prefix_with('-Xanalyzer', analyzer_options)
            opts['direct_args'].extend(direct_options)

    return continuation(opts)
659
660
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
    """ Filter out nondebug macros when requested. """

    force_debug = opts.pop('force_debug')
    if force_debug:
        # lazy implementation just append an undefine macro at the end
        opts['flags'] = opts['flags'] + ['-UNDEBUG']

    return continuation(opts)
670
671
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
    """ Find out the language from command line parameters or file name
    extension. The decision also influenced by the compiler invocation. """

    accepted = frozenset({
        'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
        'c++-cpp-output', 'objective-c-cpp-output'
    })

    # language can be given as a parameter...
    language = opts.pop('language')
    compiler = opts.pop('compiler')
    # ... or find out from source file extension
    if language is None and compiler is not None:
        language = classify_source(opts['file'], compiler == 'c')

    # guard clauses: unknown or unsupported languages stop the chain
    if language is None:
        logging.debug('skip analysis, language not known')
        return None
    if language not in accepted:
        logging.debug('skip analysis, language not supported')
        return None

    logging.debug('analysis, language: %s', language)
    opts.update({'language': language,
                 'flags': ['-x', language] + opts['flags']})
    return continuation(opts)
700
701
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
    """ Do run analyzer through one of the given architectures. """

    disabled = frozenset({'ppc', 'ppc64'})

    archs = opts.pop('arch_list')
    if not archs:
        logging.debug('analysis, on default arch')
        return continuation(opts)

    # filter out disabled architectures and -arch switches
    supported = [arch for arch in archs if arch not in disabled]
    if not supported:
        logging.debug('skip analysis, found not supported arch')
        return None

    # There should be only one arch given (or the same multiple
    # times). If there are multiple arch are given and are not
    # the same, those should not change the pre-processing step.
    # But that's the only pass we have before run the analyzer.
    selected = supported.pop()
    logging.debug('analysis, on arch: %s', selected)

    opts.update({'flags': ['-arch', selected] + opts['flags']})
    return continuation(opts)
728
729
# To have good results from static analyzer certain compiler options shall be
# omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
    '-c': 0,  # compile option will be overwritten
    '-fsyntax-only': 0,  # static analyzer option will be overwritten
    '-o': 1,  # will set up own output file
    # flags below are inherited from the perl implementation.
    # (mostly linker / Darwin specific options, presumably irrelevant to a
    # syntax-only analyzer run -- kept as-is from the original tool)
    '-g': 0,
    '-save-temps': 0,
    '-install_name': 1,
    '-exported_symbols_list': 1,
    '-current_version': 1,
    '-compatibility_version': 1,
    '-init': 1,
    '-e': 1,
    '-seg1addr': 1,
    '-bundle_loader': 1,
    '-multiply_defined': 1,
    '-sectorder': 3,
    '--param': 1,
    '--serialize-diagnostics': 1
}
754
755
def classify_parameters(command):
    """ Prepare compiler flags (filters some and add others) and take out
    language (-x) and architecture (-arch) flags for future processing. """

    result = {
        'flags': [],  # the filtered compiler flags
        'arch_list': [],  # list of architecture flags
        'language': None,  # compilation language, None, if not specified
        'compiler': compiler_language(command)  # 'c' or 'c++'
    }

    # walk the compile options; flags with arguments consume the iterator
    options = iter(command[1:])
    for option in options:
        # take arch flags into a separate basket
        if option == '-arch':
            result['arch_list'].append(next(options))
        # take language
        elif option == '-x':
            result['language'] = next(options)
        # parameters which looks source file are not flags
        elif re.match(r'^[^-].+', option) and classify_source(option):
            continue
        # ignore some flags together with their arguments
        elif option in IGNORED_FLAGS:
            for _ in range(IGNORED_FLAGS[option]):
                next(options)
        # we don't care about extra warnings, but we should suppress ones
        # that we don't want to see.
        elif re.match(r'^-W.+', option) and not re.match(r'^-Wno-.+', option):
            continue
        # and consider everything else as compilation flag.
        else:
            result['flags'].append(option)

    return result
793