# DExTer : Debugging Experience Tester
# ~~~~~~   ~         ~~         ~   ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Test tool."""

import math
import os
import csv
import pickle
import shutil

from dex.builder import run_external_build_script
from dex.command.ParseCommand import get_command_infos
from dex.debugger.Debuggers import run_debugger_subprocess
from dex.debugger.DebuggerControllers.DefaultController import DefaultController
from dex.debugger.DebuggerControllers.ConditionalController import ConditionalController
from dex.dextIR.DextIR import DextIR
from dex.heuristic import Heuristic
from dex.tools import TestToolBase
from dex.utils.Exceptions import DebuggerException
from dex.utils.Exceptions import BuildScriptException, HeuristicException
from dex.utils.PrettyOutputBase import Stream
from dex.utils.ReturnCode import ReturnCode
from dex.dextIR import BuilderIR


class TestCase(object):
    def __init__(self, context, name, heuristic, error):
        self.context = context
        self.name = name
        self.heuristic = heuristic
        self.error = error

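    # The penalty, max_penalty and score properties fall back to NaN when no
    # heuristic is attached, i.e. when the test failed before it could be
    # scored.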
    @property
    def penalty(self):
        try:
            return self.heuristic.penalty
        except AttributeError:
            return float('nan')

    @property
    def max_penalty(self):
        try:
            return self.heuristic.max_penalty
        except AttributeError:
            return float('nan')

    @property
    def score(self):
        try:
            return self.heuristic.score
        except AttributeError:
            return float('nan')

    def __str__(self):
        if self.error and self.context.options.verbose:
            verbose_error = str(self.error)
        else:
            verbose_error = ''

        if self.error:
            script_error = (' : {}'.format(
                self.error.script_error.splitlines()[0]) if getattr(
                    self.error, 'script_error', None) else '')

            error = ' [{}{}]'.format(
                str(self.error).splitlines()[0], script_error)
        else:
            error = ''

        try:
            summary = self.heuristic.summary_string
        except AttributeError:
            summary = '<r>nan/nan (nan)</>'
        return '{}: {}{}\n{}'.format(self.name, summary, error, verbose_error)


class Tool(TestToolBase):
    """Run the specified DExTer test(s) with the given compiler and linker
    options, produce a dextIR results file, and print the debugging experience
    score calculated by the DExTer heuristic.
    """

    def __init__(self, *args, **kwargs):
        super(Tool, self).__init__(*args, **kwargs)
        self._test_cases = []

    @property
    def name(self):
        return 'DExTer test'

    def add_tool_arguments(self, parser, defaults):
        parser.add_argument('--fail-lt',
                            type=float,
                            default=0.0, # By default TEST always succeeds.
                            help='exit with status FAIL(2) if the test result'
                                ' is less than this value.',
                            metavar='<float>')
        parser.add_argument('--calculate-average',
                            action="store_true",
                            help='calculate the average score of every test run')
        super(Tool, self).add_tool_arguments(parser, defaults)

    def _build_test_case(self):
        """Build an executable in the working directory from the test source,
        using the given --builder script and flags (--cflags, --ldflags).
        Alternatively, if the --binary option has been given, copy the provided
        executable into the working directory and rename it to match the
        --builder output.
        """

        options = self.context.options
        if options.binary:
            # Copy the user's binary into the tmp working directory.
            shutil.copy(options.binary, options.executable)
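            # There is no build step to describe, so record a minimal
            # BuilderIR with the user-supplied binary path in place of
            # compiler flags.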
            builderIR = BuilderIR(
                name='binary',
                cflags=[options.binary],
                ldflags='')
        else:
            compiler_options = [options.cflags for _ in options.source_files]
            linker_options = options.ldflags
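            # Each source file is compiled with the same --cflags and the
            # result is linked with --ldflags by the external build script.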
            _, _, builderIR = run_external_build_script(
                self.context,
                script_path=self.build_script,
                source_files=options.source_files,
                compiler_options=compiler_options,
                linker_options=linker_options,
                executable_file=options.executable)
        return builderIR

    def _init_debugger_controller(self):
        step_collection = DextIR(
            executable_path=self.context.options.executable,
            source_paths=self.context.options.source_files,
            dexter_version=self.context.version)

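        # Parse the DExTer commands embedded in the test files; any additional
        # source files discovered during parsing are appended to the list of
        # sources.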
        step_collection.commands, new_source_files = get_command_infos(
            self.context.options.test_files, self.context.options.source_root_dir)

        self.context.options.source_files.extend(list(new_source_files))

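        # Tests that use DexLimitSteps need the conditional controller, which
        # restricts stepping to the ranges those commands request; otherwise
        # the default controller steps through the whole program.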
        if 'DexLimitSteps' in step_collection.commands:
            debugger_controller = ConditionalController(self.context, step_collection)
        else:
            debugger_controller = DefaultController(self.context, step_collection)

        return debugger_controller

    def _get_steps(self, builderIR):
        """Generate a list of debugger steps from a test case.
        """
        debugger_controller = self._init_debugger_controller()
        debugger_controller = run_debugger_subprocess(
            debugger_controller, self.context.working_directory.path)
        steps = debugger_controller.step_collection
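        # Attach the builder information so the build configuration is
        # recorded alongside the collected steps.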
        steps.builder = builderIR
        return steps

    def _get_results_basename(self, test_name):
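        # Flatten the test path into a single file name by joining its path
        # components with underscores, e.g. 'foo/bar/baz.c' becomes
        # 'foo_bar_baz.c'.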
        def splitall(x):
            while len(x) > 0:
                x, y = os.path.split(x)
                yield y
        all_components = reversed(list(splitall(test_name)))
        return '_'.join(all_components)

    def _get_results_path(self, test_name):
        """Returns the base path for the result files of the test denoted by
        test_name.
        """
        return os.path.join(self.context.options.results_directory,
                            self._get_results_basename(test_name))

    def _get_results_text_path(self, test_name):
        """Returns the path to the results .txt file for the test denoted by
        test_name.
        """
        test_results_path = self._get_results_path(test_name)
        return '{}.txt'.format(test_results_path)

    def _get_results_pickle_path(self, test_name):
        """Returns the path to the results .dextIR file for the test denoted
        by test_name.
        """
        test_results_path = self._get_results_path(test_name)
        return '{}.dextIR'.format(test_results_path)

    def _record_steps(self, test_name, steps):
        """Write the set of steps out to the test's .txt and .dextIR results
        files.
        """
        output_text_path = self._get_results_text_path(test_name)
        with open(output_text_path, 'w') as fp:
            self.context.o.auto(str(steps), stream=Stream(fp))

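        # Also pickle the full DextIR object so the recorded steps can be
        # reloaded later without re-running the debugger.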
        output_dextIR_path = self._get_results_pickle_path(test_name)
        with open(output_dextIR_path, 'wb') as fp:
            pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)

    def _record_score(self, test_name, heuristic):
        """Write out the test's heuristic score to the results .txt file.
        """
        output_text_path = self._get_results_text_path(test_name)
        with open(output_text_path, 'a') as fp:
            self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))

    def _record_test_and_display(self, test_case):
        """Output the test case to the output stream and record it internally
        for later handling.
        """
        self.context.o.auto(test_case)
        self._test_cases.append(test_case)

    def _record_failed_test(self, test_name, exception):
        """Instantiate a failed test case with the failure exception and store
        it internally.
        """
        test_case = TestCase(self.context, test_name, None, exception)
        self._record_test_and_display(test_case)

    def _record_successful_test(self, test_name, steps, heuristic):
        """Instantiate a successful test run and store it for later handling.
        Display verbose output for the test case if required.
        """
        test_case = TestCase(self.context, test_name, heuristic, None)
        self._record_test_and_display(test_case)
        if self.context.options.verbose:
            self.context.o.auto('\n{}\n'.format(steps))
            self.context.o.auto(heuristic.verbose_output)

    def _run_test(self, test_name):
        """Attempt to build and run the test files specified in
        options.source_files, storing the result internally in
        self._test_cases.
        """
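        # Build the test, drive it under the debugger to collect steps, record
        # them to disk and score the debugging experience. Any failure along
        # the way is recorded as a failed test case rather than aborting the
        # whole run.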
        try:
            builderIR = self._build_test_case()
            steps = self._get_steps(builderIR)
            self._record_steps(test_name, steps)
            heuristic_score = Heuristic(self.context, steps)
            self._record_score(test_name, heuristic_score)
        except (BuildScriptException, DebuggerException,
                HeuristicException) as e:
            self._record_failed_test(test_name, e)
            return

        self._record_successful_test(test_name, steps, heuristic_score)
        return

    def _handle_results(self) -> ReturnCode:
        return_code = ReturnCode.OK
        options = self.context.options

        if not options.verbose:
            self.context.o.auto('\n')

        if options.calculate_average:
            # Calculate and print the average score
            score_sum = 0.0
            num_tests = 0
            for test_case in self._test_cases:
                score = test_case.score
                if not test_case.error and not math.isnan(score):
                    score_sum += test_case.score
                    num_tests += 1

            if num_tests != 0:
                print("@avg: ({:.4f})".format(score_sum / num_tests))

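        # Write a summary of every test case to summary.csv and work out the
        # overall return code: any error, or any score below --fail-lt, fails
        # the run.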
        summary_path = os.path.join(options.results_directory, 'summary.csv')
        with open(summary_path, mode='w', newline='') as fp:
            writer = csv.writer(fp, delimiter=',')
            writer.writerow(['Test Case', 'Score', 'Error'])

            for test_case in self._test_cases:
                if (test_case.score < options.fail_lt or
                        test_case.error is not None):
                    return_code = ReturnCode.FAIL

                writer.writerow([
                    test_case.name, '{:.4f}'.format(test_case.score),
                    test_case.error
                ])

        return return_code