# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

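"""Run standalone benchmarks in the SpiderMonkey JS shell.

Each benchmark parses the shell's stdout into a Perfherder-compatible suite;
with --perfherder, the suite is logged to stdout as PERFHERDER_DATA.
"""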
from __future__ import absolute_import, division, print_function

import json
import os
import re
import shutil
import sys
from abc import ABCMeta, abstractmethod, abstractproperty
from argparse import ArgumentParser
from collections import defaultdict

import six
from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
from mozprocess import ProcessHandler

here = os.path.abspath(os.path.dirname(__file__))
build = MozbuildObject.from_environment(cwd=here)

JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
""".strip()


@six.add_metaclass(ABCMeta)
class Benchmark(object):
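    """Base class for running a benchmark in the JS shell.

    Subclasses override process_line() to parse each line of shell output and
    collect_results() to fill in the Perfherder suite once the process exits.
    """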
    lower_is_better = True
    should_alert = True

    def __init__(self, shell, args=None, shell_name=None):
        self.shell = shell
        self.args = args
        self.shell_name = shell_name
        self._version = None

    @abstractproperty
    def unit(self):
        """Returns the unit of measurement of the benchmark."""

    @abstractproperty
    def name(self):
        """Returns the string name of the benchmark."""

    @abstractproperty
    def path(self):
        """Return the path to the benchmark relative to topsrcdir."""

    @abstractmethod
    def process_line(self, line):
        """Process a line of stdout from the benchmark."""

    @abstractmethod
    def collect_results(self):
        """Build the result after the process has finished."""

    @property
    def command(self):
        """Returns the command to run as a list."""
        cmd = [self.shell]
        if self.args:
            cmd += self.args
        return cmd

    @property
    def version(self):
        if self._version:
            return self._version

        with open(os.path.join(self.path, "VERSION"), "r") as fh:
            self._version = fh.read().strip("\r\n \t")
        return self._version

    def reset(self):
        """Resets state between runs."""
        name = self.name
        if self.shell_name:
            name = "{}-{}".format(name, self.shell_name)

        self.perfherder_data = {
            "framework": {
                "name": "js-bench",
            },
            "suites": [
                {
                    "lowerIsBetter": self.lower_is_better,
                    "name": name,
                    "shouldAlert": self.should_alert,
                    "subtests": [],
                    "unit": self.unit,
                    "value": None,
                },
            ],
        }
        self.suite = self.perfherder_data["suites"][0]

    def _provision_benchmark_script(self):
        if os.path.isdir(self.path):
            return

        # Some benchmarks may have been downloaded from a fetch task; make
        # sure they get copied over.
        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
        if fetches_dir and os.path.isdir(fetches_dir):
            fetchdir = os.path.join(fetches_dir, self.name)
            if os.path.isdir(fetchdir):
                shutil.copytree(fetchdir, self.path)

    def run(self):
        self.reset()

        # Update the environment variables
        env = os.environ.copy()

        process_args = {
            "cmd": self.command,
            "cwd": self.path,
            "onFinish": self.collect_results,
            "processOutputLine": self.process_line,
            "stream": sys.stdout,
            "env": env,
            "universal_newlines": True,
        }
        proc = ProcessHandler(**process_args)
        proc.run()
        return proc.wait()


class RunOnceBenchmark(Benchmark):
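    """A benchmark that is run a single time; the suite value is the sum of
    all subtest scores."""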
    def collect_results(self):
        bench_total = 0
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                test_name = "{}-{}".format(self.name, score)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                bench_total += int(sum(values))
        self.suite["value"] = bench_total


class Ares6(Benchmark):
    name = "ares6"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "ARES-6")
    unit = "ms"

    @property
    def command(self):
        cmd = super(Ares6, self).command
        return cmd + ["cli.js"]

    def reset(self):
        super(Ares6, self).reset()

        self.bench_name = None
        self.last_summary = None
        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

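    # ARES-6's cli.js reports each sub-benchmark with a "Running... <name> (...)"
    # header followed by score lines such as (illustrative):
    #   firstIteration:     45.67 +- 1.23 ms
    # and a final "summary:" line; the helpers below parse those.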
    def _try_find_score(self, score_name, line):
        m = re.search(score_name + r":\s*(\d+\.?\d*?) (\+-)?.+", line)
        if not m:
            return False

        score = m.group(1)
        self.scores[self.bench_name][score_name].append(float(score))
        return True

    def process_line(self, line):
        m = re.search(r"Running... (.+) \(.+\)", line)
        if m:
            self.bench_name = m.group(1)
            return

        if self._try_find_score("firstIteration", line):
            return

        if self._try_find_score("averageWorstCase", line):
            return

        if self._try_find_score("steadyState", line):
            return

        m = re.search(r"summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
        if m:
            self.last_summary = float(m.group(1))

    def collect_results(self):
        for bench, scores in self.scores.items():
            for score, values in scores.items():
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                test_name = "{}-{}".format(bench, score)
                self.suite["subtests"].append({"name": test_name, "value": mean})

        if self.last_summary:
            self.suite["value"] = self.last_summary


class SixSpeed(RunOnceBenchmark):
    name = "six-speed"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "six-speed")
    unit = "ms"

    @property
    def command(self):
        cmd = super(SixSpeed, self).command
        return cmd + ["test.js"]

    def reset(self):
        super(SixSpeed, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

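    # test.js emits one "<subtest>: <time>" line per test; each integer value
    # is recorded under the suite name.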
    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class SunSpider(RunOnceBenchmark):
    name = "sunspider"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "SunSpider", "sunspider-0.9.1"
    )
    unit = "ms"

    @property
    def command(self):
        cmd = super(SunSpider, self).command
        return cmd + ["sunspider-standalone-driver.js"]

    def reset(self):
        super(SunSpider, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

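    # The standalone SunSpider driver also prints one "<subtest>: <time>" line
    # per test, parsed the same way as six-speed above.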
    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))


class WebToolingBenchmark(Benchmark):
    name = "web-tooling-benchmark"
    path = os.path.join(
        "third_party", "webkit", "PerformanceTests", "web-tooling-benchmark"
    )
    main_js = "cli.js"
    unit = "score"
    lower_is_better = False
    subtests_lower_is_better = False

    @property
    def command(self):
        cmd = super(WebToolingBenchmark, self).command
        return cmd + [self.main_js]

    def reset(self):
        super(WebToolingBenchmark, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

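    # cli.js prints one "  <tool>: <score> runs/sec" line per subtest, plus a
    # "mean" entry that collect_results() uses as the overall suite value.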
    def process_line(self, output):
        m = re.search(r" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(float(score))

    def collect_results(self):
        # NOTE: for this benchmark we run the test once, so we have a single value array
        bench_mean = None
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append(
                    {
                        "lowerIsBetter": self.subtests_lower_is_better,
                        "name": test_name,
                        "value": mean,
                    }
                )
                if score_name == "mean":
                    bench_mean = mean
        self.suite["value"] = bench_mean

    def run(self):
        self._provision_benchmark_script()
        return super(WebToolingBenchmark, self).run()


class Octane(RunOnceBenchmark):
    name = "octane"
    path = os.path.join("third_party", "webkit", "PerformanceTests", "octane")
    unit = "score"
    lower_is_better = False

    @property
    def command(self):
        cmd = super(Octane, self).command
        return cmd + ["run.js"]

    def reset(self):
        super(Octane, self).reset()

        # Scores are of the form:
        # {<bench_name>: {<score_name>: [<values>]}}
        self.scores = defaultdict(lambda: defaultdict(list))

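    # run.js prints one "<test>: <score>" line per subtest and a final
    # "Score (version ...): <score>" line, which is normalized to "score" below
    # and used by collect_results() as the overall suite value.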
    def process_line(self, output):
        m = re.search(r"(.+): (\d+)", output)
        if not m:
            return
        subtest = m.group(1)
        score = m.group(2)
        if subtest.startswith("Score"):
            subtest = "score"
        if subtest not in self.scores[self.name]:
            self.scores[self.name][subtest] = []
        self.scores[self.name][subtest].append(int(score))

    def collect_results(self):
        bench_score = None
        # NOTE: for this benchmark we run the test once, so we have a single value array
        for bench, scores in self.scores.items():
            for score_name, values in scores.items():
                test_name = "{}-{}".format(self.name, score_name)
                # pylint --py3k W1619
                mean = sum(values) / len(values)
                self.suite["subtests"].append({"name": test_name, "value": mean})
                if score_name == "score":
                    bench_score = mean
        self.suite["value"] = bench_score

    def run(self):
        self._provision_benchmark_script()
        return super(Octane, self).run()


all_benchmarks = {
    "ares6": Ares6,
    "six-speed": SixSpeed,
    "sunspider": SunSpider,
    "web-tooling-benchmark": WebToolingBenchmark,
    "octane": Octane,
}


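# Example (illustrative; the shell path is a placeholder):
#
#     run("octane", binary="/path/to/js", perfherder="myshell")
#
# logs PERFHERDER_DATA with the suite named "octane-myshell".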
def run(benchmark, binary=None, extra_args=None, perfherder=None):
    if not binary:
        try:
            binary = os.path.join(build.bindir, "js" + build.substs["BIN_SUFFIX"])
        except BuildEnvironmentNotFoundException:
            binary = None

        if not binary or not os.path.isfile(binary):
            print(JSSHELL_NOT_FOUND)
            return 1

    bench = all_benchmarks.get(benchmark)(
        binary, args=extra_args, shell_name=perfherder
    )
    res = bench.run()

    if perfherder:
        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    return res


def get_parser():
    parser = ArgumentParser()
    parser.add_argument(
        "benchmark",
        choices=list(all_benchmarks),
        help="The name of the benchmark to run.",
    )
    parser.add_argument(
        "-b", "--binary", default=None, help="Path to the JS shell binary to use."
    )
    parser.add_argument(
        "--arg",
        dest="extra_args",
        action="append",
        default=None,
        help="Extra arguments to pass to the JS shell.",
    )
    parser.add_argument(
        "--perfherder",
        default=None,
        help="Log PERFHERDER_DATA to stdout using the given suite name.",
    )
    return parser


def cli(args=sys.argv[1:]):
    parser = get_parser()
    args = parser.parse_args(args)
    return run(**vars(args))


if __name__ == "__main__":
    sys.exit(cli())