1# Copyright 2012 the V8 project authors. All rights reserved.
2# Redistribution and use in source and binary forms, with or without
3# modification, are permitted provided that the following conditions are
4# met:
5#
6#     * Redistributions of source code must retain the above copyright
7#       notice, this list of conditions and the following disclaimer.
8#     * Redistributions in binary form must reproduce the above
9#       copyright notice, this list of conditions and the following
10#       disclaimer in the documentation and/or other materials provided
11#       with the distribution.
12#     * Neither the name of Google Inc. nor the names of its
13#       contributors may be used to endorse or promote products derived
14#       from this software without specific prior written permission.
15#
16# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28import copy
29import os
30import re
31import shlex
32
33from testrunner.outproc import base as outproc
34from testrunner.local import command
35from testrunner.local import statusfile
36from testrunner.local import utils
37from testrunner.local.variants import ALL_VARIANT_FLAGS
38from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
39from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
40from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG
41
42
# Matches "// Flags: ..." annotations in test sources; group(1) holds the
# whitespace-separated flag list of that line.
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")

# Patterns for additional resource files on Android. Files that are not covered
# by one of the other patterns below will be specified in the resources section.
RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
# Pattern to auto-detect files to push on Android for statements like:
# load("path/to/file.js")
# d8.file.execute("path/to/file.js")
LOAD_PATTERN = re.compile(
    r"(?:execute|load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
# Pattern to auto-detect files to push on Android for statements like:
# import foobar from "path/to/file.js"
# import {foo, bar} from "path/to/file.js"
# export {"foo" as "bar"} from "path/to/file.js"
MODULE_FROM_RESOURCES_PATTERN = re.compile(
    r"(?:import|export).*?from\s*\(?['\"]([^'\"]+)['\"]",
    re.MULTILINE | re.DOTALL)
# Pattern to detect files to push on Android for statements like:
# import "path/to/file.js"
# import("module.mjs").catch()...
MODULE_IMPORT_RESOURCES_PATTERN = re.compile(
    r"import\s*\(?['\"]([^'\"]+)['\"]",
    re.MULTILINE | re.DOTALL)
# Pattern to detect and strip test262 frontmatter from tests to prevent false
# positives for the MODULE_*_RESOURCES_PATTERNs above.
TEST262_FRONTMATTER_PATTERN = re.compile(r"/\*---.*?---\*/", re.DOTALL)

# Value returned by _get_timeout_param() requesting a 10x timeout multiplier.
TIMEOUT_LONG = "long"
71
# Python 3 removed the Python 2 builtin `cmp`; provide a drop-in
# replacement so the three-way comparison in TestCase.__cmp__ keeps working.
try:
  cmp             # Python 2: the builtin already exists.
except NameError:
  def cmp(x, y):  # Python 3: emulate the removed builtin.
    """Return -1, 0 or 1 as x is less than, equal to or greater than y."""
    if x < y:
      return -1
    return 1 if x > y else 0
77
def read_file_utf8(file):
  """Reads the whole file as UTF-8 text.

  On Python 3 the file is opened with an explicit utf-8 encoding; Python 2's
  open() has no `encoding` keyword and raises TypeError, in which case we
  fall back to the default open().
  """
  try:
    handle = open(file, encoding='utf-8')  # Python 3
  except TypeError:
    handle = open(file)                    # Python 2
  with handle:
    return handle.read()
85
class TestCase(object):
  """A single test known to the test runner.

  Bundles everything needed to run one test: its identity (suite, path,
  name), the status-file outcomes and flags, the chosen variant, and the
  logic to assemble the shell command. Test processors derive customized
  copies via create_subtest().
  """

  def __init__(self, suite, path, name, test_config):
    self.suite = suite        # TestSuite object

    self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
    self.name = name          # string that identifies test in the status file

    self.variant = None       # name of the used testing variant
    self.variant_flags = []   # list of strings, flags specific to this test

    # Fields used by the test processors.
    self.origin = None # Test that this test is subtest of.
    self.processor = None # Processor that created this subtest.
    self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
    self.keep_output = False # Can output of this test be dropped

    # Test config contains information needed to build the command.
    self._test_config = test_config
    self._random_seed = None # Overrides test config value if not None

    # Outcomes derived from the suite's status file.
    self._statusfile_outcomes = None
    self._expected_outcomes = None
    self._checked_flag_contradictions = False
    self._statusfile_flags = None
    self.expected_failure_reason = None

    self._prepare_outcomes()

  def create_subtest(self, processor, subtest_id, variant=None, flags=None,
                     keep_output=False, random_seed=None):
    """Creates a shallow copy of this test adjusted for a test processor.

    The copy's procid is extended with subtest_id; optionally a variant,
    additional flags, a forced random seed or sticky output-keeping are
    applied. Setting a variant re-derives the outcomes from the status file.
    """
    subtest = copy.copy(self)
    subtest.origin = self
    subtest.processor = processor
    subtest.procid += '.%s' % subtest_id
    subtest.keep_output |= keep_output
    if random_seed:
      subtest._random_seed = random_seed
    if flags:
      subtest.variant_flags = subtest.variant_flags + flags
    if variant is not None:
      # The variant may only be assigned once per test.
      assert self.variant is None
      subtest.variant = variant
      subtest._prepare_outcomes()
    return subtest

  def _prepare_outcomes(self, force_update=True):
    """Splits status-file outcomes into plain outcomes and runtime flags.

    Entries starting with '--' are treated as flags, everything else as an
    outcome. Afterwards the expected outcomes are (re)computed from the
    plain outcomes.
    """
    if force_update or self._statusfile_outcomes is None:
      def is_flag(outcome):
        return outcome.startswith('--')
      def not_flag(outcome):
        return not is_flag(outcome)

      outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
      self._statusfile_outcomes = list(filter(not_flag, outcomes))
      self._statusfile_flags = list(filter(is_flag, outcomes))
    self._expected_outcomes = (
      self._parse_status_file_outcomes(self._statusfile_outcomes))

  def _parse_status_file_outcomes(self, outcomes):
    """Maps raw status-file outcomes to a list of expected outcomes."""
    # FAIL_SLOPPY means: expected to fail unless run in strict mode.
    if (statusfile.FAIL_SLOPPY in outcomes and
        '--use-strict' not in self.variant_flags):
      return outproc.OUTCOMES_FAIL

    expected_outcomes = []
    if (statusfile.FAIL in outcomes or
        statusfile.FAIL_OK in outcomes):
      expected_outcomes.append(statusfile.FAIL)
    if statusfile.CRASH in outcomes:
      expected_outcomes.append(statusfile.CRASH)

    # Do not add PASS if there is nothing else. Empty outcomes are converted to
    # the global [PASS].
    if expected_outcomes and statusfile.PASS in outcomes:
      expected_outcomes.append(statusfile.PASS)

    # Avoid creating multiple instances of a list with a single FAIL.
    if expected_outcomes == outproc.OUTCOMES_FAIL:
      return outproc.OUTCOMES_FAIL
    return expected_outcomes or outproc.OUTCOMES_PASS

  def allow_timeouts(self):
    """Additionally accepts TIMEOUT as an expected outcome."""
    if self.expected_outcomes == outproc.OUTCOMES_PASS:
      self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
    elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
      self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
    elif statusfile.TIMEOUT not in self.expected_outcomes:
      self._expected_outcomes = (
          self.expected_outcomes + [statusfile.TIMEOUT])

  def allow_pass(self):
    """Additionally accepts PASS as an expected outcome."""
    if self.expected_outcomes == outproc.OUTCOMES_TIMEOUT:
      self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
    elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
      self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_PASS
    elif statusfile.PASS not in self.expected_outcomes:
      self._expected_outcomes = (
          self.expected_outcomes + [statusfile.PASS])

  @property
  def expected_outcomes(self):
    """The expected outcomes, adjusted for flag contradictions.

    On first access the test's flags are checked against known incompatible
    flag combinations from variants.py; on a match the expected outcomes are
    rewritten to FAIL, since the shell is expected to report a flag
    contradiction error. Note: this property mutates instance state on its
    first evaluation.
    """
    def is_flag(maybe_flag):
      return maybe_flag.startswith("--")  # Best-effort heuristic.

    # Filter to flags, e.g.: ["--foo", "3", "--bar"] -> ["--foo", "--bar"].
    def filter_flags(normalized_flags):
      return [f for f in normalized_flags if is_flag(f)];

    def normalize_flag(flag):
      return flag.replace("_", "-").replace("--no-", "--no")

    def normalize_flags(flags):
      return [normalize_flag(flag) for flag in filter_flags(flags)]

    # Note this can get it wrong if the flag name starts with the characters
    # "--no" where "no" is part of the flag name, e.g. "--nobodys-perfect".
    # In that case the negation "--bodys-perfect" would be returned. This is
    # a weakness we accept and hope to never run into.
    def negate_flag(normalized_flag):
      return ("--" + normalized_flag[4:] if normalized_flag.startswith("--no")
              else "--no" + normalized_flag[2:])

    def negate_flags(normalized_flags):
      return [negate_flag(flag) for flag in normalized_flags]

    # True if conflicting_flag (possibly with a trailing '*' wildcard) is
    # present in the already-normalized flags list.
    def has_flag(conflicting_flag, flags):
      conflicting_flag = normalize_flag(conflicting_flag)
      if conflicting_flag in flags:
        return True
      if conflicting_flag.endswith("*"):
        return any(flag.startswith(conflicting_flag[:-1]) for flag in flags)
      return False

    # If any incompatible flag occurs in actual_flags, expect this test to
    # FAIL with a flag-contradiction error attributed to `rule`.
    def check_flags(incompatible_flags, actual_flags, rule):
      for incompatible_flag in incompatible_flags:
          if has_flag(incompatible_flag, actual_flags):
            self._statusfile_outcomes = outproc.OUTCOMES_FAIL
            self._expected_outcomes = outproc.OUTCOMES_FAIL
            self.expected_failure_reason = ("Rule " + rule + " in " +
                "tools/testrunner/local/variants.py expected a flag " +
                "contradiction error with " + incompatible_flag + ".")

    if not self._checked_flag_contradictions:
      self._checked_flag_contradictions = True

      file_specific_flags = (self._get_source_flags() + self._get_suite_flags()
                             + self._get_statusfile_flags())
      file_specific_flags = normalize_flags(file_specific_flags)
      extra_flags = normalize_flags(self._get_extra_flags())

      # Contradiction: flags contains both a flag --foo and its negation
      # --no-foo.
      if self.variant in ALL_VARIANT_FLAGS:
        for flags in ALL_VARIANT_FLAGS[self.variant]:
          all_flags = (file_specific_flags + extra_flags
                       + normalize_flags(flags))
          check_flags(negate_flags(all_flags), all_flags, "Flag negations")

      # Contradiction: flags specified through the "Flags:" annotation are
      # incompatible with the variant.
      if self.variant in INCOMPATIBLE_FLAGS_PER_VARIANT:
        check_flags(INCOMPATIBLE_FLAGS_PER_VARIANT[self.variant], file_specific_flags,
                    "INCOMPATIBLE_FLAGS_PER_VARIANT[\""+self.variant+"\"]")

      # Contradiction: flags specified through the "Flags:" annotation are
      # incompatible with the build.
      for variable, incompatible_flags in INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE.items():
        if self.suite.statusfile.variables[variable]:
            check_flags(incompatible_flags, file_specific_flags,
              "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\""+variable+"\"]")

      # Contradiction: flags passed through --extra-flags are incompatible.
      for extra_flag, incompatible_flags in INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG.items():
        if has_flag(extra_flag, extra_flags):
            check_flags(incompatible_flags, file_specific_flags,
              "INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG[\""+extra_flag+"\"]")
    return self._expected_outcomes

  @property
  def do_skip(self):
    """Whether the status file skips this test (unless skips are overridden)."""
    return (statusfile.SKIP in self._statusfile_outcomes and
            not self.suite.test_config.run_skipped)

  @property
  def is_heavy(self):
    """Whether the status file marks this test as HEAVY."""
    return statusfile.HEAVY in self._statusfile_outcomes

  @property
  def is_slow(self):
    """Whether this test is slow (HEAVY implies SLOW)."""
    return self.is_heavy or statusfile.SLOW in self._statusfile_outcomes

  @property
  def is_fail_ok(self):
    """Whether a failure of this test is acceptable (FAIL_OK)."""
    return statusfile.FAIL_OK in self._statusfile_outcomes

  @property
  def is_pass_or_fail(self):
    """Whether both PASS and FAIL (but not CRASH) are listed outcomes."""
    return (statusfile.PASS in self._statusfile_outcomes and
            statusfile.FAIL in self._statusfile_outcomes and
            statusfile.CRASH not in self._statusfile_outcomes)

  @property
  def is_fail(self):
     """Whether the status file expects this test to fail and not to pass."""
     return (statusfile.FAIL in self._statusfile_outcomes and
             statusfile.PASS not in self._statusfile_outcomes)

  @property
  def only_standard_variant(self):
    """Whether the status file restricts this test to the standard variant."""
    return statusfile.NO_VARIANTS in self._statusfile_outcomes

  def get_command(self):
    """Assembles the complete Command object used to run this test."""
    params = self._get_cmd_params()
    env = self._get_cmd_env()
    shell = self.get_shell()
    if utils.IsWindows():
      shell += '.exe'
    shell_flags = self._get_shell_flags()
    timeout = self._get_timeout(params)
    return self._create_cmd(shell, shell_flags + params, env, timeout)

  def _get_cmd_params(self):
    """Gets command parameters and combines them in the following order:
      - files [empty by default]
      - random seed
      - mode flags (based on chosen mode)
      - extra flags (from command line)
      - user flags (variant/fuzzer flags)
      - source flags (from source code) [empty by default]
      - test-suite flags
      - statusfile flags

    The best way to modify how parameters are created is to only override
    methods for getting partial parameters.
    """
    return (
        self._get_files_params() +
        self._get_random_seed_flags() +
        self._get_mode_flags() +
        self._get_extra_flags() +
        self._get_variant_flags() +
        self._get_source_flags() +
        self._get_suite_flags() +
        self._get_statusfile_flags()
    )

  def _get_cmd_env(self):
    """Environment variables for the command; empty by default."""
    return {}

  def _get_files_params(self):
    """File arguments passed to the shell; empty by default."""
    return []

  def _get_timeout_param(self):
    """Timeout classification, e.g. TIMEOUT_LONG; None by default."""
    return None

  def _get_random_seed_flags(self):
    return ['--random-seed=%d' % self.random_seed]

  @property
  def random_seed(self):
    # A seed set via create_subtest() overrides the test-config default.
    return self._random_seed or self._test_config.random_seed

  def _get_extra_flags(self):
    """Flags passed on the command line via --extra-flags."""
    return self._test_config.extra_flags

  def _get_variant_flags(self):
    """Flags specific to the chosen testing variant."""
    return self.variant_flags

  def _get_statusfile_flags(self):
    """Gets runtime flags from a status file.

    Every outcome that starts with "--" is a flag.
    """
    return self._statusfile_flags

  def _get_mode_flags(self):
    """Flags determined by the chosen testing mode."""
    return self._test_config.mode_flags

  def _get_source_flags(self):
    """Flags parsed from the test source; empty by default."""
    return []

  def _get_suite_flags(self):
    """Flags defined by the test suite; empty by default."""
    return []

  def _get_shell_flags(self):
    """Flags inherent to the shell binary; empty by default."""
    return []

  def _get_timeout(self, params):
    """Computes the timeout, scaled up for known-slow flag combinations."""
    timeout = self._test_config.timeout
    if "--stress-opt" in params:
      timeout *= 4
    if "--jitless" in params:
      timeout *= 2
    if "--no-opt" in params:
      timeout *= 2
    if "--noenable-vfp3" in params:
      timeout *= 2
    if self._get_timeout_param() == TIMEOUT_LONG:
      timeout *= 10
    if self.is_slow:
      timeout *= 4
    return timeout

  def get_shell(self):
    """Name of the shell binary; must be provided by subclasses."""
    raise NotImplementedError()

  def _get_suffix(self):
    """File suffix of the test source."""
    return '.js'

  def _create_cmd(self, shell, params, env, timeout):
    """Builds the Command object from the assembled pieces."""
    return command.Command(
      cmd_prefix=self._test_config.command_prefix,
      shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
      args=params,
      env=env,
      timeout=timeout,
      verbose=self._test_config.verbose,
      resources_func=self._get_resources,
      handle_sigterm=True,
    )

  def _parse_source_flags(self, source=None):
    """Extracts all "// Flags:" annotations from the given (or own) source."""
    source = source or self.get_source()
    flags = []
    for match in re.findall(FLAGS_PATTERN, source):
      flags += shlex.split(match.strip())
    return flags

  def is_source_available(self):
    """Whether a source file path exists for this test."""
    return self._get_source_path() is not None

  def get_source(self):
    """Reads and returns the test source as UTF-8 text."""
    return read_file_utf8(self._get_source_path())

  def _get_source_path(self):
    """Path to the test source file; None by default (no source)."""
    return None

  def _get_resources(self):
    """Returns a list of absolute paths with additional files needed by the
    test case.

    Used to push additional files to Android devices.
    """
    return []

  def skip_predictable(self):
    """Returns True if the test case is not suitable for predictable testing."""
    return True

  @property
  def output_proc(self):
    # Use the shared default processor when only PASS is expected (identity
    # check against the canonical OUTCOMES_PASS list).
    if self.expected_outcomes is outproc.OUTCOMES_PASS:
      return outproc.DEFAULT
    return outproc.OutProc(self.expected_outcomes)

  def __cmp__(self, other):
    # Make sure that test cases are sorted correctly if sorted without
    # key function. But using a key function is preferred for speed.
    # Uses the module-level cmp() polyfill on Python 3.
    return cmp(
        (self.suite.name, self.name, self.variant),
        (other.suite.name, other.name, other.variant)
    )

  def __str__(self):
    return self.suite.name + '/' + self.name
450
451
class D8TestCase(TestCase):
  """A test executed by the d8 shell, with Android resource auto-detection."""

  def get_shell(self):
    return "d8"

  def _get_shell_flags(self):
    return ['--test']

  def _get_resources_for_file(self, file):
    """Returns for a given file a list of absolute paths of files needed by the
    given file.
    """
    source = read_file_utf8(file)
    needed = []

    def record(path):
      # Resource paths use '/' separators; convert and absolutize them.
      needed.append(os.path.abspath(path.replace('/', os.path.sep)))

    def record_relative(import_path):
      # Imports are resolved relative to the importing file.
      record(os.path.normpath(
        os.path.join(os.path.dirname(file), import_path)))

    # "// Resources:" lines list several files each, relative to the base dir.
    for match in RESOURCES_PATTERN.finditer(source):
      for path in match.group(1).strip().split():
        record(path)

    # Strip test262 frontmatter before looking for load() and import/export
    # statements, to avoid false positives.
    source = TEST262_FRONTMATTER_PATTERN.sub('', source)

    # Files in load()-style statements are relative to the base dir.
    for match in LOAD_PATTERN.finditer(source):
      record(match.group(1))

    # Imported files are relative to the file importing them.
    for match in MODULE_FROM_RESOURCES_PATTERN.finditer(source):
      record_relative(match.group(1))
    for match in MODULE_IMPORT_RESOURCES_PATTERN.finditer(source):
      record_relative(match.group(1))
    return needed

  def _get_resources(self):
    """Returns the list of files needed by a test case."""
    start = self._get_source_path()
    if not start:
      return []
    seen = set()
    pending = [start]
    # Transitively follow resource references until reaching a fixpoint.
    while pending:
      current = pending.pop()
      seen.add(current)
      for resource in self._get_resources_for_file(current):
        # Only add files that exist on disc. The patterns we check for give
        # some false positives otherwise.
        if resource not in seen and os.path.exists(resource):
          pending.append(resource)
    return sorted(seen)

  def skip_predictable(self):
    """Returns True if the test case is not suitable for predictable testing."""
    if statusfile.FAIL in self.expected_outcomes:
      return True
    return self.output_proc.negative
510