import copy
import functools
import imp
import io
import os
from collections import OrderedDict, defaultdict
from datetime import datetime

from mozlog import reader
from mozlog.formatters import JSONFormatter
from mozlog.handlers import BaseHandler, StreamHandler, LogLevelFilter

here = os.path.dirname(__file__)
localpaths = imp.load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, os.pardir, "localpaths.py")))
# localpaths adds the repository's tool directories to sys.path, so this
# import must stay below the load_source call above.
from wpt.markdown import markdown_adjust, table


class LogActionFilter(BaseHandler):

    """Handler that filters out messages whose action is not in a given set.

    Subclasses BaseHandler.

    :param inner: Handler to use for messages that pass this filter
    :param actions: List of actions for which to fire the handler
    """

    def __init__(self, inner, actions):
        """Extend BaseHandler and set inner and actions props on self."""
        BaseHandler.__init__(self, inner)
        self.inner = inner
        self.actions = actions

    def __call__(self, item):
        """Invoke the inner handler only if the item's action is in the
        list passed to the constructor."""
        if item["action"] in self.actions:
            return self.inner(item)

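# For example (mirroring the handler chain built in run_step below), keeping
# only "log" and "process_output" messages at WARNING level or above:
#
#     handler = LogActionFilter(
#         LogLevelFilter(StreamHandler(stream, JSONFormatter()), "WARNING"),
#         ["log", "process_output"])
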

class LogHandler(reader.LogHandler):

    """Aggregate test and subtest statuses from a structured log.

    Subclasses reader.LogHandler.
    """

    def __init__(self):
        self.results = OrderedDict()

    def find_or_create_test(self, data):
        test_name = data["test"]
        if test_name in self.results:
            return self.results[test_name]

        test = {
            "subtests": OrderedDict(),
            "status": defaultdict(int)
        }
        self.results[test_name] = test
        return test

    def find_or_create_subtest(self, data):
        test = self.find_or_create_test(data)
        subtest_name = data["subtest"]

        if subtest_name in test["subtests"]:
            return test["subtests"][subtest_name]

        subtest = {
            "status": defaultdict(int),
            "messages": set()
        }
        test["subtests"][subtest_name] = subtest

        return subtest

    def test_status(self, data):
        subtest = self.find_or_create_subtest(data)
        subtest["status"][data["status"]] += 1
        if data.get("message"):
            subtest["messages"].add(data["message"])

    def test_end(self, data):
        test = self.find_or_create_test(data)
        test["status"][data["status"]] += 1


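# Illustrative shape of LogHandler.results after processing a log (the test
# name, counts and message below are made up):
#
#     OrderedDict([
#         ("/dom/example.html", {
#             "status": defaultdict(int, {"OK": 10}),
#             "subtests": OrderedDict([
#                 ("history.length is 1", {
#                     "status": defaultdict(int, {"PASS": 9, "FAIL": 1}),
#                     "messages": {"assert_equals: expected 1 but got 2"},
#                 }),
#             ]),
#         }),
#     ])

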
def is_inconsistent(results_dict, iterations):
    """Return whether or not a single test is inconsistent."""
    if "SKIP" in results_dict:
        return False
    return len(results_dict) > 1 or sum(results_dict.values()) != iterations
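
# For example (counts are made up): over 10 iterations, {"PASS": 9, "FAIL": 1}
# and {"PASS": 9} (one result missing) are both inconsistent, while
# {"PASS": 10} and any dict containing "SKIP" are not.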


def process_results(log, iterations):
    """Process a test log and return the overall results and a list of
    inconsistent tests."""
    inconsistent = []
    handler = LogHandler()
    reader.handle_log(reader.read(log), handler)
    results = handler.results
    for test_name, test in results.items():
        if is_inconsistent(test["status"], iterations):
            inconsistent.append((test_name, None, test["status"], []))
        for subtest_name, subtest in test["subtests"].items():
            if is_inconsistent(subtest["status"], iterations):
                inconsistent.append((test_name, subtest_name, subtest["status"], subtest["messages"]))
    return results, inconsistent
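
# Each entry in the returned inconsistent list is a tuple of
# (test name, subtest name or None, status counts, messages), e.g.
# ("/dom/example.html", "history.length is 1",
#  {"PASS": 9, "FAIL": 1}, {"assert_equals: expected 1 but got 2"})
# (the names and counts here are made up).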


def err_string(results_dict, iterations):
    """Create a string summarizing the status counts from a test run,
    wrapped in ** (Markdown bold) if the results are inconsistent."""
    rv = []
    total_results = sum(results_dict.values())
    for key, value in sorted(results_dict.items()):
        if value == iterations:
            rv.append(key)
        else:
            rv.append("%s: %s/%s" % (key, value, iterations))
    if total_results < iterations:
        rv.append("MISSING: %s/%s" % (iterations - total_results, iterations))
    rv = ", ".join(rv)
    if is_inconsistent(results_dict, iterations):
        rv = "**%s**" % rv
    return rv
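
# For example (counts are made up): err_string({"FAIL": 2, "PASS": 8}, 10)
# returns "**FAIL: 2/10, PASS: 8/10**", while err_string({"PASS": 10}, 10)
# returns just "PASS".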


def write_inconsistent(log, inconsistent, iterations):
    """Output inconsistent tests as a Markdown table via the given log
    function."""
    log("## Unstable results ##\n")
    strings = [(
        "`%s`" % markdown_adjust(test),
        ("`%s`" % markdown_adjust(subtest)) if subtest else "",
        err_string(results, iterations),
        ("`%s`" % markdown_adjust(";".join(messages))) if messages else "")
        for test, subtest, results, messages in inconsistent]
    table(["Test", "Subtest", "Results", "Messages"], strings, log)
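
# The emitted table is Markdown along these lines (data made up; the exact
# column padding comes from wpt.markdown.table):
#
#   | Test                | Subtest               | Results                    | Messages |
#   |---------------------|-----------------------|----------------------------|----------|
#   | `/dom/example.html` | `history.length is 1` | **FAIL: 1/10, PASS: 9/10** |          |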


def write_results(log, results, iterations, pr_number=None, use_details=False):
    """Output all test results as Markdown tables via the given log function,
    optionally wrapped in collapsible HTML <details> blocks."""
    log("## All results ##\n")
    if use_details:
        log("<details>\n")
        log("<summary>%i %s ran</summary>\n\n" % (len(results),
                                                  "tests" if len(results) != 1
                                                  else "test"))

    for test_name, test in results.items():
        baseurl = "http://w3c-test.org/submissions"
        # Tests named *.https.* are served over HTTPS, so link them via the
        # HTTPS origin.
        if "https" in os.path.splitext(test_name)[0].split(".")[1:]:
            baseurl = "https://w3c-test.org/submissions"
        title = test_name
        if use_details:
            log("<details>\n")
            if pr_number:
                title = "<a href=\"%s/%s%s\">%s</a>" % (baseurl, pr_number, test_name, title)
            log("<summary>%s</summary>\n\n" % title)
        else:
            log("### %s ###" % title)
        # First row: the overall test status, with no subtest name or message.
        strings = [("", err_string(test["status"], iterations), "")]

        strings.extend(((
            ("`%s`" % markdown_adjust(subtest_name)) if subtest_name else "",
            err_string(subtest["status"], iterations),
            ("`%s`" % markdown_adjust(";".join(subtest["messages"]))) if subtest["messages"] else "")
            for subtest_name, subtest in test["subtests"].items()))
        table(["Subtest", "Results", "Messages"], strings, log)
        if use_details:
            log("</details>\n")

    if use_details:
        log("</details>\n")
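
# With use_details=True the output is structured for a collapsible GitHub
# comment, roughly (content made up):
#
#   <details>
#   <summary>2 tests ran</summary>
#
#   <details>
#   <summary>/dom/example.html</summary>
#
#   | Subtest | Results | Messages |
#   ...
#   </details>
#
#   </details>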


def run_step(logger, iterations, restart_after_iteration, kwargs_extras, **kwargs):
    """Run the tests `iterations` times (restarting the browser between
    iterations if `restart_after_iteration` is set) and return the results,
    the inconsistent tests, and the iteration count."""
    import wptrunner
    kwargs = copy.deepcopy(kwargs)

    if restart_after_iteration:
        kwargs["repeat"] = iterations
    else:
        kwargs["rerun"] = iterations

    kwargs["pause_after_test"] = False
    kwargs.update(kwargs_extras)

    def wrap_handler(x):
        x = LogLevelFilter(x, "WARNING")
        if not kwargs["verify_log_full"]:
            x = LogActionFilter(x, ["log", "process_output"])
        return x

    # Temporarily wrap the caller's handlers so that, during the verification
    # runs, they only receive warning+ level messages (and, unless
    # verify_log_full is set, only "log" and "process_output" actions).
    initial_handlers = logger._state.handlers
    logger._state.handlers = [wrap_handler(handler)
                              for handler in initial_handlers]

    # Capture the full structured log to an in-memory buffer so that it can
    # be parsed by process_results below.
    log = io.BytesIO()
    logger.add_handler(StreamHandler(log, JSONFormatter()))

    wptrunner.run_tests(**kwargs)

    # Restore the original handlers and reset per-suite logger state so the
    # logger can be reused by subsequent steps.
    logger._state.handlers = initial_handlers
    logger._state.running_tests = set()
    logger._state.suite_started = False

    log.seek(0)
    results, inconsistent = process_results(log, iterations)
    return results, inconsistent, iterations


def get_steps(logger, repeat_loop, repeat_restart, kwargs_extras):
    """Build the list of (description, callable) verification steps to run."""
    steps = []
    for kwargs_extra in kwargs_extras:
        if kwargs_extra:
            flags_string = " with flags %s" % " ".join(
                "%s=%s" % item for item in kwargs_extra.items())
        else:
            flags_string = ""

        if repeat_loop:
            desc = "Running tests in a loop %d times%s" % (repeat_loop,
                                                           flags_string)
            steps.append((desc, functools.partial(run_step, logger, repeat_loop, False, kwargs_extra)))

        if repeat_restart:
            desc = "Running tests in a loop with restarts %d times%s" % (repeat_restart,
                                                                         flags_string)
            steps.append((desc, functools.partial(run_step, logger, repeat_restart, True, kwargs_extra)))

    return steps
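
# For example, with the defaults used by check_stability below
# (repeat_loop=10, repeat_restart=5) and kwargs_extras of
# [{}, {"chaos_mode_flags": 3}], this yields four steps, such as
# "Running tests in a loop 10 times" and
# "Running tests in a loop with restarts 5 times with flags chaos_mode_flags=3".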


def write_summary(logger, step_results, final_result):
    """Log a per-step summary followed by the overall verification result."""
    for desc, result in step_results:
        logger.info("::: %s : %s" % (desc, result))
    logger.info(":::")
    if final_result == "PASS":
        log = logger.info
    elif final_result == "TIMEOUT":
        log = logger.warning
    else:
        log = logger.error
    log("::: Test verification %s" % final_result)

    logger.info(":::")


def check_stability(logger, repeat_loop=10, repeat_restart=5, chaos_mode=True, max_time=None,
                    output_results=True, **kwargs):
    """Run all verification steps and report on test stability.

    Returns 1 if any step produced inconsistent results, 2 if the time
    budget was exhausted before every step ran, and None if all steps
    passed."""
    kwargs_extras = [{}]
    # For Firefox, additionally run the tests with chaos mode enabled.
    if chaos_mode and kwargs["product"] == "firefox":
        kwargs_extras.append({"chaos_mode_flags": 3})

    steps = get_steps(logger, repeat_loop, repeat_restart, kwargs_extras)

    start_time = datetime.now()
    step_results = []

    for desc, step_func in steps:
        if max_time and datetime.now() - start_time > max_time:
            logger.info("::: Test verification is taking too long: Giving up!")
            logger.info("::: So far, all checks passed, but not all checks were run.")
            write_summary(logger, step_results, "TIMEOUT")
            return 2

        logger.info(":::")
        logger.info('::: Running test verification step "%s"...' % desc)
        logger.info(":::")
        results, inconsistent, iterations = step_func(**kwargs)
        if output_results:
            write_results(logger.info, results, iterations)

        if inconsistent:
            step_results.append((desc, "FAIL"))
            write_inconsistent(logger.info, inconsistent, iterations)
            write_summary(logger, step_results, "FAIL")
            return 1

        step_results.append((desc, "PASS"))

    write_summary(logger, step_results, "PASS")
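
# Sketch of a direct invocation; in practice this is driven by the wpt CLI,
# and wptrunner.run_tests needs many more kwargs than shown here:
#
#     from mozlog.structuredlog import StructuredLogger
#
#     logger = StructuredLogger("check-stability")
#     ret = check_stability(logger, repeat_loop=10, repeat_restart=5,
#                           product="firefox", verify_log_full=False)
#     # ret is None if all steps passed, 1 on unstable results, 2 on timeout.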