1import array
2import os
3from collections import defaultdict, namedtuple
4from typing import Dict, List, Tuple
5
6from mozlog import structuredlog
7from six import ensure_str, ensure_text
8from sys import intern
9
10from . import manifestupdate
11from . import testloader
12from . import wptmanifest
13from . import wpttest
14from .expected import expected_path
15from .vcs import git
manifest = None  # Module that will be imported relative to test_root
manifestitem = None  # Set alongside `manifest` by do_delayed_imports

logger = structuredlog.StructuredLogger("web-platform-tests")

try:
    # ujson is a faster drop-in replacement for the stdlib json module;
    # fall back to the stdlib when it isn't installed.
    import ujson as json
except ImportError:
    import json  # type: ignore
25
26
class RunInfo(object):
    """A wrapper around RunInfo dicts so that they can be hashed by identity"""

    def __init__(self, dict_value):
        self.data = dict_value
        # Order-independent canonical form used for hashing and equality, so
        # two RunInfos built from dicts with the same items compare equal.
        self.canonical_repr = tuple(tuple(item) for item in sorted(dict_value.items()))

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        # Immutable once constructed: the hash depends on the wrapped data.
        raise TypeError

    def __hash__(self):
        return hash(self.canonical_repr)

    def __eq__(self, other):
        return self.canonical_repr == other.canonical_repr

    def iteritems(self):
        # Legacy-named iterator over the wrapped dict's items.
        for key, value in self.data.items():
            yield key, value

    def items(self):
        # Fix: this previously returned list(self.items()), recursing into
        # itself forever; delegate to the wrapped dict instead.
        return list(self.data.items())
52
53
def update_expected(test_paths, serve_root, log_file_names,
                    update_properties, rev_old=None, rev_new="HEAD",
                    full_update=False, sync_root=None, disable_intermittent=None,
                    update_intermittent=False, remove_intermittent=False):
    """Update the metadata files for web-platform-tests based on
    the results obtained in a previous run or runs

    If `disable_intermittent` is not None, assume log_file_names refers to logs from repeated
    test jobs, disable tests that don't behave as expected on all runs

    If `update_intermittent` is True, intermittent statuses will be recorded as `expected` in
    the metadata.

    If `remove_intermittent` is True and used in conjunction with `update_intermittent`, any
    intermittent statuses which are not present in the current run will be removed from the
    metadata, else they are left in.

    NOTE(review): rev_old, rev_new and sync_root are currently unused here;
    presumably retained for interface compatibility -- confirm with callers."""

    do_delayed_imports(serve_root)

    id_test_map = load_test_data(test_paths)

    for metadata_path, updated_ini in update_from_logs(id_test_map,
                                                       update_properties,
                                                       disable_intermittent,
                                                       update_intermittent,
                                                       remove_intermittent,
                                                       full_update,
                                                       *log_file_names):

        write_new_expected(metadata_path, updated_ini)
        if disable_intermittent:
            for test in updated_ini.iterchildren():
                # Fix: report a newly-disabled test exactly once. Previously
                # this check lived inside the subtest loop, printing the test
                # once per subtest and never for tests with no subtests.
                if test.new_disabled:
                    print("disabled: %s" % test.root.test_path)
                for subtest in test.iterchildren():
                    if subtest.new_disabled:
                        print("disabled: %s/%s" % (os.path.dirname(subtest.root.test_path),
                                                   subtest.name))
91
92
def do_delayed_imports(serve_root=None):
    """Populate the module-level `manifest`/`manifestitem` globals.

    Deferred because the `manifest` package lives under the test root and may
    not be importable when this module is first loaded. NOTE(review):
    `serve_root` is unused here -- presumably kept for interface
    compatibility; confirm with callers."""
    global manifest, manifestitem
    from manifest import manifest, item as manifestitem  # type: ignore
96
97
def files_in_repo(repo_root):
    """Return the paths of all files tracked at HEAD in the git repo at repo_root.

    Fix: `repo_root` was previously ignored, so the command ran against the
    process's working directory instead of the requested repository (compare
    paths_changed, which does pass repo=)."""
    return git("ls-tree", "-r", "--name-only", "HEAD", repo=repo_root).split("\n")
100
101
def rev_range(rev_old, rev_new, symmetric=False):
    """Return a git revision range string: "old..new", or "old...new" when symmetric."""
    separator = "..." if symmetric else ".."
    return rev_old + separator + rev_new
105
106
def paths_changed(rev_old, rev_new, repo):
    """Return the set of (status, path) tuples for files changed between two revisions."""
    diff_output = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
    changed = set()
    for raw_line in diff_output.split("\n"):
        stripped = raw_line.strip()
        if not stripped:
            continue
        changed.add(tuple(field.strip() for field in stripped.split("\t", 1)))
    return changed
113
114
def load_change_data(rev_old, rev_new, repo):
    """Map each changed path to "modified", "new" or "deleted" between two revisions."""
    status_names = {"M": "modified",
                    "A": "new",
                    "D": "deleted"}
    # TODO: deal with renames
    return {path: status_names[status]
            for status, path in paths_changed(rev_old, rev_new, repo)}
125
126
def unexpected_changes(manifests, change_data, files_changed):
    """Return files from the root manifest that changed on disk but were not
    recorded as modified in change_data.

    :param manifests: {manifest: {"url_base": ...}} mapping
    :param change_data: {path: "modified"|"new"|"deleted"} as produced by
        load_change_data
    :param files_changed: iterable of changed file paths"""
    files_changed = set(files_changed)

    root_manifest = None
    for mf, paths in manifests.items():
        if paths["url_base"] == "/":
            root_manifest = mf
            break
    else:
        # No manifest rooted at "/": nothing to report.
        return []

    # Fix: change_data holds the long-form names produced by load_change_data
    # ("modified" etc.), never the raw git status letter "M", so the previous
    # comparison against "M" was always true.
    return [fn for _, fn, _ in root_manifest
            if fn in files_changed and change_data.get(fn) != "modified"]
139
140# For each testrun
141# Load all files and scan for the suite_start entry
142# Build a hash of filename: properties
143# For each different set of properties, gather all chunks
144# For each chunk in the set of chunks, go through all tests
145# for each test, make a map of {conditionals: [(platform, new_value)]}
146# Repeat for each platform
147# For each test in the list of tests:
148#   for each conditional:
149#      If all the new values match (or there aren't any) retain that conditional
150#      If any new values mismatch:
151#           If disable_intermittent and any repeated values don't match, disable the test
152#           else mark the test as needing human attention
153#   Check if all the RHS values are the same; if so collapse the conditionals
154
155
class InternedData(object):
    """Class for interning data of any (hashable) type.

    This class is intended for building a mapping of int <=> value, such
    that the integer may be stored as a proxy for the real value, and then
    the real value obtained later from the proxy value.

    In order to support the use case of packing the integer value as binary,
    it is possible to specify a maximum bitsize of the data; adding more items
    than this allowed will result in a ValueError exception.

    The zero value is reserved to use as a sentinel."""

    # Optional hooks (overridden by subclasses) converting values to a
    # hashable form for storage and back again on retrieval.
    type_conv = None
    rev_type_conv = None

    def __init__(self, max_bits: int = 8):
        # Remember the configured width so clear() can reset faithfully.
        self.max_bits = max_bits
        self.max_idx = 2**max_bits - 2
        # Reserve 0 as a sentinel
        self._data: Tuple[List[object], Dict[object, int]]
        self._data = [None], {}

    def clear(self):
        """Drop all stored values, keeping the configured bit width.

        Fix: this previously called __init__() with no arguments, silently
        resetting max_bits back to the default of 8."""
        self.__init__(self.max_bits)

    def store(self, obj):
        """Intern obj and return its (non-zero) integer proxy.

        Raises ValueError when the pool is full. Fix: the overflow check now
        happens before mutation, so a failed store leaves the pool unchanged
        (previously the overflowing value was appended before raising)."""
        if self.type_conv is not None:
            obj = self.type_conv(obj)

        objs, obj_to_idx = self._data
        value = obj_to_idx.get(obj)
        if value is None:
            value = len(objs)
            if value > self.max_idx:
                raise ValueError("InternedData overflow: no more than %d "
                                 "distinct values can be stored" % self.max_idx)
            objs.append(obj)
            obj_to_idx[obj] = value
        return value

    def get(self, idx):
        """Return the value stored under integer proxy idx."""
        obj = self._data[0][idx]
        if self.rev_type_conv is not None:
            obj = self.rev_type_conv(obj)
        return obj

    def __iter__(self):
        # Skip index 0, the reserved sentinel slot.
        for i in range(1, len(self._data[0])):
            yield self.get(i)
205
206
class RunInfoInterned(InternedData):
    """InternedData specialized for dict-like run_info values.

    Dicts are not hashable, so they are converted to a tuple of items for
    storage and back to a dict on retrieval. NOTE(review): the stored key
    depends on dict iteration order -- assumed consistent for the run_info
    dicts interned here; confirm if arbitrary dicts are ever stored."""

    def type_conv(self, value):
        # Hashable form for interning.
        return tuple(value.items())

    def rev_type_conv(self, value):
        # Recover the original dict shape.
        return dict(value)
213
214
# Module-level interning pools shared by pack_result/unpack_result and
# PackedResultList. Their sizes match the bit-field widths used when packing
# results: 4 bits for property names and statuses, 8 bits for run_info.
prop_intern = InternedData(4)
run_info_intern = InternedData(8)
status_intern = InternedData(4)
218
219
def pack_result(data):
    """Compactly encode a result dict's statuses.

    When there is no known_intermittent data, the result is just the interned
    status (an int). Otherwise status, expected and each intermittent status
    are interned and packed two-per-byte into an array of unsigned bytes."""
    intermittent = data.get("known_intermittent")
    if not intermittent:
        return status_intern.store(data.get("status"))

    expected = data.get("expected")
    if expected is None:
        expected = data["status"]

    parts = [data["status"], expected] + intermittent
    packed = array.array("B")
    for index, part in enumerate(parts):
        code = status_intern.store(part)
        if index % 2:
            # Odd entries fill the low nibble of the previous byte.
            packed[-1] += code
        else:
            # Even entries start a new byte in the high nibble.
            assert code < 16
            packed.append(code << 4)
    return packed
238
239
def unpack_result(data):
    """Decode a value produced by pack_result into (status, known_intermittent).

    For the single-status encodings (interned int or plain string) the second
    element is None. For the packed-array encoding, nibbles are expanded back
    into interned statuses. NOTE(review): on this path the first element is a
    1-tuple while the single-status paths return a bare string -- confirm the
    downstream consumer (set_result via Result) accepts both shapes."""
    if isinstance(data, int):
        return (status_intern.get(data), None)
    if isinstance(data, str):
        return (data, None)
    # Unpack multiple statuses into a tuple to be used in the Results named tuple below,
    # separating `status` and `known_intermittent`.
    results = []
    for packed_value in data:
        # High nibble first, then low nibble; a zero low nibble is the
        # sentinel for "no status stored in this half".
        first = status_intern.get(packed_value >> 4)
        second = status_intern.get(packed_value & 0x0F)
        results.append(first)
        if second:
            results.append(second)
    return ((results[0],), tuple(results[1:]))
255
256
def load_test_data(test_paths):
    """Build the {test_id: TestFileData} map covering every manifest in test_paths."""
    loader = testloader.ManifestLoader(test_paths, False)

    id_test_map = {}
    for test_manifest, paths in loader.load().items():
        id_test_map.update(create_test_tree(paths["metadata_path"], test_manifest))
    return id_test_map
266
267
def update_from_logs(id_test_map, update_properties, disable_intermittent, update_intermittent,
                     remove_intermittent, full_update, *log_filenames):
    """Feed every log file through an ExpectedUpdater, then yield the
    (metadata_path, updated manifest) pairs produced by update_results."""
    updater = ExpectedUpdater(id_test_map)

    total = len(log_filenames)
    for index, log_filename in enumerate(log_filenames, start=1):
        print("Processing log %d/%d" % (index, total))
        with open(log_filename) as f:
            updater.update_from_log(f)

    yield from update_results(id_test_map, update_properties, full_update,
                              disable_intermittent, update_intermittent=update_intermittent,
                              remove_intermittent=remove_intermittent)
282
283
def update_results(id_test_map,
                   update_properties,
                   full_update,
                   disable_intermittent,
                   update_intermittent,
                   remove_intermittent):
    """Yield (metadata_path, expected manifest) for each file whose metadata changed."""
    # Per-(test type, is_subtest) default expected statuses, looked up when
    # building Result values during the update.
    default_expected_by_type = {}
    for test_type, test_cls in wpttest.manifest_test_cls.items():
        if test_cls.result_cls:
            default_expected_by_type[(test_type, False)] = test_cls.result_cls.default_expected
        if test_cls.subtest_result_cls:
            default_expected_by_type[(test_type, True)] = test_cls.subtest_result_cls.default_expected

    # De-duplicate: multiple test ids map to the same TestFileData.
    for test_file in set(id_test_map.values()):
        updated = test_file.update(default_expected_by_type, update_properties,
                                   full_update, disable_intermittent, update_intermittent,
                                   remove_intermittent)
        if updated is not None and updated.modified:
            yield test_file.metadata_path, updated
305
306
def directory_manifests(metadata_path):
    """Return the relative paths of every __dir__.ini found under metadata_path."""
    return [os.path.join(os.path.relpath(dirpath, metadata_path), "__dir__.ini")
            for dirpath, _dirs, filenames in os.walk(metadata_path)
            if "__dir__.ini" in filenames]
314
315
def write_new_expected(metadata_path, expected):
    """Serialize an expectation manifest back to disk.

    Writes atomically via a temporary file + os.replace. If the manifest is
    empty, any existing expectation file is removed instead."""
    path = expected_path(metadata_path, expected.test_path)
    if not expected.is_empty:
        manifest_str = wptmanifest.serialize(expected.node,
                                             skip_empty_data=True)
        assert manifest_str != ""
        dir = os.path.dirname(path)
        if not os.path.exists(dir):
            os.makedirs(dir)
        tmp_path = path + ".tmp"
        try:
            with open(tmp_path, "wb") as f:
                f.write(manifest_str.encode("utf8"))
            os.replace(tmp_path, path)
        except (Exception, KeyboardInterrupt):
            # Clean up the temp file, then re-raise. Fix: the exception was
            # previously swallowed here, silently discarding the metadata
            # update (and even eating KeyboardInterrupt).
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
            raise
    else:
        try:
            os.unlink(path)
        except OSError:
            # Nothing to delete; that's fine.
            pass
341
342
class ExpectedUpdater(object):
    """Consume test logs and record results against an id_test_map.

    Accepts raw structured logs as well as wptreport-format JSON (either a
    single, possibly pretty-printed, object or one JSON object per line).
    Each recognised log action is dispatched to the same-named handler."""

    def __init__(self, id_test_map):
        self.id_test_map = id_test_map
        # Interned run_info of the log currently being read; set by suite_start.
        self.run_info = None
        # Dispatch table from log action name to handler method.
        self.action_map = {"suite_start": self.suite_start,
                           "test_start": self.test_start,
                           "test_status": self.test_status,
                           "test_end": self.test_end,
                           "assertion_count": self.assertion_count,
                           "lsan_leak": self.lsan_leak,
                           "mozleak_object": self.mozleak_object,
                           "mozleak_total": self.mozleak_total}
        # test_id -> set of subtest names seen, for tests currently running.
        self.tests_visited = {}

    def update_from_log(self, log_file):
        """Ingest one log file, auto-detecting its format."""
        # We support three possible formats:
        # * wptreport format; one json object in the file, possibly pretty-printed
        # * wptreport format; one run per line
        # * raw log format

        # Try reading a single json object in wptreport format
        self.run_info = None
        success = self.get_wptreport_data(log_file.read())

        if success:
            return

        # Try line-separated json objects in wptreport format
        log_file.seek(0)
        for line in log_file:
            success = self.get_wptreport_data(line)
            if not success:
                break
        else:
            return

        # Assume the file is a raw log
        log_file.seek(0)
        self.update_from_raw_log(log_file)

    def get_wptreport_data(self, input_str):
        """Try to parse input_str as a wptreport object; return True on success."""
        try:
            data = json.loads(input_str)
        except Exception:
            pass
        else:
            # wptreport objects have "results" but no "action" key
            # (raw-log lines carry "action").
            if "action" not in data and "results" in data:
                self.update_from_wptreport_log(data)
                return True
        return False

    def update_from_raw_log(self, log_file):
        """Dispatch each JSON line of a raw structured log to its handler."""
        action_map = self.action_map
        for line in log_file:
            try:
                data = json.loads(line)
            except ValueError:
                # Just skip lines that aren't json
                continue
            action = data["action"]
            if action in action_map:
                action_map[action](data)

    def update_from_wptreport_log(self, data):
        """Replay a wptreport run as the equivalent raw-log actions."""
        action_map = self.action_map
        action_map["suite_start"]({"run_info": data["run_info"]})
        for test in data["results"]:
            action_map["test_start"]({"test": test["test"]})
            for subtest in test["subtests"]:
                action_map["test_status"]({"test": test["test"],
                                           "subtest": subtest["name"],
                                           "status": subtest["status"],
                                           "expected": subtest.get("expected"),
                                           "known_intermittent": subtest.get("known_intermittent", [])})
            action_map["test_end"]({"test": test["test"],
                                    "status": test["status"],
                                    "expected": test.get("expected"),
                                    "known_intermittent": test.get("known_intermittent", [])})
            if "asserts" in test:
                asserts = test["asserts"]
                action_map["assertion_count"]({"test": test["test"],
                                               "count": asserts["count"],
                                               "min_expected": asserts["min"],
                                               "max_expected": asserts["max"]})
        for item in data.get("lsan_leaks", []):
            action_map["lsan_leak"](item)

        mozleak_data = data.get("mozleak", {})
        for scope, scope_data in mozleak_data.items():
            for key, action in [("objects", "mozleak_object"),
                                ("total", "mozleak_total")]:
                for item in scope_data.get(key, []):
                    item_data = {"scope": scope}
                    item_data.update(item)
                    action_map[action](item_data)

    def suite_start(self, data):
        # Intern the run_info once per log; all results reference it by index.
        self.run_info = run_info_intern.store(RunInfo(data["run_info"]))

    def test_start(self, data):
        test_id = intern(ensure_str(data["test"]))
        try:
            self.id_test_map[test_id]
        except KeyError:
            print("Test not found %s, skipping" % test_id)
            return

        self.tests_visited[test_id] = set()

    def test_status(self, data):
        test_id = intern(ensure_str(data["test"]))
        subtest = intern(ensure_str(data["subtest"]))
        test_data = self.id_test_map.get(test_id)
        if test_data is None:
            return

        self.tests_visited[test_id].add(subtest)

        result = pack_result(data)

        test_data.set(test_id, subtest, "status", self.run_info, result)
        # A mismatch between expected and actual status means the file's
        # metadata needs rewriting.
        if data.get("expected") and data["expected"] != data["status"]:
            test_data.set_requires_update()

    def test_end(self, data):
        # SKIPped tests carry no useful result data.
        if data["status"] == "SKIP":
            return

        test_id = intern(ensure_str(data["test"]))
        test_data = self.id_test_map.get(test_id)
        if test_data is None:
            return

        result = pack_result(data)

        test_data.set(test_id, None, "status", self.run_info, result)
        if data.get("expected") and data["expected"] != data["status"]:
            test_data.set_requires_update()
        del self.tests_visited[test_id]

    def assertion_count(self, data):
        test_id = intern(ensure_str(data["test"]))
        test_data = self.id_test_map.get(test_id)
        if test_data is None:
            return

        test_data.set(test_id, None, "asserts", self.run_info, data["count"])
        if data["count"] < data["min_expected"] or data["count"] > data["max_expected"]:
            test_data.set_requires_update()

    def test_for_scope(self, data):
        """Map a leak-report scope to its __dir__ id and TestFileData."""
        dir_path = data.get("scope", "/")
        dir_id = intern(ensure_str(os.path.join(dir_path, "__dir__").replace(os.path.sep, "/")))
        if dir_id.startswith("/"):
            dir_id = dir_id[1:]
        return dir_id, self.id_test_map[dir_id]

    def lsan_leak(self, data):
        if data["scope"] == "/":
            logger.warning("Not updating lsan annotations for root scope")
            return
        dir_id, test_data = self.test_for_scope(data)
        test_data.set(dir_id, None, "lsan",
                      self.run_info, (data["frames"], data.get("allowed_match")))
        if not data.get("allowed_match"):
            test_data.set_requires_update()

    def mozleak_object(self, data):
        if data["scope"] == "/":
            logger.warning("Not updating mozleak annotations for root scope")
            return
        dir_id, test_data = self.test_for_scope(data)
        # Fix: record the leaked object as a single "process:name" string.
        # Previously the format string and its arguments were stored
        # unformatted as separate tuple members, producing a 3-tuple
        # ("%s:%s", (process, name), allowed) instead of the intended
        # (name, allowed) pair.
        test_data.set(dir_id, None, "leak-object",
                      self.run_info, ("%s:%s" % (data["process"], data["name"]),
                                      data.get("allowed")))
        if not data.get("allowed"):
            test_data.set_requires_update()

    def mozleak_total(self, data):
        if data["scope"] == "/":
            logger.warning("Not updating mozleak annotations for root scope")
            return
        if data["bytes"]:
            dir_id, test_data = self.test_for_scope(data)
            test_data.set(dir_id, None, "leak-threshold",
                          self.run_info, (data["process"], data["bytes"], data["threshold"]))
            if data["bytes"] > data["threshold"] or data["bytes"] < 0:
                test_data.set_requires_update()
531
532
def create_test_tree(metadata_path, test_manifest):
    """Create a map of test_id to TestFileData for that test.

    Also creates placeholder TestFileData entries (item_type None, no tests)
    for each ancestor directory's __dir__ metadata file, so directory-scoped
    results (lsan / mozleak) have somewhere to be recorded.
    """
    do_delayed_imports()
    id_test_map = {}
    # Types that never produce results to update.
    exclude_types = frozenset(["manual", "support", "conformancechecker"])
    all_types = set(manifestitem.item_types.keys())
    assert all_types > exclude_types
    include_types = all_types - exclude_types
    for item_type, test_path, tests in test_manifest.itertypes(*include_types):
        test_file_data = TestFileData(intern(ensure_str(test_manifest.url_base)),
                                      intern(ensure_str(item_type)),
                                      metadata_path,
                                      test_path,
                                      tests)
        for test in tests:
            id_test_map[intern(ensure_str(test.id))] = test_file_data

        # Walk up the directory tree creating __dir__ entries, stopping as
        # soon as we reach one already created for an earlier test path.
        dir_path = os.path.dirname(test_path)
        while True:
            dir_meta_path = os.path.join(dir_path, "__dir__")
            dir_id = (test_manifest.url_base + dir_meta_path.replace(os.path.sep, "/")).lstrip("/")
            if dir_id in id_test_map:
                break

            test_file_data = TestFileData(intern(ensure_str(test_manifest.url_base)),
                                          None,
                                          metadata_path,
                                          dir_meta_path,
                                          [])
            id_test_map[dir_id] = test_file_data
            dir_path = os.path.dirname(dir_path)
            if not dir_path:
                break

    return id_test_map
569
570
class PackedResultList(object):
    """Compact storage for test results.

    Each entry is a 16-bit integer: bits 12-15 hold the interned property
    name, bits 8-11 the interned status (when the value is a plain status
    code), and bits 0-7 the interned run_info. Values that aren't simple
    status codes are kept in a side dictionary keyed by entry index, with
    the status nibble left as 0 to flag them.

    Relies on the module-level prop_intern, run_info_intern and
    status_intern InternedData pools to translate between bit values and
    Python objects."""

    __slots__ = ("data", "raw_data")

    def __init__(self):
        self.data = array.array("H")

    def append(self, prop, run_info, value):
        """Record one (prop, run_info, value) observation."""
        packed = (prop << 12) + run_info
        if prop == prop_intern.store("status") and isinstance(value, int):
            packed += value << 8
        else:
            # Rich value: park it in the side table, leaving the status
            # nibble zero as the marker.
            if not hasattr(self, "raw_data"):
                self.raw_data = {}
            self.raw_data[len(self.data)] = value
        self.data.append(packed)

    def unpack(self, idx, packed):
        """Decode entry idx back into (prop, run_info, value)."""
        prop = prop_intern.get((packed & 0xF000) >> 12)

        status_bits = (packed & 0x0F00) >> 8
        value = self.raw_data[idx] if status_bits == 0 else status_intern.get(status_bits)

        run_info = run_info_intern.get(packed & 0x00FF)

        return prop, run_info, value

    def __iter__(self):
        for idx, packed in enumerate(self.data):
            yield self.unpack(idx, packed)
616
617
class TestFileData(object):
    """Observed results plus metadata-update logic for one test file.

    Accumulates (prop, run_info, value) observations per (test, subtest)
    via `set`, and `update` merges them into the file's expectation
    manifest."""

    __slots__ = ("url_base", "item_type", "test_path", "metadata_path", "tests",
                 "_requires_update", "data")

    def __init__(self, url_base, item_type, metadata_path, test_path, tests):
        # Server URL prefix under which this manifest's tests are served.
        self.url_base = url_base
        # Manifest item type (e.g. "testharness"); None for __dir__ entries.
        self.item_type = item_type
        self.test_path = test_path
        self.metadata_path = metadata_path
        # Interned ids of every test defined in this file.
        self.tests = {intern(ensure_str(item.id)) for item in tests}
        self._requires_update = False
        # data[test_id][subtest_id] -> PackedResultList of observations;
        # a subtest_id of None refers to the test (or directory) itself.
        self.data = defaultdict(lambda: defaultdict(PackedResultList))

    def set_requires_update(self):
        # Mark that at least one observation disagreed with the metadata.
        self._requires_update = True

    @property
    def requires_update(self):
        return self._requires_update

    def set(self, test_id, subtest_id, prop, run_info, value):
        # Record one observation; prop is interned to keep storage compact.
        self.data[test_id][subtest_id].append(prop_intern.store(prop),
                                              run_info,
                                              value)

    def expected(self, update_properties, update_intermittent, remove_intermittent):
        """Load this file's expectation manifest, creating an empty one if absent."""
        expected_data = load_expected(self.url_base,
                                      self.metadata_path,
                                      self.test_path,
                                      self.tests,
                                      update_properties,
                                      update_intermittent,
                                      remove_intermittent)
        if expected_data is None:
            expected_data = create_expected(self.url_base,
                                            self.test_path,
                                            update_properties,
                                            update_intermittent,
                                            remove_intermittent)
        return expected_data

    def is_disabled(self, test):
        # This conservatively assumes that anything that was disabled remains disabled
        # we could probably do better by checking if it's in the full set of run infos
        return test.has_key("disabled")

    def orphan_subtests(self, expected):
        # Return subtest nodes present in the expected file, but missing from the data
        rv = []

        for test_id, subtests in self.data.items():
            test = expected.get_test(ensure_text(test_id))
            if not test:
                continue
            seen_subtests = set(ensure_text(item) for item in subtests.keys() if item is not None)
            missing_subtests = set(test.subtests.keys()) - seen_subtests
            for item in missing_subtests:
                expected_subtest = test.get_subtest(item)
                # Keep disabled subtests: they weren't expected to produce data.
                if not self.is_disabled(expected_subtest):
                    rv.append(expected_subtest)
            for name in seen_subtests:
                subtest = test.get_subtest(name)
                # If any of the items have children (ie subsubtests) we want to prune these
                if subtest.children:
                    rv.extend(subtest.children)

        return rv

    def filter_unknown_props(self, update_properties, subtests):
        # Remove subtests which have some conditions that aren't in update_properties
        # since removing these may be inappropriate
        top_level_props, dependent_props = update_properties
        all_properties = set(top_level_props)
        for item in dependent_props.values():
            all_properties |= set(item)

        filtered = []
        for subtest in subtests:
            include = True
            for key, _ in subtest.iter_properties():
                conditions = subtest.get_conditions(key)
                for condition in conditions:
                    if not condition.variables.issubset(all_properties):
                        include = False
                        break
                if not include:
                    break
            if include:
                filtered.append(subtest)
        return filtered

    def update(self, default_expected_by_type, update_properties,
               full_update=False, disable_intermittent=None, update_intermittent=False,
               remove_intermittent=False):
        """Merge the recorded results into the expectation manifest.

        Returns the updated manifest, or None when no update was needed."""
        # If we are doing a full update, we may need to prune missing nodes
        # even if the expectations didn't change
        if not self.requires_update and not full_update:
            return

        expected = self.expected(update_properties,
                                 update_intermittent=update_intermittent,
                                 remove_intermittent=remove_intermittent)

        if full_update:
            orphans = self.orphan_subtests(expected)
            orphans = self.filter_unknown_props(update_properties, orphans)

            if not self.requires_update and not orphans:
                return

            if orphans:
                expected.modified = True
                for item in orphans:
                    item.remove()

        expected_by_test = {}

        # Ensure every test in the file has a node in the manifest.
        for test_id in self.tests:
            if not expected.has_test(test_id):
                expected.append(manifestupdate.TestNode.create(test_id))
            test_expected = expected.get_test(test_id)
            expected_by_test[test_id] = test_expected

        for test_id, test_data in self.data.items():
            test_id = ensure_str(test_id)
            for subtest_id, results_list in test_data.items():
                for prop, run_info, value in results_list:
                    # Special case directory metadata
                    if subtest_id is None and test_id.endswith("__dir__"):
                        if prop == "lsan":
                            expected.set_lsan(run_info, value)
                        elif prop == "leak-object":
                            expected.set_leak_object(run_info, value)
                        elif prop == "leak-threshold":
                            expected.set_leak_threshold(run_info, value)
                        continue

                    test_expected = expected_by_test[test_id]
                    if subtest_id is None:
                        item_expected = test_expected
                    else:
                        subtest_id = ensure_text(subtest_id)
                        item_expected = test_expected.get_subtest(subtest_id)

                    if prop == "status":
                        status, known_intermittent = unpack_result(value)
                        value = Result(status,
                                       known_intermittent,
                                       default_expected_by_type[self.item_type,
                                                                subtest_id is not None])
                        item_expected.set_result(run_info, value)
                    elif prop == "asserts":
                        item_expected.set_asserts(run_info, value)

        # Collapse/clean conditions on the manifest, then on every test and
        # subtest node.
        expected.update(full_update=full_update,
                        disable_intermittent=disable_intermittent)
        for test in expected.iterchildren():
            for subtest in test.iterchildren():
                subtest.update(full_update=full_update,
                               disable_intermittent=disable_intermittent)
            test.update(full_update=full_update,
                        disable_intermittent=disable_intermittent)

        return expected
782
783
# (status, known_intermittent, default_expected) triple passed to the
# manifest nodes via set_result in TestFileData.update.
Result = namedtuple("Result", ["status", "known_intermittent", "default_expected"])
785
786
def create_expected(url_base, test_path, run_info_properties, update_intermittent, remove_intermittent):
    """Build a fresh ExpectedManifest for a test file with no existing metadata."""
    return manifestupdate.ExpectedManifest(None,
                                           test_path,
                                           url_base,
                                           run_info_properties,
                                           update_intermittent,
                                           remove_intermittent)
795
796
def load_expected(url_base, metadata_path, test_path, tests, run_info_properties, update_intermittent, remove_intermittent):
    """Load the existing expectation manifest for a test file, if any.

    NOTE: `tests` is accepted for interface compatibility but not used here."""
    return manifestupdate.get_manifest(metadata_path,
                                       test_path,
                                       url_base,
                                       run_info_properties,
                                       update_intermittent,
                                       remove_intermittent)
805