1# obsutil.py - utility functions for obsolescence
2#
3# Copyright 2017 Boris Feld <boris.feld@octobus.net>
4#
5# This software may be used and distributed according to the terms of the
6# GNU General Public License version 2 or any later version.
7
8from __future__ import absolute_import
9
10import re
11
12from .i18n import _
13from .node import (
14    hex,
15    short,
16)
17from . import (
18    diffutil,
19    encoding,
20    error,
21    phases,
22    pycompat,
23    util,
24)
25from .utils import dateutil
26
27### obsolescence marker flag
28
29## bumpedfix flag
30#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
33#
34# o    A' (bumped)
35# |`:
36# | o  A
37# |/
38# o    Z
39#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
43#
44# o   Ad
45# |`:
46# | x A'
47# |'|
48# o | A
49# |/
50# o Z
51#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped versions and fixes the situation, breaking the transitivity of
# "bumped" here.
57bumpedfix = 1
58usingsha256 = 2
59
60
61class marker(object):
62    """Wrap obsolete marker raw data"""
63
64    def __init__(self, repo, data):
65        # the repo argument will be used to create changectx in later version
66        self._repo = repo
67        self._data = data
68        self._decodedmeta = None
69
70    def __hash__(self):
71        return hash(self._data)
72
73    def __eq__(self, other):
74        if type(other) != type(self):
75            return False
76        return self._data == other._data
77
78    def prednode(self):
79        """Predecessor changeset node identifier"""
80        return self._data[0]
81
82    def succnodes(self):
83        """List of successor changesets node identifiers"""
84        return self._data[1]
85
86    def parentnodes(self):
87        """Parents of the predecessors (None if not recorded)"""
88        return self._data[5]
89
90    def metadata(self):
91        """Decoded metadata dictionary"""
92        return dict(self._data[3])
93
94    def date(self):
95        """Creation date as (unixtime, offset)"""
96        return self._data[4]
97
98    def flags(self):
99        """The flags field of the marker"""
100        return self._data[2]
101
102
103def getmarkers(repo, nodes=None, exclusive=False):
104    """returns markers known in a repository
105
106    If <nodes> is specified, only markers "relevant" to those nodes are are
107    returned"""
108    if nodes is None:
109        rawmarkers = repo.obsstore
110    elif exclusive:
111        rawmarkers = exclusivemarkers(repo, nodes)
112    else:
113        rawmarkers = repo.obsstore.relevantmarkers(nodes)
114
115    for markerdata in rawmarkers:
116        yield marker(repo, markerdata)
117
118
119def sortedmarkers(markers):
120    # last item of marker tuple ('parents') may be None or a tuple
121    return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
122
123
124def closestpredecessors(repo, nodeid):
125    """yield the list of next predecessors pointing on visible changectx nodes
126
127    This function respect the repoview filtering, filtered revision will be
128    considered missing.
129    """
130
131    precursors = repo.obsstore.predecessors
132    stack = [nodeid]
133    seen = set(stack)
134
135    while stack:
136        current = stack.pop()
137        currentpreccs = precursors.get(current, ())
138
139        for prec in currentpreccs:
140            precnodeid = prec[0]
141
142            # Basic cycle protection
143            if precnodeid in seen:
144                continue
145            seen.add(precnodeid)
146
147            if precnodeid in repo:
148                yield precnodeid
149            else:
150                stack.append(precnodeid)
151
152
153def allpredecessors(obsstore, nodes, ignoreflags=0):
154    """Yield node for every precursors of <nodes>.
155
156    Some precursors may be unknown locally.
157
158    This is a linear yield unsuited to detecting folded changesets. It includes
159    initial nodes too."""
160
161    remaining = set(nodes)
162    seen = set(remaining)
163    prec = obsstore.predecessors.get
164    while remaining:
165        current = remaining.pop()
166        yield current
167        for mark in prec(current, ()):
168            # ignore marker flagged with specified flag
169            if mark[2] & ignoreflags:
170                continue
171            suc = mark[0]
172            if suc not in seen:
173                seen.add(suc)
174                remaining.add(suc)
175
176
177def allsuccessors(obsstore, nodes, ignoreflags=0):
178    """Yield node for every successor of <nodes>.
179
180    Some successors may be unknown locally.
181
182    This is a linear yield unsuited to detecting split changesets. It includes
183    initial nodes too."""
184    remaining = set(nodes)
185    seen = set(remaining)
186    while remaining:
187        current = remaining.pop()
188        yield current
189        for mark in obsstore.successors.get(current, ()):
190            # ignore marker flagged with specified flag
191            if mark[2] & ignoreflags:
192                continue
193            for suc in mark[1]:
194                if suc not in seen:
195                    seen.add(suc)
196                    remaining.add(suc)
197
198
199def _filterprunes(markers):
200    """return a set with no prune markers"""
201    return {m for m in markers if m[1]}
202
203
204def exclusivemarkers(repo, nodes):
205    """set of markers relevant to "nodes" but no other locally-known nodes
206
207    This function compute the set of markers "exclusive" to a locally-known
208    node. This means we walk the markers starting from <nodes> until we reach a
209    locally-known precursors outside of <nodes>. Element of <nodes> with
210    locally-known successors outside of <nodes> are ignored (since their
211    precursors markers are also relevant to these successors).
212
213    For example:
214
215        # (A0 rewritten as A1)
216        #
217        # A0 <-1- A1 # Marker "1" is exclusive to A1
218
219        or
220
221        # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
222        #
223        # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
224
225        or
226
227        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
228        #
229        #          <-2- A1 # Marker "2" is exclusive to A0,A1
230        #        /
231        # <-1- A0
232        #        \
233        #         <-3- A2 # Marker "3" is exclusive to A0,A2
234        #
235        # in addition:
236        #
237        #  Markers "2,3" are exclusive to A1,A2
238        #  Markers "1,2,3" are exclusive to A0,A1,A2
239
240        See test/test-obsolete-bundle-strip.t for more examples.
241
242    An example usage is strip. When stripping a changeset, we also want to
243    strip the markers exclusive to this changeset. Otherwise we would have
244    "dangling"" obsolescence markers from its precursors: Obsolescence markers
245    marking a node as obsolete without any successors available locally.
246
247    As for relevant markers, the prune markers for children will be followed.
248    Of course, they will only be followed if the pruned children is
249    locally-known. Since the prune markers are relevant to the pruned node.
250    However, while prune markers are considered relevant to the parent of the
251    pruned changesets, prune markers for locally-known changeset (with no
252    successors) are considered exclusive to the pruned nodes. This allows
253    to strip the prune markers (with the rest of the exclusive chain) alongside
254    the pruned changesets.
255    """
256    # running on a filtered repository would be dangerous as markers could be
257    # reported as exclusive when they are relevant for other filtered nodes.
258    unfi = repo.unfiltered()
259
260    # shortcut to various useful item
261    has_node = unfi.changelog.index.has_node
262    precursorsmarkers = unfi.obsstore.predecessors
263    successormarkers = unfi.obsstore.successors
264    childrenmarkers = unfi.obsstore.children
265
266    # exclusive markers (return of the function)
267    exclmarkers = set()
268    # we need fast membership testing
269    nodes = set(nodes)
270    # looking for head in the obshistory
271    #
272    # XXX we are ignoring all issues in regard with cycle for now.
273    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
274    stack.sort()
275    # nodes already stacked
276    seennodes = set(stack)
277    while stack:
278        current = stack.pop()
279        # fetch precursors markers
280        markers = list(precursorsmarkers.get(current, ()))
281        # extend the list with prune markers
282        for mark in successormarkers.get(current, ()):
283            if not mark[1]:
284                markers.append(mark)
285        # and markers from children (looking for prune)
286        for mark in childrenmarkers.get(current, ()):
287            if not mark[1]:
288                markers.append(mark)
289        # traverse the markers
290        for mark in markers:
291            if mark in exclmarkers:
292                # markers already selected
293                continue
294
295            # If the markers is about the current node, select it
296            #
297            # (this delay the addition of markers from children)
298            if mark[1] or mark[0] == current:
299                exclmarkers.add(mark)
300
301            # should we keep traversing through the precursors?
302            prec = mark[0]
303
304            # nodes in the stack or already processed
305            if prec in seennodes:
306                continue
307
308            # is this a locally known node ?
309            known = has_node(prec)
310            # if locally-known and not in the <nodes> set the traversal
311            # stop here.
312            if known and prec not in nodes:
313                continue
314
315            # do not keep going if there are unselected markers pointing to this
316            # nodes. If we end up traversing these unselected markers later the
317            # node will be taken care of at that point.
318            precmarkers = _filterprunes(successormarkers.get(prec))
319            if precmarkers.issubset(exclmarkers):
320                seennodes.add(prec)
321                stack.append(prec)
322
323    return exclmarkers
324
325
326def foreground(repo, nodes):
327    """return all nodes in the "foreground" of other node
328
329    The foreground of a revision is anything reachable using parent -> children
330    or precursor -> successor relation. It is very similar to "descendant" but
331    augmented with obsolescence information.
332
333    Beware that possible obsolescence cycle may result if complex situation.
334    """
335    repo = repo.unfiltered()
336    foreground = set(repo.set(b'%ln::', nodes))
337    if repo.obsstore:
338        # We only need this complicated logic if there is obsolescence
339        # XXX will probably deserve an optimised revset.
340        has_node = repo.changelog.index.has_node
341        plen = -1
342        # compute the whole set of successors or descendants
343        while len(foreground) != plen:
344            plen = len(foreground)
345            succs = {c.node() for c in foreground}
346            mutable = [c.node() for c in foreground if c.mutable()]
347            succs.update(allsuccessors(repo.obsstore, mutable))
348            known = (n for n in succs if has_node(n))
349            foreground = set(repo.set(b'%ln::', known))
350    return {c.node() for c in foreground}
351
352
353# effectflag field
354#
355# Effect-flag is a 1-byte bit field used to store what changed between a
356# changeset and its successor(s).
357#
358# The effect flag is stored in obs-markers metadata while we iterate on the
359# information design. That's why we have the EFFECTFLAGFIELD. If we come up
360# with an incompatible design for effect flag, we can store a new design under
361# another field name so we don't break readers. We plan to extend the existing
362# obsmarkers bit-field when the effect flag design will be stabilized.
363#
364# The effect-flag is placed behind an experimental flag
365# `effect-flags` set to off by default.
366#
367
368EFFECTFLAGFIELD = b"ef1"
369
370DESCCHANGED = 1 << 0  # action changed the description
371METACHANGED = 1 << 1  # action change the meta
372DIFFCHANGED = 1 << 3  # action change diff introduced by the changeset
373PARENTCHANGED = 1 << 2  # action change the parent
374USERCHANGED = 1 << 4  # the user changed
375DATECHANGED = 1 << 5  # the date changed
376BRANCHCHANGED = 1 << 6  # the branch changed
377
378METABLACKLIST = [
379    re.compile(b'^branch$'),
380    re.compile(b'^.*-source$'),
381    re.compile(b'^.*_source$'),
382    re.compile(b'^source$'),
383]
384
385
386def metanotblacklisted(metaitem):
387    """Check that the key of a meta item (extrakey, extravalue) does not
388    match at least one of the blacklist pattern
389    """
390    metakey = metaitem[0]
391
392    return not any(pattern.match(metakey) for pattern in METABLACKLIST)
393
394
395def _prepare_hunk(hunk):
396    """Drop all information but the username and patch"""
397    cleanhunk = []
398    for line in hunk.splitlines():
399        if line.startswith(b'# User') or not line.startswith(b'#'):
400            if line.startswith(b'@@'):
401                line = b'@@\n'
402            cleanhunk.append(line)
403    return cleanhunk
404
405
406def _getdifflines(iterdiff):
407    """return a cleaned up lines"""
408    lines = next(iterdiff, None)
409
410    if lines is None:
411        return lines
412
413    return _prepare_hunk(lines)
414
415
416def _cmpdiff(leftctx, rightctx):
417    """return True if both ctx introduce the "same diff"
418
419    This is a first and basic implementation, with many shortcoming.
420    """
421    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})
422
423    # Leftctx or right ctx might be filtered, so we need to use the contexts
424    # with an unfiltered repository to safely compute the diff
425
426    # leftctx and rightctx can be from different repository views in case of
427    # hgsubversion, do don't try to access them from same repository
428    # rightctx.repo() and leftctx.repo() are not always the same
429    leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
430    leftdiff = leftunfi.diff(opts=diffopts)
431    rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
432    rightdiff = rightunfi.diff(opts=diffopts)
433
434    left, right = (0, 0)
435    while None not in (left, right):
436        left = _getdifflines(leftdiff)
437        right = _getdifflines(rightdiff)
438
439        if left != right:
440            return False
441    return True
442
443
444def geteffectflag(source, successors):
445    """From an obs-marker relation, compute what changed between the
446    predecessor and the successor.
447    """
448    effects = 0
449
450    for changectx in successors:
451        # Check if description has changed
452        if changectx.description() != source.description():
453            effects |= DESCCHANGED
454
455        # Check if user has changed
456        if changectx.user() != source.user():
457            effects |= USERCHANGED
458
459        # Check if date has changed
460        if changectx.date() != source.date():
461            effects |= DATECHANGED
462
463        # Check if branch has changed
464        if changectx.branch() != source.branch():
465            effects |= BRANCHCHANGED
466
467        # Check if at least one of the parent has changed
468        if changectx.parents() != source.parents():
469            effects |= PARENTCHANGED
470
471        # Check if other meta has changed
472        changeextra = changectx.extra().items()
473        ctxmeta = list(filter(metanotblacklisted, changeextra))
474
475        sourceextra = source.extra().items()
476        srcmeta = list(filter(metanotblacklisted, sourceextra))
477
478        if ctxmeta != srcmeta:
479            effects |= METACHANGED
480
481        # Check if the diff has changed
482        if not _cmpdiff(source, changectx):
483            effects |= DIFFCHANGED
484
485    return effects
486
487
488def getobsoleted(repo, tr=None, changes=None):
489    """return the set of pre-existing revisions obsoleted by a transaction
490
491    Either the transaction or changes item of the transaction (for hooks)
492    must be provided, but not both.
493    """
494    if (tr is None) == (changes is None):
495        e = b"exactly one of tr and changes must be provided"
496        raise error.ProgrammingError(e)
497    torev = repo.unfiltered().changelog.index.get_rev
498    phase = repo._phasecache.phase
499    succsmarkers = repo.obsstore.successors.get
500    public = phases.public
501    if changes is None:
502        changes = tr.changes
503    addedmarkers = changes[b'obsmarkers']
504    origrepolen = changes[b'origrepolen']
505    seenrevs = set()
506    obsoleted = set()
507    for mark in addedmarkers:
508        node = mark[0]
509        rev = torev(node)
510        if rev is None or rev in seenrevs or rev >= origrepolen:
511            continue
512        seenrevs.add(rev)
513        if phase(repo, rev) == public:
514            continue
515        if set(succsmarkers(node) or []).issubset(addedmarkers):
516            obsoleted.add(rev)
517    return obsoleted
518
519
520class _succs(list):
521    """small class to represent a successors with some metadata about it"""
522
523    def __init__(self, *args, **kwargs):
524        super(_succs, self).__init__(*args, **kwargs)
525        self.markers = set()
526
527    def copy(self):
528        new = _succs(self)
529        new.markers = self.markers.copy()
530        return new
531
532    @util.propertycache
533    def _set(self):
534        # immutable
535        return set(self)
536
537    def canmerge(self, other):
538        return self._set.issubset(other._set)
539
540
541def successorssets(repo, initialnode, closest=False, cache=None):
542    """Return set of all latest successors of initial nodes
543
544    The successors set of a changeset A are the group of revisions that succeed
545    A. It succeeds A as a consistent whole, each revision being only a partial
546    replacement. By default, the successors set contains non-obsolete
547    changesets only, walking the obsolescence graph until reaching a leaf. If
548    'closest' is set to True, closest successors-sets are return (the
549    obsolescence walk stops on known changesets).
550
551    This function returns the full list of successor sets which is why it
552    returns a list of tuples and not just a single tuple. Each tuple is a valid
553    successors set. Note that (A,) may be a valid successors set for changeset A
554    (see below).
555
556    In most cases, a changeset A will have a single element (e.g. the changeset
557    A is replaced by A') in its successors set. Though, it is also common for a
558    changeset A to have no elements in its successor set (e.g. the changeset
559    has been pruned). Therefore, the returned list of successors sets will be
560    [(A',)] or [], respectively.
561
562    When a changeset A is split into A' and B', however, it will result in a
563    successors set containing more than a single element, i.e. [(A',B')].
564    Divergent changesets will result in multiple successors sets, i.e. [(A',),
565    (A'')].
566
567    If a changeset A is not obsolete, then it will conceptually have no
568    successors set. To distinguish this from a pruned changeset, the successor
569    set will contain itself only, i.e. [(A,)].
570
571    Finally, final successors unknown locally are considered to be pruned
572    (pruned: obsoleted without any successors). (Final: successors not affected
573    by markers).
574
575    The 'closest' mode respect the repoview filtering. For example, without
576    filter it will stop at the first locally known changeset, with 'visible'
577    filter it will stop on visible changesets).
578
579    The optional `cache` parameter is a dictionary that may contains
580    precomputed successors sets. It is meant to reuse the computation of a
581    previous call to `successorssets` when multiple calls are made at the same
582    time. The cache dictionary is updated in place. The caller is responsible
583    for its life span. Code that makes multiple calls to `successorssets`
584    *should* use this cache mechanism or risk a performance hit.
585
586    Since results are different depending of the 'closest' most, the same cache
587    cannot be reused for both mode.
588    """
589
590    succmarkers = repo.obsstore.successors
591
592    # Stack of nodes we search successors sets for
593    toproceed = [initialnode]
594    # set version of above list for fast loop detection
595    # element added to "toproceed" must be added here
596    stackedset = set(toproceed)
597    if cache is None:
598        cache = {}
599
600    # This while loop is the flattened version of a recursive search for
601    # successors sets
602    #
603    # def successorssets(x):
604    #    successors = directsuccessors(x)
605    #    ss = [[]]
606    #    for succ in directsuccessors(x):
607    #        # product as in itertools cartesian product
608    #        ss = product(ss, successorssets(succ))
609    #    return ss
610    #
611    # But we can not use plain recursive calls here:
612    # - that would blow the python call stack
613    # - obsolescence markers may have cycles, we need to handle them.
614    #
615    # The `toproceed` list act as our call stack. Every node we search
616    # successors set for are stacked there.
617    #
618    # The `stackedset` is set version of this stack used to check if a node is
619    # already stacked. This check is used to detect cycles and prevent infinite
620    # loop.
621    #
622    # successors set of all nodes are stored in the `cache` dictionary.
623    #
624    # After this while loop ends we use the cache to return the successors sets
625    # for the node requested by the caller.
626    while toproceed:
627        # Every iteration tries to compute the successors sets of the topmost
628        # node of the stack: CURRENT.
629        #
630        # There are four possible outcomes:
631        #
632        # 1) We already know the successors sets of CURRENT:
633        #    -> mission accomplished, pop it from the stack.
634        # 2) Stop the walk:
635        #    default case: Node is not obsolete
636        #    closest case: Node is known at this repo filter level
637        #      -> the node is its own successors sets. Add it to the cache.
638        # 3) We do not know successors set of direct successors of CURRENT:
639        #    -> We add those successors to the stack.
640        # 4) We know successors sets of all direct successors of CURRENT:
641        #    -> We can compute CURRENT successors set and add it to the
642        #       cache.
643        #
644        current = toproceed[-1]
645
646        # case 2 condition is a bit hairy because of closest,
647        # we compute it on its own
648        case2condition = (current not in succmarkers) or (
649            closest and current != initialnode and current in repo
650        )
651
652        if current in cache:
653            # case (1): We already know the successors sets
654            stackedset.remove(toproceed.pop())
655        elif case2condition:
656            # case (2): end of walk.
657            if current in repo:
658                # We have a valid successors.
659                cache[current] = [_succs((current,))]
660            else:
661                # Final obsolete version is unknown locally.
662                # Do not count that as a valid successors
663                cache[current] = []
664        else:
665            # cases (3) and (4)
666            #
667            # We proceed in two phases. Phase 1 aims to distinguish case (3)
668            # from case (4):
669            #
670            #     For each direct successors of CURRENT, we check whether its
671            #     successors sets are known. If they are not, we stack the
672            #     unknown node and proceed to the next iteration of the while
673            #     loop. (case 3)
674            #
675            #     During this step, we may detect obsolescence cycles: a node
676            #     with unknown successors sets but already in the call stack.
677            #     In such a situation, we arbitrary set the successors sets of
678            #     the node to nothing (node pruned) to break the cycle.
679            #
680            #     If no break was encountered we proceed to phase 2.
681            #
682            # Phase 2 computes successors sets of CURRENT (case 4); see details
683            # in phase 2 itself.
684            #
685            # Note the two levels of iteration in each phase.
686            # - The first one handles obsolescence markers using CURRENT as
687            #   precursor (successors markers of CURRENT).
688            #
689            #   Having multiple entry here means divergence.
690            #
691            # - The second one handles successors defined in each marker.
692            #
693            #   Having none means pruned node, multiple successors means split,
694            #   single successors are standard replacement.
695            #
696            for mark in sortedmarkers(succmarkers[current]):
697                for suc in mark[1]:
698                    if suc not in cache:
699                        if suc in stackedset:
700                            # cycle breaking
701                            cache[suc] = []
702                        else:
703                            # case (3) If we have not computed successors sets
704                            # of one of those successors we add it to the
705                            # `toproceed` stack and stop all work for this
706                            # iteration.
707                            toproceed.append(suc)
708                            stackedset.add(suc)
709                            break
710                else:
711                    continue
712                break
713            else:
714                # case (4): we know all successors sets of all direct
715                # successors
716                #
717                # Successors set contributed by each marker depends on the
718                # successors sets of all its "successors" node.
719                #
720                # Each different marker is a divergence in the obsolescence
721                # history. It contributes successors sets distinct from other
722                # markers.
723                #
724                # Within a marker, a successor may have divergent successors
725                # sets. In such a case, the marker will contribute multiple
726                # divergent successors sets. If multiple successors have
727                # divergent successors sets, a Cartesian product is used.
728                #
729                # At the end we post-process successors sets to remove
730                # duplicated entry and successors set that are strict subset of
731                # another one.
732                succssets = []
733                for mark in sortedmarkers(succmarkers[current]):
734                    # successors sets contributed by this marker
735                    base = _succs()
736                    base.markers.add(mark)
737                    markss = [base]
738                    for suc in mark[1]:
739                        # cardinal product with previous successors
740                        productresult = []
741                        for prefix in markss:
742                            for suffix in cache[suc]:
743                                newss = prefix.copy()
744                                newss.markers.update(suffix.markers)
745                                for part in suffix:
746                                    # do not duplicated entry in successors set
747                                    # first entry wins.
748                                    if part not in newss:
749                                        newss.append(part)
750                                productresult.append(newss)
751                        if productresult:
752                            markss = productresult
753                    succssets.extend(markss)
754                # remove duplicated and subset
755                seen = []
756                final = []
757                candidates = sorted(
758                    (s for s in succssets if s), key=len, reverse=True
759                )
760                for cand in candidates:
761                    for seensuccs in seen:
762                        if cand.canmerge(seensuccs):
763                            seensuccs.markers.update(cand.markers)
764                            break
765                    else:
766                        final.append(cand)
767                        seen.append(cand)
768                final.reverse()  # put small successors set first
769                cache[current] = final
770    return cache[initialnode]
771
772
773def successorsandmarkers(repo, ctx):
774    """compute the raw data needed for computing obsfate
775    Returns a list of dict, one dict per successors set
776    """
777    if not ctx.obsolete():
778        return None
779
780    ssets = successorssets(repo, ctx.node(), closest=True)
781
782    # closestsuccessors returns an empty list for pruned revisions, remap it
783    # into a list containing an empty list for future processing
784    if ssets == []:
785        ssets = [_succs()]
786
787    # Try to recover pruned markers
788    succsmap = repo.obsstore.successors
789    fullsuccessorsets = []  # successor set + markers
790    for sset in ssets:
791        if sset:
792            fullsuccessorsets.append(sset)
793        else:
794            # successorsset return an empty set() when ctx or one of its
795            # successors is pruned.
796            # In this case, walk the obs-markers tree again starting with ctx
797            # and find the relevant pruning obs-makers, the ones without
798            # successors.
799            # Having these markers allow us to compute some information about
800            # its fate, like who pruned this changeset and when.
801
802            # XXX we do not catch all prune markers (eg rewritten then pruned)
803            # (fix me later)
804            foundany = False
805            for mark in succsmap.get(ctx.node(), ()):
806                if not mark[1]:
807                    foundany = True
808                    sset = _succs()
809                    sset.markers.add(mark)
810                    fullsuccessorsets.append(sset)
811            if not foundany:
812                fullsuccessorsets.append(_succs())
813
814    values = []
815    for sset in fullsuccessorsets:
816        values.append({b'successors': sset, b'markers': sset.markers})
817
818    return values
819
820
821def _getobsfate(successorssets):
822    """Compute a changeset obsolescence fate based on its successorssets.
823    Successors can be the tipmost ones or the immediate ones. This function
824    return values are not meant to be shown directly to users, it is meant to
825    be used by internal functions only.
826    Returns one fate from the following values:
827    - pruned
828    - diverged
829    - superseded
830    - superseded_split
831    """
832
833    if len(successorssets) == 0:
834        # The commit has been pruned
835        return b'pruned'
836    elif len(successorssets) > 1:
837        return b'diverged'
838    else:
839        # No divergence, only one set of successors
840        successors = successorssets[0]
841
842        if len(successors) == 1:
843            return b'superseded'
844        else:
845            return b'superseded_split'
846
847
def obsfateverb(successorset, markers):
    """Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    # More than one successor means the changeset was split.
    if len(successorset) > 1:
        return b'split'
    # Exactly one successor: a straightforward rewrite.
    if successorset:
        return b'rewritten'
    # No successor at all: the changeset was pruned.
    return b'pruned'
859
860
def markersdates(markers):
    """returns the list of dates for a list of markers"""
    # A marker's date lives at index 4 of the raw marker tuple.
    dates = []
    for mark in markers:
        dates.append(mark[4])
    return dates
864
865
def markersusers(markers):
    """Returns a sorted list of markers users without duplicates"""
    users = set()
    for mark in markers:
        # Metadata is stored at index 3 as a tuple of (key, value) pairs.
        meta = dict(mark[3])
        user = meta.get(b'user')
        if user:
            users.add(encoding.tolocal(user))
    return sorted(users)
876
877
def markersoperations(markers):
    """Returns a sorted list of markers operations without duplicates"""
    operations = set()
    for mark in markers:
        # Metadata is stored at index 3 as a tuple of (key, value) pairs.
        operation = dict(mark[3]).get(b'operation')
        if operation:
            operations.add(operation)
    return sorted(operations)
886
887
def obsfateprinter(ui, repo, successors, markers, formatctx):
    """Build a obsfate string for a single successorset using all obsfate
    related function defined in obsutil
    """
    quiet = ui.quiet
    verbose = ui.verbose
    # "normal" means neither --verbose nor --quiet was requested
    normal = not verbose and not quiet

    # Start with the verb describing the fate (pruned/rewritten/split)
    parts = [obsfateverb(successors, markers)]

    # Operations recorded in the markers (e.g. amend, rebase)
    operations = markersoperations(markers)
    if operations:
        parts.append(b" using %s" % b", ".join(operations))

    # Successors, formatted through the caller-supplied formatter
    if successors:
        formatted = [formatctx(repo[succ]) for succ in successors]
        parts.append(b" as %s" % b", ".join(formatted))

    # Users recorded in the markers
    users = markersusers(markers)
    # Filter out current user in not verbose mode to reduce amount of
    # information
    if not verbose:
        currentuser = ui.username(acceptempty=True)
        if len(users) == 1 and currentuser in users:
            users = None

    if (verbose or normal) and users:
        parts.append(b" by %s" % b", ".join(users))

    # Dates are only shown in verbose mode
    dates = markersdates(markers)
    if dates and verbose:
        earliest = min(dates)
        latest = max(dates)

        if earliest == latest:
            stamp = dateutil.datestr(earliest, b'%Y-%m-%d %H:%M %1%2')
            parts.append(b" (at %s)" % stamp)
        else:
            first = dateutil.datestr(earliest, b'%Y-%m-%d %H:%M %1%2')
            last = dateutil.datestr(latest, b'%Y-%m-%d %H:%M %1%2')
            parts.append(b" (between %s and %s)" % (first, last))

    return b"".join(parts)
939
940
# Templates used by _getfilteredreason to explain why a hidden revision is
# not visible; keys match the fate values computed by _getobsfate (plus the
# "several" variant used when a split has more than two successors).
filteredmsgtable = {
    b"pruned": _(b"hidden revision '%s' is pruned"),
    b"diverged": _(b"hidden revision '%s' has diverged"),
    b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
    b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
    b"superseded_split_several": _(
        b"hidden revision '%s' was split as: %s and %d more"
    ),
}
950
951
def _getfilteredreason(repo, changeid, ctx):
    """return a human-friendly string on why a obsolete changeset is hidden"""
    ssets = successorssets(repo, ctx.node())
    fate = _getobsfate(ssets)

    # Be more precise in case the revision is superseded
    if fate == b'pruned':
        return filteredmsgtable[b'pruned'] % changeid
    if fate == b'diverged':
        return filteredmsgtable[b'diverged'] % changeid
    if fate == b'superseded':
        # Single successor set with a single successor
        onesucc = short(ssets[0][0])
        return filteredmsgtable[b'superseded'] % (changeid, onesucc)
    if fate == b'superseded_split':
        succs = [short(node_id) for node_id in ssets[0]]

        if len(succs) <= 2:
            # Short enough to list every successor
            fmtsuccs = b', '.join(succs)
            return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)

        # Only list the first two successors and a remainder count
        shown = b', '.join(succs[:2])
        remaining = len(succs) - 2
        return filteredmsgtable[b'superseded_split_several'] % (
            changeid,
            shown,
            remaining,
        )
980
981
def divergentsets(repo, ctx):
    """Compute sets of commits divergent with a given one"""
    cache = {}
    base = {}
    node = ctx.node()
    for pred in allpredecessors(repo.obsstore, [node]):
        if pred == node:
            # a node can't be a base for divergence with itself
            continue
        for succset in successorssets(repo, pred, cache):
            if node in succset:
                # we are only interested in *other* successor sets
                continue
            key = tuple(succset)
            if key in base:
                # we already know the latest base for this divergency
                continue
            base[key] = pred
    return [
        {b'divergentnodes': divset, b'commonpredecessor': b}
        for divset, b in pycompat.iteritems(base)
    ]
1003
1004
def whyunstable(repo, ctx):
    """Return a list of dicts explaining each instability of ``ctx``.

    Each entry carries an b'instability' kind (orphan, phase-divergent or
    content-divergent), a b'reason' and the hex b'node' responsible for it.
    """
    result = []
    if ctx.orphan():
        # Orphan: at least one parent is itself unstable or obsolete
        for parent in ctx.parents():
            if parent.orphan():
                kind = b'orphan'
            elif parent.obsolete():
                kind = b'obsolete'
            else:
                continue
            result.append(
                {
                    b'instability': b'orphan',
                    b'reason': b'%s parent' % kind,
                    b'node': parent.hex(),
                }
            )
    if ctx.phasedivergent():
        # Phase-divergent: some predecessor became immutable (public)
        predecessors = allpredecessors(
            repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
        )
        for pred in predecessors:
            if pred not in repo or repo[pred].mutable():
                continue
            result.append(
                {
                    b'instability': b'phase-divergent',
                    b'reason': b'immutable predecessor',
                    b'node': repo[pred].hex(),
                }
            )
    if ctx.contentdivergent():
        # Content-divergent: competing successor sets exist
        for dset in divergentsets(repo, ctx):
            result.append(
                {
                    b'instability': b'content-divergent',
                    b'divergentnodes': [
                        repo[n] for n in dset[b'divergentnodes']
                    ],
                    b'reason': b'predecessor',
                    b'node': hex(dset[b'commonpredecessor']),
                }
            )
    return result
1050