1# repository.py - Interfaces and base classes for repositories and peers.
2# coding: utf-8
3#
4# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
5#
6# This software may be used and distributed according to the terms of the
7# GNU General Public License version 2 or any later version.
8
9from __future__ import absolute_import
10
11from ..i18n import _
12from .. import error
13from . import util as interfaceutil
14
15# Local repository feature string.
16
17# Revlogs are being used for file storage.
18REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
19# The storage part of the repository is shared from an external source.
20REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
21# LFS supported for backing file storage.
22REPO_FEATURE_LFS = b'lfs'
23# Repository supports being stream cloned.
24REPO_FEATURE_STREAM_CLONE = b'streamclone'
25# Repository supports (at least) some sidedata to be stored
26REPO_FEATURE_SIDE_DATA = b'side-data'
27# Files storage may lack data for all ancestors.
28REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
29
30REVISION_FLAG_CENSORED = 1 << 15
31REVISION_FLAG_ELLIPSIS = 1 << 14
32REVISION_FLAG_EXTSTORED = 1 << 13
33REVISION_FLAG_HASCOPIESINFO = 1 << 12
34
35REVISION_FLAGS_KNOWN = (
36    REVISION_FLAG_CENSORED
37    | REVISION_FLAG_ELLIPSIS
38    | REVISION_FLAG_EXTSTORED
39    | REVISION_FLAG_HASCOPIESINFO
40)
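
# A sketch (purely illustrative) of testing one of these flags on a ``flags``
# value reported by storage or carried on an ``irevisiondelta``:
#
#   if flags & REVISION_FLAG_CENSORED:
#       ...  # the revision's content has been censored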
41
42CG_DELTAMODE_STD = b'default'
43CG_DELTAMODE_PREV = b'previous'
44CG_DELTAMODE_FULL = b'fulltext'
45CG_DELTAMODE_P1 = b'p1'
46
47
48## Cache related constants:
49#
50# Used to control which cache should be warmed in a repo.updatecaches(…) call.
51
# Warm branchmaps for all known repoview filter levels
CACHE_BRANCHMAP_ALL = b"branchmap-all"
# Warm branchmaps for the repoview filter level used by the server
CACHE_BRANCHMAP_SERVED = b"branchmap-served"
# Warm the internal changelog cache (e.g. the persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"
# Warm the full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"
# Warm the file-node-tags cache
CACHE_FILE_NODE_TAGS = b"file-node-tags"
# Warm the internal manifestlog cache (e.g. the persistent nodemap)
CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
# Warm the rev-branch cache
CACHE_REV_BRANCH = b"rev-branch-cache"
# Warm the tags cache for the default repoview
CACHE_TAGS_DEFAULT = b"tags-default"
# Warm the tags cache for the repoview filter level used by the server
CACHE_TAGS_SERVED = b"tags-served"
70
# the caches to warm by default after a simple transaction
# (this is a mutable set to let extensions update it)
73CACHES_DEFAULT = {
74    CACHE_BRANCHMAP_SERVED,
75}
76
# the caches to warm when warming all of them
# (this is a mutable set to let extensions update it)
79CACHES_ALL = {
80    CACHE_BRANCHMAP_SERVED,
81    CACHE_BRANCHMAP_ALL,
82    CACHE_CHANGELOG_CACHE,
83    CACHE_FILE_NODE_TAGS,
84    CACHE_FULL_MANIFEST,
85    CACHE_MANIFESTLOG_CACHE,
86    CACHE_TAGS_DEFAULT,
87    CACHE_TAGS_SERVED,
88}
89
# the caches to warm by default after a clone
# (this is a mutable set to let extensions update it)
92CACHES_POST_CLONE = CACHES_ALL.copy()
93CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
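
# A minimal sketch of how an extension could register its own cache for
# warming (the constant name below is hypothetical, not part of this module):
#
#   CACHE_MYEXT_INDEX = b"myext-index"
#   CACHES_DEFAULT.add(CACHE_MYEXT_INDEX)
#   CACHES_ALL.add(CACHE_MYEXT_INDEX)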
94
95
96class ipeerconnection(interfaceutil.Interface):
97    """Represents a "connection" to a repository.
98
99    This is the base interface for representing a connection to a repository.
100    It holds basic properties and methods applicable to all peer types.
101
102    This is not a complete interface definition and should not be used
103    outside of this module.
104    """
105
106    ui = interfaceutil.Attribute("""ui.ui instance""")
107
108    def url():
109        """Returns a URL string representing this peer.
110
111        Currently, implementations expose the raw URL used to construct the
112        instance. It may contain credentials as part of the URL. The
113        expectations of the value aren't well-defined and this could lead to
114        data leakage.
115
116        TODO audit/clean consumers and more clearly define the contents of this
117        value.
118        """
119
120    def local():
121        """Returns a local repository instance.
122
123        If the peer represents a local repository, returns an object that
124        can be used to interface with it. Otherwise returns ``None``.
125        """
126
127    def peer():
128        """Returns an object conforming to this interface.
129
130        Most implementations will ``return self``.
131        """
132
133    def canpush():
134        """Returns a boolean indicating if this peer can be pushed to."""
135
136    def close():
137        """Close the connection to this peer.
138
139        This is called when the peer will no longer be used. Resources
140        associated with the peer should be cleaned up.
141        """
142
143
144class ipeercapabilities(interfaceutil.Interface):
145    """Peer sub-interface related to capabilities."""
146
147    def capable(name):
148        """Determine support for a named capability.
149
150        Returns ``False`` if capability not supported.
151
152        Returns ``True`` if boolean capability is supported. Returns a string
153        if capability support is non-boolean.
154
155        Capability strings may or may not map to wire protocol capabilities.
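
        A hypothetical usage sketch (the capability names are illustrative)::

          if not peer.capable(b'unbundle'):
              raise error.Abort(b'cannot push to this peer')
          bundle2spec = peer.capable(b'bundle2')  # bytes payload, or False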
156        """
157
158    def requirecap(name, purpose):
159        """Require a capability to be present.
160
161        Raises a ``CapabilityError`` if the capability isn't present.
162        """
163
164
165class ipeercommands(interfaceutil.Interface):
166    """Client-side interface for communicating over the wire protocol.
167
168    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
170    """
171
172    def branchmap():
173        """Obtain heads in named branches.
174
175        Returns a dict mapping branch name to an iterable of nodes that are
176        heads on that branch.
177        """
178
179    def capabilities():
180        """Obtain capabilities of the peer.
181
182        Returns a set of string capabilities.
183        """
184
185    def clonebundles():
186        """Obtains the clone bundles manifest for the repo.
187
188        Returns the manifest as unparsed bytes.
189        """
190
191    def debugwireargs(one, two, three=None, four=None, five=None):
192        """Used to facilitate debugging of arguments passed over the wire."""
193
194    def getbundle(source, **kwargs):
195        """Obtain remote repository data as a bundle.
196
197        This command is how the bulk of repository data is transferred from
        the peer to the local repository.
199
200        Returns a generator of bundle data.
201        """
202
203    def heads():
204        """Determine all known head revisions in the peer.
205
206        Returns an iterable of binary nodes.
207        """
208
209    def known(nodes):
210        """Determine whether multiple nodes are known.
211
212        Accepts an iterable of nodes whose presence to check for.
213
        Returns an iterable of booleans indicating if the corresponding node
215        at that index is known to the peer.
216        """
217
218    def listkeys(namespace):
219        """Obtain all keys in a pushkey namespace.
220
221        Returns an iterable of key names.
222        """
223
224    def lookup(key):
225        """Resolve a value to a known revision.
226
227        Returns a binary node of the resolved revision on success.
228        """
229
230    def pushkey(namespace, key, old, new):
231        """Set a value using the ``pushkey`` protocol.
232
233        Arguments correspond to the pushkey namespace and key to operate on and
234        the old and new values for that key.
235
236        Returns a string with the peer result. The value inside varies by the
237        namespace.
238        """
239
240    def stream_out():
241        """Obtain streaming clone data.
242
243        Successful result should be a generator of data chunks.
244        """
245
246    def unbundle(bundle, heads, url):
247        """Transfer repository data to the peer.
248
249        This is how the bulk of data during a push is transferred.
250
251        Returns the integer number of heads added to the peer.
252        """
253
254
255class ipeerlegacycommands(interfaceutil.Interface):
256    """Interface for implementing support for legacy wire protocol commands.
257
258    Wire protocol commands transition to legacy status when they are no longer
259    used by modern clients. To facilitate identifying which commands are
260    legacy, the interfaces are split.
261    """
262
263    def between(pairs):
264        """Obtain nodes between pairs of nodes.
265
266        ``pairs`` is an iterable of node pairs.
267
268        Returns an iterable of iterables of nodes corresponding to each
269        requested pair.
270        """
271
272    def branches(nodes):
273        """Obtain ancestor changesets of specific nodes back to a branch point.
274
275        For each requested node, the peer finds the first ancestor node that is
276        a DAG root or is a merge.
277
278        Returns an iterable of iterables with the resolved values for each node.
279        """
280
281    def changegroup(nodes, source):
282        """Obtain a changegroup with data for descendants of specified nodes."""
283
284    def changegroupsubset(bases, heads, source):
        """Obtain a changegroup with data for descendants of ``bases`` that
        are ancestors of ``heads``."""
286
287
288class ipeercommandexecutor(interfaceutil.Interface):
289    """Represents a mechanism to execute remote commands.
290
291    This is the primary interface for requesting that wire protocol commands
292    be executed. Instances of this interface are active in a context manager
293    and have a well-defined lifetime. When the context manager exits, all
294    outstanding requests are waited on.
295    """
296
297    def callcommand(name, args):
298        """Request that a named command be executed.
299
300        Receives the command name and a dictionary of command arguments.
301
302        Returns a ``concurrent.futures.Future`` that will resolve to the
303        result of that command request. That exact value is left up to
304        the implementation and possibly varies by command.
305
306        Not all commands can coexist with other commands in an executor
307        instance: it depends on the underlying wire protocol transport being
308        used and the command itself.
309
310        Implementations MAY call ``sendcommands()`` automatically if the
311        requested command can not coexist with other commands in this executor.
312
313        Implementations MAY call ``sendcommands()`` automatically when the
314        future's ``result()`` is called. So, consumers using multiple
315        commands with an executor MUST ensure that ``result()`` is not called
316        until all command requests have been issued.
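
        A sketch of batching two requests before resolving either result
        (command names and arguments are illustrative)::

          fheads = executor.callcommand(b'heads', {})
          fknown = executor.callcommand(b'known', {b'nodes': [somenode]})
          executor.sendcommands()
          heads = fheads.result()
          known = fknown.result()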
317        """
318
319    def sendcommands():
320        """Trigger submission of queued command requests.
321
322        Not all transports submit commands as soon as they are requested to
323        run. When called, this method forces queued command requests to be
324        issued. It will no-op if all commands have already been sent.
325
326        When called, no more new commands may be issued with this executor.
327        """
328
329    def close():
330        """Signal that this command request is finished.
331
332        When called, no more new commands may be issued. All outstanding
333        commands that have previously been issued are waited on before
334        returning. This not only includes waiting for the futures to resolve,
335        but also waiting for all response data to arrive. In other words,
336        calling this waits for all on-wire state for issued command requests
337        to finish.
338
339        When used as a context manager, this method is called when exiting the
340        context manager.
341
342        This method may call ``sendcommands()`` if there are buffered commands.
343        """
344
345
346class ipeerrequests(interfaceutil.Interface):
347    """Interface for executing commands on a peer."""
348
349    limitedarguments = interfaceutil.Attribute(
        """True if the peer cannot receive large argument values for commands."""
351    )
352
353    def commandexecutor():
354        """A context manager that resolves to an ipeercommandexecutor.
355
356        The object this resolves to can be used to issue command requests
357        to the peer.
358
359        Callers should call its ``callcommand`` method to issue command
360        requests.
361
362        A new executor should be obtained for each distinct set of commands
363        (possibly just a single command) that the consumer wants to execute
364        as part of a single operation or round trip. This is because some
365        peers are half-duplex and/or don't support persistent connections.
366        e.g. in the case of HTTP peers, commands sent to an executor represent
367        a single HTTP request. While some peers may support multiple command
368        sends over the wire per executor, consumers need to code to the least
369        capable peer. So it should be assumed that command executors buffer
370        called commands until they are told to send them and that each
371        command executor could result in a new connection or wire-level request
372        being issued.
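
        A typical usage sketch (the command name and arguments are
        illustrative)::

          with peer.commandexecutor() as e:
              f = e.callcommand(b'lookup', {b'key': b'tip'})
              node = f.result()  # blocks until the response arrives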
373        """
374
375
376class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
377    """Unified interface for peer repositories.
378
379    All peer instances must conform to this interface.
380    """
381
382
383class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
384    """Unified peer interface for wire protocol version 2 peers."""
385
386    apidescriptor = interfaceutil.Attribute(
387        """Data structure holding description of server API."""
388    )
389
390
391@interfaceutil.implementer(ipeerbase)
392class peer(object):
393    """Base class for peer repositories."""
394
395    limitedarguments = False
396
397    def capable(self, name):
398        caps = self.capabilities()
399        if name in caps:
400            return True
401
402        name = b'%s=' % name
403        for cap in caps:
404            if cap.startswith(name):
405                return cap[len(name) :]
406
407        return False
408
409    def requirecap(self, name, purpose):
410        if self.capable(name):
411            return
412
413        raise error.CapabilityError(
414            _(
415                b'cannot %s; remote repository does not support the '
416                b'\'%s\' capability'
417            )
418            % (purpose, name)
419        )
420
421
422class iverifyproblem(interfaceutil.Interface):
423    """Represents a problem with the integrity of the repository.
424
425    Instances of this interface are emitted to describe an integrity issue
426    with a repository (e.g. corrupt storage, missing data, etc).
427
428    Instances are essentially messages associated with severity.
429    """
430
431    warning = interfaceutil.Attribute(
432        """Message indicating a non-fatal problem."""
433    )
434
435    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
436
437    node = interfaceutil.Attribute(
438        """Revision encountering the problem.
439
440        ``None`` means the problem doesn't apply to a single revision.
441        """
442    )
443
444
445class irevisiondelta(interfaceutil.Interface):
446    """Represents a delta between one revision and another.
447
448    Instances convey enough information to allow a revision to be exchanged
449    with another repository.
450
451    Instances represent the fulltext revision data or a delta against
452    another revision. Therefore the ``revision`` and ``delta`` attributes
453    are mutually exclusive.
454
455    Typically used for changegroup generation.
456    """
457
458    node = interfaceutil.Attribute("""20 byte node of this revision.""")
459
460    p1node = interfaceutil.Attribute(
461        """20 byte node of 1st parent of this revision."""
462    )
463
464    p2node = interfaceutil.Attribute(
465        """20 byte node of 2nd parent of this revision."""
466    )
467
468    linknode = interfaceutil.Attribute(
469        """20 byte node of the changelog revision this node is linked to."""
470    )
471
472    flags = interfaceutil.Attribute(
473        """2 bytes of integer flags that apply to this revision.
474
475        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
476        """
477    )
478
479    basenode = interfaceutil.Attribute(
480        """20 byte node of the revision this data is a delta against.
481
482        ``nullid`` indicates that the revision is a full revision and not
483        a delta.
484        """
485    )
486
487    baserevisionsize = interfaceutil.Attribute(
488        """Size of base revision this delta is against.
489
490        May be ``None`` if ``basenode`` is ``nullid``.
491        """
492    )
493
494    revision = interfaceutil.Attribute(
495        """Raw fulltext of revision data for this node."""
496    )
497
498    delta = interfaceutil.Attribute(
499        """Delta between ``basenode`` and ``node``.
500
501        Stored in the bdiff delta format.
502        """
503    )
504
505    sidedata = interfaceutil.Attribute(
506        """Raw sidedata bytes for the given revision."""
507    )
508
509    protocol_flags = interfaceutil.Attribute(
510        """Single byte of integer flags that can influence the protocol.
511
512        This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
513        """
514    )
515
516
517class ifilerevisionssequence(interfaceutil.Interface):
518    """Contains index data for all revisions of a file.
519
520    Types implementing this behave like lists of tuples. The index
521    in the list corresponds to the revision number. The values contain
522    index metadata.
523
524    The *null* revision (revision number -1) is always the last item
525    in the index.
526    """
527
528    def __len__():
529        """The total number of revisions."""
530
531    def __getitem__(rev):
532        """Returns the object having a specific revision number.
533
534        Returns an 8-tuple with the following fields:
535
536        offset+flags
537           Contains the offset and flags for the revision. 64-bit unsigned
           integer where the first 6 bytes are the offset and the next 2 bytes
539           are flags. The offset can be 0 if it is not used by the store.
540        compressed size
541            Size of the revision data in the store. It can be 0 if it isn't
542            needed by the store.
543        uncompressed size
544            Fulltext size. It can be 0 if it isn't needed by the store.
545        base revision
546            Revision number of revision the delta for storage is encoded
547            against. -1 indicates not encoded against a base revision.
548        link revision
549            Revision number of changelog revision this entry is related to.
550        p1 revision
551            Revision number of 1st parent. -1 if no 1st parent.
552        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
554        node
555            Binary node value for this revision number.
556
557        Negative values should index off the end of the sequence. ``-1``
558        should return the null revision. ``-2`` should return the most
559        recent revision.
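
        A sketch of unpacking an entry and splitting its first field
        (variable names are illustrative)::

          offset_flags, csize, usize, baserev, linkrev, p1, p2, node = index[rev]
          offset = offset_flags >> 16
          flags = offset_flags & 0xFFFF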
560        """
561
562    def __contains__(rev):
563        """Whether a revision number exists."""
564
565    def insert(self, i, entry):
566        """Add an item to the index at specific revision."""
567
568
569class ifileindex(interfaceutil.Interface):
570    """Storage interface for index data of a single file.
571
572    File storage data is divided into index metadata and data storage.
573    This interface defines the index portion of the interface.
574
575    The index logically consists of:
576
577    * A mapping between revision numbers and nodes.
578    * DAG data (storing and querying the relationship between nodes).
579    * Metadata to facilitate storage.
580    """
581
582    nullid = interfaceutil.Attribute(
583        """node for the null revision for use as delta base."""
584    )
585
586    def __len__():
587        """Obtain the number of revisions stored for this file."""
588
589    def __iter__():
590        """Iterate over revision numbers for this file."""
591
592    def hasnode(node):
593        """Returns a bool indicating if a node is known to this store.
594
595        Implementations must only return True for full, binary node values:
596        hex nodes, revision numbers, and partial node matches must be
597        rejected.
598
599        The null node is never present.
600        """
601
602    def revs(start=0, stop=None):
603        """Iterate over revision numbers for this file, with control."""
604
605    def parents(node):
606        """Returns a 2-tuple of parent nodes for a revision.
607
608        Values will be ``nullid`` if the parent is empty.
609        """
610
611    def parentrevs(rev):
612        """Like parents() but operates on revision numbers."""
613
614    def rev(node):
615        """Obtain the revision number given a node.
616
617        Raises ``error.LookupError`` if the node is not known.
618        """
619
620    def node(rev):
621        """Obtain the node value given a revision number.
622
        Raises ``IndexError`` if the revision is not known.
624        """
625
626    def lookup(node):
627        """Attempt to resolve a value to a node.
628
629        Value can be a binary node, hex node, revision number, or a string
630        that can be converted to an integer.
631
632        Raises ``error.LookupError`` if a node could not be resolved.
633        """
634
635    def linkrev(rev):
636        """Obtain the changeset revision number a revision is linked to."""
637
638    def iscensored(rev):
639        """Return whether a revision's content has been censored."""
640
641    def commonancestorsheads(node1, node2):
642        """Obtain an iterable of nodes containing heads of common ancestors.
643
644        See ``ancestor.commonancestorsheads()``.
645        """
646
647    def descendants(revs):
648        """Obtain descendant revision numbers for a set of revision numbers.
649
650        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
651        """
652
653    def heads(start=None, stop=None):
654        """Obtain a list of nodes that are DAG heads, with control.
655
656        The set of revisions examined can be limited by specifying
657        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
658        iterable of nodes. DAG traversal starts at earlier revision
659        ``start`` and iterates forward until any node in ``stop`` is
660        encountered.
661        """
662
663    def children(node):
664        """Obtain nodes that are children of a node.
665
666        Returns a list of nodes.
667        """
668
669
670class ifiledata(interfaceutil.Interface):
671    """Storage interface for data storage of a specific file.
672
673    This complements ``ifileindex`` and provides an interface for accessing
674    data for a tracked file.
675    """
676
677    def size(rev):
678        """Obtain the fulltext size of file data.
679
680        Any metadata is excluded from size measurements.
681        """
682
683    def revision(node, raw=False):
684        """Obtain fulltext data for a node.
685
686        By default, any storage transformations are applied before the data
687        is returned. If ``raw`` is True, non-raw storage transformations
688        are not applied.
689
690        The fulltext data may contain a header containing metadata. Most
691        consumers should use ``read()`` to obtain the actual file data.
692        """
693
694    def rawdata(node):
695        """Obtain raw data for a node."""
696
697    def read(node):
698        """Resolve file fulltext data.
699
700        This is similar to ``revision()`` except any metadata in the data
701        headers is stripped.
702        """
703
704    def renamed(node):
705        """Obtain copy metadata for a node.
706
707        Returns ``False`` if no copy metadata is stored or a 2-tuple of
708        (path, node) from which this revision was copied.
709        """
710
711    def cmp(node, fulltext):
712        """Compare fulltext to another revision.
713
714        Returns True if the fulltext is different from what is stored.
715
716        This takes copy metadata into account.
717
718        TODO better document the copy metadata and censoring logic.
719        """
720
721    def emitrevisions(
722        nodes,
723        nodesorder=None,
724        revisiondata=False,
725        assumehaveparentrevisions=False,
726        deltamode=CG_DELTAMODE_STD,
727    ):
728        """Produce ``irevisiondelta`` for revisions.
729
730        Given an iterable of nodes, emits objects conforming to the
731        ``irevisiondelta`` interface that describe revisions in storage.
732
733        This method is a generator.
734
735        The input nodes may be unordered. Implementations must ensure that a
736        node's parents are emitted before the node itself. Transitively, this
737        means that a node may only be emitted once all its ancestors in
738        ``nodes`` have also been emitted.
739
740        By default, emits "index" data (the ``node``, ``p1node``, and
741        ``p2node`` attributes). If ``revisiondata`` is set, revision data
742        will also be present on the emitted objects.
743
744        With default argument values, implementations can choose to emit
745        either fulltext revision data or a delta. When emitting deltas,
746        implementations must consider whether the delta's base revision
747        fulltext is available to the receiver.
748
749        The base revision fulltext is guaranteed to be available if any of
750        the following are met:
751
752        * Its fulltext revision was emitted by this method call.
753        * A delta for that revision was emitted by this method call.
754        * ``assumehaveparentrevisions`` is True and the base revision is a
755          parent of the node.
756
757        ``nodesorder`` can be used to control the order that revisions are
758        emitted. By default, revisions can be reordered as long as they are
759        in DAG topological order (see above). If the value is ``nodes``,
760        the iteration order from ``nodes`` should be used. If the value is
761        ``storage``, then the native order from the backing storage layer
762        is used. (Not all storage layers will have strong ordering and behavior
763        of this mode is storage-dependent.) ``nodes`` ordering can force
764        revisions to be emitted before their ancestors, so consumers should
765        use it with care.
766
767        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
768        be set and it is the caller's responsibility to resolve it, if needed.
769
770        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
771        all revision data should be emitted as deltas against the revision
772        emitted just prior. The initial revision should be a delta against its
773        1st parent.
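
        A minimal consumption sketch (``apply_delta`` and ``use_fulltext`` are
        hypothetical helpers)::

          for rev in store.emitrevisions(nodes, revisiondata=True):
              if rev.delta is not None:
                  apply_delta(rev.basenode, rev.delta)
              else:
                  use_fulltext(rev.revision)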
774        """
775
776
777class ifilemutation(interfaceutil.Interface):
778    """Storage interface for mutation events of a tracked file."""
779
780    def add(filedata, meta, transaction, linkrev, p1, p2):
781        """Add a new revision to the store.
782
783        Takes file data, dictionary of metadata, a transaction, linkrev,
784        and parent nodes.
785
786        Returns the node that was added.
787
788        May no-op if a revision matching the supplied data is already stored.
789        """
790
791    def addrevision(
792        revisiondata,
793        transaction,
794        linkrev,
795        p1,
796        p2,
797        node=None,
798        flags=0,
799        cachedelta=None,
800    ):
801        """Add a new revision to the store and return its number.
802
803        This is similar to ``add()`` except it operates at a lower level.
804
805        The data passed in already contains a metadata header, if any.
806
807        ``node`` and ``flags`` can be used to define the expected node and
808        the flags to use with storage. ``flags`` is a bitwise value composed
809        of the various ``REVISION_FLAG_*`` constants.
810
811        ``add()`` is usually called when adding files from e.g. the working
812        directory. ``addrevision()`` is often called by ``add()`` and for
813        scenarios where revision data has already been computed, such as when
814        applying raw data from a peer repo.
815        """
816
817    def addgroup(
818        deltas,
819        linkmapper,
820        transaction,
821        addrevisioncb=None,
822        duplicaterevisioncb=None,
823        maybemissingparents=False,
824    ):
825        """Process a series of deltas for storage.
826
827        ``deltas`` is an iterable of 7-tuples of
828        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
829        to add.
830
831        The ``delta`` field contains ``mpatch`` data to apply to a base
832        revision, identified by ``deltabase``. The base node can be
833        ``nullid``, in which case the header from the delta can be ignored
834        and the delta used as the fulltext.
835
836        ``alwayscache`` instructs the lower layers to cache the content of the
837        newly added revision, even if it needs to be explicitly computed.
838        This used to be the default when ``addrevisioncb`` was provided up to
839        Mercurial 5.8.
840
841        ``addrevisioncb`` should be called for each new rev as it is committed.
842        ``duplicaterevisioncb`` should be called for all revs with a
843        pre-existing node.
844
845        ``maybemissingparents`` is a bool indicating whether the incoming
846        data may reference parents/ancestor revisions that aren't present.
847        This flag is set when receiving data into a "shallow" store that
848        doesn't hold all history.
849
850        Returns a list of nodes that were processed. A node will be in the list
851        even if it existed in the store previously.
852        """
853
854    def censorrevision(tr, node, tombstone=b''):
855        """Remove the content of a single revision.
856
857        The specified ``node`` will have its content purged from storage.
858        Future attempts to access the revision data for this node will
859        result in failure.
860
861        A ``tombstone`` message can optionally be stored. This message may be
862        displayed to users when they attempt to access the missing revision
863        data.
864
865        Storage backends may have stored deltas against the previous content
866        in this revision. As part of censoring a revision, these storage
867        backends are expected to rewrite any internally stored deltas such
868        that they no longer reference the deleted content.
869        """
870
871    def getstrippoint(minlink):
872        """Find the minimum revision that must be stripped to strip a linkrev.
873
874        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.
876
877        TODO this is highly revlog centric and should be abstracted into
878        a higher-level deletion API. ``repair.strip()`` relies on this.
879        """
880
881    def strip(minlink, transaction):
882        """Remove storage of items starting at a linkrev.
883
884        This uses ``getstrippoint()`` to determine the first node to remove.
885        Then it effectively truncates storage for all revisions after that.
886
887        TODO this is highly revlog centric and should be abstracted into a
888        higher-level deletion API.
889        """
890
891
892class ifilestorage(ifileindex, ifiledata, ifilemutation):
893    """Complete storage interface for a single tracked file."""
894
895    def files():
896        """Obtain paths that are backing storage for this file.
897
898        TODO this is used heavily by verify code and there should probably
899        be a better API for that.
900        """
901
902    def storageinfo(
903        exclusivefiles=False,
904        sharedfiles=False,
905        revisionscount=False,
906        trackedsize=False,
907        storedsize=False,
908    ):
909        """Obtain information about storage for this file's data.
910
911        Returns a dict describing storage for this tracked path. The keys
912        in the dict map to arguments of the same. The arguments are bools
913        indicating whether to calculate and obtain that data.
914
915        exclusivefiles
916           Iterable of (vfs, path) describing files that are exclusively
917           used to back storage for this tracked path.
918
919        sharedfiles
920           Iterable of (vfs, path) describing files that are used to back
921           storage for this tracked path. Those files may also provide storage
922           for other stored entities.
923
924        revisionscount
925           Number of revisions available for retrieval.
926
927        trackedsize
928           Total size in bytes of all tracked revisions. This is a sum of the
929           length of the fulltext of all revisions.
930
931        storedsize
932           Total size in bytes used to store data for all tracked revisions.
933           This is commonly less than ``trackedsize`` due to internal usage
934           of deltas rather than fulltext revisions.
935
        Not all storage backends may support all queries or have a reasonable
937        value to use. In that case, the value should be set to ``None`` and
938        callers are expected to handle this special value.
939        """
940
941    def verifyintegrity(state):
942        """Verifies the integrity of file storage.
943
944        ``state`` is a dict holding state of the verifier process. It can be
945        used to communicate data between invocations of multiple storage
946        primitives.
947
948        If individual revisions cannot have their revision content resolved,
949        the method is expected to set the ``skipread`` key to a set of nodes
950        that encountered problems.  If set, the method can also add the node(s)
951        to ``safe_renamed`` in order to indicate nodes that may perform the
952        rename checks with currently accessible data.
953
954        The method yields objects conforming to the ``iverifyproblem``
955        interface.
956        """
957
958
959class idirs(interfaceutil.Interface):
960    """Interface representing a collection of directories from paths.
961
962    This interface is essentially a derived data structure representing
963    directories from a collection of paths.
964    """
965
966    def addpath(path):
967        """Add a path to the collection.
968
969        All directories in the path will be added to the collection.
970        """
971
972    def delpath(path):
973        """Remove a path from the collection.
974
975        If the removal was the last path in a particular directory, the
976        directory is removed from the collection.
977        """
978
979    def __iter__():
980        """Iterate over the directories in this collection of paths."""
981
982    def __contains__(path):
983        """Whether a specific directory is in this collection."""
984
985
986class imanifestdict(interfaceutil.Interface):
987    """Interface representing a manifest data structure.
988
989    A manifest is effectively a dict mapping paths to entries. Each entry
990    consists of a binary node and extra flags affecting that entry.
991    """
992
993    def __getitem__(path):
994        """Returns the binary node value for a path in the manifest.
995
996        Raises ``KeyError`` if the path does not exist in the manifest.
997
998        Equivalent to ``self.find(path)[0]``.
999        """
1000
1001    def find(path):
1002        """Returns the entry for a path in the manifest.
1003
1004        Returns a 2-tuple of (node, flags).
1005
1006        Raises ``KeyError`` if the path does not exist in the manifest.
1007        """
1008
1009    def __len__():
1010        """Return the number of entries in the manifest."""
1011
1012    def __nonzero__():
1013        """Returns True if the manifest has entries, False otherwise."""
1014
1015    __bool__ = __nonzero__
1016
1017    def __setitem__(path, node):
1018        """Define the node value for a path in the manifest.
1019
1020        If the path is already in the manifest, its flags will be copied to
1021        the new entry.
1022        """
1023
1024    def __contains__(path):
1025        """Whether a path exists in the manifest."""
1026
1027    def __delitem__(path):
1028        """Remove a path from the manifest.
1029
1030        Raises ``KeyError`` if the path is not in the manifest.
1031        """
1032
1033    def __iter__():
1034        """Iterate over paths in the manifest."""
1035
1036    def iterkeys():
1037        """Iterate over paths in the manifest."""
1038
1039    def keys():
1040        """Obtain a list of paths in the manifest."""
1041
1042    def filesnotin(other, match=None):
1043        """Obtain the set of paths in this manifest but not in another.
1044
1045        ``match`` is an optional matcher function to be applied to both
1046        manifests.
1047
1048        Returns a set of paths.
1049        """
1050
1051    def dirs():
1052        """Returns an object implementing the ``idirs`` interface."""
1053
1054    def hasdir(dir):
1055        """Returns a bool indicating if a directory is in this manifest."""
1056
1057    def walk(match):
1058        """Generator of paths in manifest satisfying a matcher.
1059
1060        If the matcher has explicit files listed and they don't exist in
1061        the manifest, ``match.bad()`` is called for each missing file.
1062        """
1063
1064    def diff(other, match=None, clean=False):
1065        """Find differences between this manifest and another.
1066
1067        This manifest is compared to ``other``.
1068
1069        If ``match`` is provided, the two manifests are filtered against this
1070        matcher and only entries satisfying the matcher are compared.
1071
1072        If ``clean`` is True, unchanged files are included in the returned
1073        object.
1074
1075        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1076        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1077        represents the node and flags for this manifest and ``(node2, flag2)``
1078        are the same for the other manifest.
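
        A small sketch of walking the result, assuming missing entries are
        reported with a ``None`` node (as in the default implementation)::

          for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items():
              if n1 is None:
                  pass  # path only exists in the other manifest
              elif n2 is None:
                  pass  # path only exists in this manifest
              else:
                  pass  # path is present in both, but differs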
1079        """
1080
1081    def setflag(path, flag):
1082        """Set the flag value for a given path.
1083
1084        Raises ``KeyError`` if the path is not already in the manifest.
1085        """
1086
1087    def get(path, default=None):
1088        """Obtain the node value for a path or a default value if missing."""
1089
1090    def flags(path):
1091        """Return the flags value for a path (default: empty bytestring)."""
1092
1093    def copy():
1094        """Return a copy of this manifest."""
1095
1096    def items():
1097        """Returns an iterable of (path, node) for items in this manifest."""
1098
1099    def iteritems():
1100        """Identical to items()."""
1101
1102    def iterentries():
1103        """Returns an iterable of (path, node, flags) for this manifest.
1104
1105        Similar to ``iteritems()`` except items are a 3-tuple and include
1106        flags.
1107        """
1108
1109    def text():
1110        """Obtain the raw data representation for this manifest.
1111
1112        Result is used to create a manifest revision.
1113        """
1114
1115    def fastdelta(base, changes):
1116        """Obtain a delta between this manifest and another given changes.
1117
        ``base`` is the raw data representation of another manifest.
1119
1120        ``changes`` is an iterable of ``(path, to_delete)``.
1121
1122        Returns a 2-tuple containing ``bytearray(self.text())`` and the
1123        delta between ``base`` and this manifest.
1124
1125        If this manifest implementation can't support ``fastdelta()``,
1126        raise ``mercurial.manifest.FastdeltaUnavailable``.
1127        """
1128
1129
1130class imanifestrevisionbase(interfaceutil.Interface):
1131    """Base interface representing a single revision of a manifest.
1132
1133    Should not be used as a primary interface: should always be inherited
1134    as part of a larger interface.
1135    """
1136
1137    def copy():
1138        """Obtain a copy of this manifest instance.
1139
1140        Returns an object conforming to the ``imanifestrevisionwritable``
1141        interface. The instance will be associated with the same
1142        ``imanifestlog`` collection as this instance.
1143        """
1144
1145    def read():
1146        """Obtain the parsed manifest data structure.
1147
1148        The returned object conforms to the ``imanifestdict`` interface.
1149        """
1150
1151
1152class imanifestrevisionstored(imanifestrevisionbase):
1153    """Interface representing a manifest revision committed to storage."""
1154
1155    def node():
1156        """The binary node for this manifest."""
1157
1158    parents = interfaceutil.Attribute(
1159        """List of binary nodes that are parents for this manifest revision."""
1160    )
1161
1162    def readdelta(shallow=False):
1163        """Obtain the manifest data structure representing changes from parent.
1164
1165        This manifest is compared to its 1st parent. A new manifest representing
1166        those differences is constructed.
1167
1168        The returned object conforms to the ``imanifestdict`` interface.
1169        """
1170
1171    def readfast(shallow=False):
1172        """Calls either ``read()`` or ``readdelta()``.
1173
1174        The faster of the two options is called.
1175        """
1176
1177    def find(key):
        """Calls ``self.read().find(key)``.
1179
1180        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1181        """
1182
1183
1184class imanifestrevisionwritable(imanifestrevisionbase):
1185    """Interface representing a manifest revision that can be committed."""
1186
1187    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1188        """Add this revision to storage.
1189
1190        Takes a transaction object, the changeset revision number it will
1191        be associated with, its parent nodes, and lists of added and
1192        removed paths.
1193
1194        If match is provided, storage can choose not to inspect or write out
1195        items that do not match. Storage is still required to be able to provide
1196        the full manifest in the future for any directories written (these
1197        manifests should not be "narrowed on disk").
1198
1199        Returns the binary node of the created revision.
1200        """
1201
1202
1203class imanifeststorage(interfaceutil.Interface):
1204    """Storage interface for manifest data."""
1205
1206    nodeconstants = interfaceutil.Attribute(
1207        """nodeconstants used by the current repository."""
1208    )
1209
1210    tree = interfaceutil.Attribute(
1211        """The path to the directory this manifest tracks.
1212
1213        The empty bytestring represents the root manifest.
1214        """
1215    )
1216
1217    index = interfaceutil.Attribute(
1218        """An ``ifilerevisionssequence`` instance."""
1219    )
1220
1221    opener = interfaceutil.Attribute(
1222        """VFS opener to use to access underlying files used for storage.
1223
1224        TODO this is revlog specific and should not be exposed.
1225        """
1226    )
1227
1228    _generaldelta = interfaceutil.Attribute(
1229        """Whether generaldelta storage is being used.
1230
1231        TODO this is revlog specific and should not be exposed.
1232        """
1233    )
1234
1235    fulltextcache = interfaceutil.Attribute(
1236        """Dict with cache of fulltexts.
1237
1238        TODO this doesn't feel appropriate for the storage interface.
1239        """
1240    )
1241
1242    def __len__():
1243        """Obtain the number of revisions stored for this manifest."""
1244
1245    def __iter__():
1246        """Iterate over revision numbers for this manifest."""
1247
1248    def rev(node):
1249        """Obtain the revision number given a binary node.
1250
1251        Raises ``error.LookupError`` if the node is not known.
1252        """
1253
1254    def node(rev):
1255        """Obtain the node value given a revision number.
1256
1257        Raises ``error.LookupError`` if the revision is not known.
1258        """
1259
1260    def lookup(value):
1261        """Attempt to resolve a value to a node.
1262
        Value can be a binary node, hex node, revision number, or a bytes
        string that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
1267        """
1268
1269    def parents(node):
1270        """Returns a 2-tuple of parent nodes for a node.
1271
1272        Values will be ``nullid`` if the parent is empty.
1273        """
1274
1275    def parentrevs(rev):
1276        """Like parents() but operates on revision numbers."""
1277
1278    def linkrev(rev):
1279        """Obtain the changeset revision number a revision is linked to."""
1280
1281    def revision(node, _df=None, raw=False):
1282        """Obtain fulltext data for a node."""
1283
1284    def rawdata(node, _df=None):
1285        """Obtain raw data for a node."""
1286
1287    def revdiff(rev1, rev2):
1288        """Obtain a delta between two revision numbers.
1289
1290        The returned data is the result of ``bdiff.bdiff()`` on the raw
1291        revision data.
1292        """
1293
1294    def cmp(node, fulltext):
1295        """Compare fulltext to another revision.
1296
1297        Returns True if the fulltext is different from what is stored.
1298        """
1299
1300    def emitrevisions(
1301        nodes,
1302        nodesorder=None,
1303        revisiondata=False,
1304        assumehaveparentrevisions=False,
1305    ):
1306        """Produce ``irevisiondelta`` describing revisions.
1307
1308        See the documentation for ``ifiledata`` for more.
1309        """
1310
1311    def addgroup(
1312        deltas,
1313        linkmapper,
1314        transaction,
1315        addrevisioncb=None,
1316        duplicaterevisioncb=None,
1317    ):
1318        """Process a series of deltas for storage.
1319
1320        See the documentation in ``ifilemutation`` for more.
1321        """
1322
1323    def rawsize(rev):
1324        """Obtain the size of tracked data.
1325
1326        Is equivalent to ``len(m.rawdata(node))``.
1327
1328        TODO this method is only used by upgrade code and may be removed.
1329        """
1330
1331    def getstrippoint(minlink):
1332        """Find minimum revision that must be stripped to strip a linkrev.
1333
1334        See the documentation in ``ifilemutation`` for more.
1335        """
1336
1337    def strip(minlink, transaction):
1338        """Remove storage of items starting at a linkrev.
1339
1340        See the documentation in ``ifilemutation`` for more.
1341        """
1342
1343    def checksize():
1344        """Obtain the expected sizes of backing files.
1345
1346        TODO this is used by verify and it should not be part of the interface.
1347        """
1348
1349    def files():
1350        """Obtain paths that are backing storage for this manifest.
1351
1352        TODO this is used by verify and there should probably be a better API
1353        for this functionality.
1354        """
1355
1356    def deltaparent(rev):
1357        """Obtain the revision that a revision is delta'd against.
1358
1359        TODO delta encoding is an implementation detail of storage and should
1360        not be exposed to the storage interface.
1361        """
1362
1363    def clone(tr, dest, **kwargs):
1364        """Clone this instance to another."""
1365
1366    def clearcaches(clear_persisted_data=False):
1367        """Clear any caches associated with this instance."""
1368
1369    def dirlog(d):
1370        """Obtain a manifest storage instance for a tree."""
1371
1372    def add(
1373        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1374    ):
1375        """Add a revision to storage.
1376
1377        ``m`` is an object conforming to ``imanifestdict``.
1378
1379        ``link`` is the linkrev revision number.
1380
1381        ``p1`` and ``p2`` are the parent revision numbers.
1382
1383        ``added`` and ``removed`` are iterables of added and removed paths,
1384        respectively.
1385
1386        ``readtree`` is a function that can be used to read the child tree(s)
1387        when recursively writing the full tree structure when using
        treemanifests.
1389
1390        ``match`` is a matcher that can be used to hint to storage that not all
1391        paths must be inspected; this is an optimization and can be safely
1392        ignored. Note that the storage must still be able to reproduce a full
1393        manifest including files that did not match.
1394        """
1395
1396    def storageinfo(
1397        exclusivefiles=False,
1398        sharedfiles=False,
1399        revisionscount=False,
1400        trackedsize=False,
1401        storedsize=False,
1402    ):
1403        """Obtain information about storage for this manifest's data.
1404
1405        See ``ifilestorage.storageinfo()`` for a description of this method.
1406        This one behaves the same way, except for manifest data.
1407        """
1408
1409
1410class imanifestlog(interfaceutil.Interface):
1411    """Interface representing a collection of manifest snapshots.
1412
1413    Represents the root manifest in a repository.
1414
1415    Also serves as a means to access nested tree manifests and to cache
1416    tree manifests.
1417    """
1418
1419    nodeconstants = interfaceutil.Attribute(
1420        """nodeconstants used by the current repository."""
1421    )
1422
1423    def __getitem__(node):
1424        """Obtain a manifest instance for a given binary node.
1425
1426        Equivalent to calling ``self.get('', node)``.
1427
1428        The returned object conforms to the ``imanifestrevisionstored``
1429        interface.
1430        """
1431
1432    def get(tree, node, verify=True):
1433        """Retrieve the manifest instance for a given directory and binary node.
1434
1435        ``node`` always refers to the node of the root manifest (which will be
1436        the only manifest if flat manifests are being used).
1437
1438        If ``tree`` is the empty string, the root manifest is returned.
1439        Otherwise the manifest for the specified directory will be returned
1440        (requires tree manifests).
1441
1442        If ``verify`` is True, ``LookupError`` is raised if the node is not
1443        known.
1444
1445        The returned object conforms to the ``imanifestrevisionstored``
1446        interface.
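
        A usage sketch, assuming ``repo`` is a local repository and treating
        the tree path as illustrative::

          mfl = repo.manifestlog
          mfnode = repo[b'tip'].manifestnode()
          rootmf = mfl[mfnode]            # same as mfl.get(b'', mfnode)
          sub = mfl.get(b'dir/', mfnode)  # requires tree manifests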
1447        """
1448
1449    def getstorage(tree):
1450        """Retrieve an interface to storage for a particular tree.
1451
1452        If ``tree`` is the empty bytestring, storage for the root manifest will
1453        be returned. Otherwise storage for a tree manifest is returned.
1454
1455        TODO formalize interface for returned object.
1456        """
1457
1458    def clearcaches():
1459        """Clear caches associated with this collection."""
1460
1461    def rev(node):
1462        """Obtain the revision number for a binary node.
1463
1464        Raises ``error.LookupError`` if the node is not known.
1465        """
1466
1467    def update_caches(transaction):
        """Update whatever caches are relevant for the used storage."""
1469
1470
1471class ilocalrepositoryfilestorage(interfaceutil.Interface):
1472    """Local repository sub-interface providing access to tracked file storage.
1473
1474    This interface defines how a repository accesses storage for a single
1475    tracked file path.
1476    """
1477
1478    def file(f):
1479        """Obtain a filelog for a tracked path.
1480
1481        The returned type conforms to the ``ifilestorage`` interface.
1482        """
1483
1484
1485class ilocalrepositorymain(interfaceutil.Interface):
1486    """Main interface for local repositories.
1487
1488    This currently captures the reality of things - not how things should be.
1489    """
1490
1491    nodeconstants = interfaceutil.Attribute(
1492        """Constant nodes matching the hash function used by the repository."""
1493    )
1494    nullid = interfaceutil.Attribute(
1495        """null revision for the hash function used by the repository."""
1496    )
1497
1498    supportedformats = interfaceutil.Attribute(
1499        """Set of requirements that apply to stream clone.
1500
1501        This is actually a class attribute and is shared among all instances.
1502        """
1503    )
1504
1505    supported = interfaceutil.Attribute(
1506        """Set of requirements that this repo is capable of opening."""
1507    )
1508
1509    requirements = interfaceutil.Attribute(
1510        """Set of requirements this repo uses."""
1511    )
1512
1513    features = interfaceutil.Attribute(
1514        """Set of "features" this repository supports.
1515
1516        A "feature" is a loosely-defined term. It can refer to a feature
1517        in the classical sense or can describe an implementation detail
1518        of the repository. For example, a ``readonly`` feature may denote
1519        the repository as read-only. Or a ``revlogfilestore`` feature may
1520        denote that the repository is using revlogs for file storage.
1521
1522        The intent of features is to provide a machine-queryable mechanism
1523        for repo consumers to test for various repository characteristics.
1524
1525        Features are similar to ``requirements``. The main difference is that
1526        requirements are stored on-disk and represent requirements to open the
1527        repository. Features are more run-time capabilities of the repository
1528        and more granular capabilities (which may be derived from requirements).
1529        """
1530    )
1531
1532    filtername = interfaceutil.Attribute(
1533        """Name of the repoview that is active on this repo."""
1534    )
1535
1536    wvfs = interfaceutil.Attribute(
1537        """VFS used to access the working directory."""
1538    )
1539
1540    vfs = interfaceutil.Attribute(
1541        """VFS rooted at the .hg directory.
1542
1543        Used to access repository data not in the store.
1544        """
1545    )
1546
1547    svfs = interfaceutil.Attribute(
1548        """VFS rooted at the store.
1549
1550        Used to access repository data in the store. Typically .hg/store.
1551        But can point elsewhere if the store is shared.
1552        """
1553    )
1554
1555    root = interfaceutil.Attribute(
1556        """Path to the root of the working directory."""
1557    )
1558
1559    path = interfaceutil.Attribute("""Path to the .hg directory.""")
1560
1561    origroot = interfaceutil.Attribute(
1562        """The filesystem path that was used to construct the repo."""
1563    )
1564
1565    auditor = interfaceutil.Attribute(
1566        """A pathauditor for the working directory.
1567
1568        This checks if a path refers to a nested repository.
1569
1570        Operates on the filesystem.
1571        """
1572    )
1573
1574    nofsauditor = interfaceutil.Attribute(
1575        """A pathauditor for the working directory.
1576
1577        This is like ``auditor`` except it doesn't do filesystem checks.
1578        """
1579    )
1580
1581    baseui = interfaceutil.Attribute(
1582        """Original ui instance passed into constructor."""
1583    )
1584
1585    ui = interfaceutil.Attribute("""Main ui instance for this instance.""")
1586
1587    sharedpath = interfaceutil.Attribute(
1588        """Path to the .hg directory of the repo this repo was shared from."""
1589    )
1590
1591    store = interfaceutil.Attribute("""A store instance.""")
1592
1593    spath = interfaceutil.Attribute("""Path to the store.""")
1594
1595    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1596
1597    cachevfs = interfaceutil.Attribute(
1598        """A VFS used to access the cache directory.
1599
1600        Typically .hg/cache.
1601        """
1602    )
1603
1604    wcachevfs = interfaceutil.Attribute(
1605        """A VFS used to access the cache directory dedicated to working copy
1606
1607        Typically .hg/wcache.
1608        """
1609    )
1610
1611    filteredrevcache = interfaceutil.Attribute(
1612        """Holds sets of revisions to be filtered."""
1613    )
1614
1615    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1616
1617    filecopiesmode = interfaceutil.Attribute(
1618        """The way files copies should be dealt with in this repo."""
1619    )
1620
1621    def close():
1622        """Close the handle on this repository."""
1623
1624    def peer():
1625        """Obtain an object conforming to the ``peer`` interface."""
1626
1627    def unfiltered():
1628        """Obtain an unfiltered/raw view of this repo."""
1629
1630    def filtered(name, visibilityexceptions=None):
1631        """Obtain a named view of this repository."""
1632
1633    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1634
1635    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1636
1637    manifestlog = interfaceutil.Attribute(
1638        """An instance conforming to the ``imanifestlog`` interface.
1639
1640        Provides access to manifests for the repository.
1641        """
1642    )
1643
1644    dirstate = interfaceutil.Attribute("""Working directory state.""")
1645
1646    narrowpats = interfaceutil.Attribute(
1647        """Matcher patterns for this repository's narrowspec."""
1648    )
1649
1650    def narrowmatch(match=None, includeexact=False):
1651        """Obtain a matcher for the narrowspec."""
1652
1653    def setnarrowpats(newincludes, newexcludes):
1654        """Define the narrowspec for this repository."""
1655
1656    def __getitem__(changeid):
1657        """Try to resolve a changectx."""
1658
1659    def __contains__(changeid):
1660        """Whether a changeset exists."""
1661
1662    def __nonzero__():
1663        """Always returns True."""
1664        return True
1665
1666    __bool__ = __nonzero__
1667
1668    def __len__():
1669        """Returns the number of changesets in the repo."""
1670
1671    def __iter__():
1672        """Iterate over revisions in the changelog."""
1673
1674    def revs(expr, *args):
1675        """Evaluate a revset.
1676
1677        Emits revisions.
1678        """
1679
1680    def set(expr, *args):
1681        """Evaluate a revset.
1682
1683        Emits changectx instances.
1684        """
1685
1686    def anyrevs(specs, user=False, localalias=None):
1687        """Find revisions matching one of the given revsets."""
1688
1689    def url():
1690        """Returns a string representing the location of this repo."""
1691
1692    def hook(name, throw=False, **args):
1693        """Call a hook."""
1694
1695    def tags():
1696        """Return a mapping of tag to node."""
1697
1698    def tagtype(tagname):
1699        """Return the type of a given tag."""
1700
1701    def tagslist():
1702        """Return a list of tags ordered by revision."""
1703
1704    def nodetags(node):
1705        """Return the tags associated with a node."""
1706
1707    def nodebookmarks(node):
1708        """Return the list of bookmarks pointing to the specified node."""
1709
1710    def branchmap():
1711        """Return a mapping of branch to heads in that branch."""
1712
1713    def revbranchcache():
        """Return a rev-branch cache instance for this repository."""
1715
1716    def register_changeset(rev, changelogrevision):
1717        """Extension point for caches for new nodes.
1718
        Multiple consumers are expected to need parts of the changelogrevision,
        so it is provided as an optimization to avoid duplicate lookups. A
        simple cache would be fragile when other revisions are accessed, too."""
1722        pass
1723
    def branchtip(branch, ignoremissing=False):
        """Return the tip node for a given branch."""
1726
1727    def lookup(key):
1728        """Resolve the node for a revision."""
1729
1730    def lookupbranch(key):
1731        """Look up the branch name of the given revision or branch name."""
1732
1733    def known(nodes):
1734        """Determine whether a series of nodes is known.
1735
1736        Returns a list of bools.
1737        """
1738
1739    def local():
1740        """Whether the repository is local."""
1741        return True
1742
1743    def publishing():
1744        """Whether the repository is a publishing repository."""
1745
1746    def cancopy():
        """Whether this repository can be cloned by copying its files."""
1748
1749    def shared():
1750        """The type of shared repository or None."""
1751
1752    def wjoin(f, *insidef):
1753        """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1754
1755    def setparents(p1, p2):
1756        """Set the parent nodes of the working directory."""
1757
1758    def filectx(path, changeid=None, fileid=None):
1759        """Obtain a filectx for the given file revision."""
1760
1761    def getcwd():
1762        """Obtain the current working directory from the dirstate."""
1763
1764    def pathto(f, cwd=None):
1765        """Obtain the relative path to a file."""
1766
1767    def adddatafilter(name, fltr):
        """Register a data filter function under ``name``."""
1769
1770    def wread(filename):
1771        """Read a file from wvfs, using data filters."""
1772
1773    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1774        """Write data to a file in the wvfs, using data filters."""
1775
1776    def wwritedata(filename, data):
1777        """Resolve data for writing to the wvfs, using data filters."""
1778
1779    def currenttransaction():
1780        """Obtain the current transaction instance or None."""
1781
1782    def transaction(desc, report=None):
1783        """Open a new transaction to write to the repository."""
1784
1785    def undofiles():
1786        """Returns a list of (vfs, path) for files to undo transactions."""
1787
1788    def recover():
1789        """Roll back an interrupted transaction."""
1790
1791    def rollback(dryrun=False, force=False):
1792        """Undo the last transaction.
1793
1794        DANGEROUS.
1795        """
1796
1797    def updatecaches(tr=None, full=False):
1798        """Warm repo caches."""
1799
1800    def invalidatecaches():
1801        """Invalidate cached data due to the repository mutating."""
1802
1803    def invalidatevolatilesets():
        """Invalidate caches of volatile revision sets (e.g. hidden or
        obsolete changesets)."""
1805
1806    def invalidatedirstate():
1807        """Invalidate the dirstate."""
1808
1809    def invalidate(clearfilecache=False):
        """Invalidate caches of store data so it is reloaded from disk."""
1811
1812    def invalidateall():
        """Invalidate all cached repository data, including the dirstate."""
1814
1815    def lock(wait=True):
1816        """Lock the repository store and return a lock instance."""
1817
1818    def wlock(wait=True):
1819        """Lock the non-store parts of the repository."""
1820
1821    def currentwlock():
1822        """Return the wlock if it's held or None."""
1823
1824    def checkcommitpatterns(wctx, match, status, fail):
        """Verify that the files matched for a commit exist, calling
        ``fail`` for those that do not."""
1826
1827    def commit(
1828        text=b'',
1829        user=None,
1830        date=None,
1831        match=None,
1832        force=False,
1833        editor=False,
1834        extra=None,
1835    ):
1836        """Add a new revision to the repository."""
1837
1838    def commitctx(ctx, error=False, origctx=None):
1839        """Commit a commitctx instance to the repository."""
1840
1841    def destroying():
1842        """Inform the repository that nodes are about to be destroyed."""
1843
1844    def destroyed():
1845        """Inform the repository that nodes have been destroyed."""
1846
1847    def status(
1848        node1=b'.',
1849        node2=None,
1850        match=None,
1851        ignored=False,
1852        clean=False,
1853        unknown=False,
1854        listsubrepos=False,
1855    ):
1856        """Convenience method to call repo[x].status()."""
1857
1858    def addpostdsstatus(ps):
        """Register a callable to run after a dirstate status invocation."""
1860
1861    def postdsstatus():
        """Return the registered post-dirstate-status callables."""
1863
1864    def clearpostdsstatus():
        """Clear the registered post-dirstate-status callables."""
1866
1867    def heads(start=None):
1868        """Obtain list of nodes that are DAG heads."""
1869
1870    def branchheads(branch=None, start=None, closed=False):
        """Return the head nodes of the given (or current) branch."""
1872
1873    def branches(nodes):
1874        pass
1875
1876    def between(pairs):
1877        pass
1878
1879    def checkpush(pushop):
        """Extension point for validating an outgoing push operation."""
1881
1882    prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1883
1884    def pushkey(namespace, key, old, new):
        """Set the value of a key in a pushkey namespace.

        Returns a bool indicating whether the update was accepted.
        """
1886
1887    def listkeys(namespace):
        """Return a mapping of keys to values for a pushkey namespace."""
1889
1890    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used by ``hg debugwireargs`` to exercise argument passing."""
1892
1893    def savecommitmessage(text):
        """Save a commit message so it can be recovered if the commit fails."""
1895
1896    def register_sidedata_computer(
1897        kind, category, keys, computer, flags, replace=False
1898    ):
        """Register a function that computes sidedata of the given category
        for revisions of the given kind."""
1900
1901    def register_wanted_sidedata(category):
        """Declare that sidedata for the given category is wanted by this
        repository."""
1903
1904
1905class completelocalrepository(
1906    ilocalrepositorymain, ilocalrepositoryfilestorage
1907):
1908    """Complete interface for a local repository."""
1909
1910
1911class iwireprotocolcommandcacher(interfaceutil.Interface):
1912    """Represents a caching backend for wire protocol commands.
1913
1914    Wire protocol version 2 supports transparent caching of many commands.
1915    To leverage this caching, servers can activate objects that cache
1916    command responses. Objects handle both cache writing and reading.
1917    This interface defines how that response caching mechanism works.
1918
1919    Wire protocol version 2 commands emit a series of objects that are
1920    serialized and sent to the client. The caching layer exists between
1921    the invocation of the command function and the sending of its output
1922    objects to an output layer.
1923
1924    Instances of this interface represent a binding to a cache that
1925    can serve a response (in place of calling a command function) and/or
1926    write responses to a cache for subsequent use.
1927
1928    When a command request arrives, the following happens with regards
1929    to this interface:
1930
1931    1. The server determines whether the command request is cacheable.
1932    2. If it is, an instance of this interface is spawned.
1933    3. The cacher is activated in a context manager (``__enter__`` is called).
1934    4. A cache *key* for that request is derived. This will call the
1935       instance's ``adjustcachekeystate()`` method so the derivation
1936       can be influenced.
1937    5. The cacher is informed of the derived cache key via a call to
1938       ``setcachekey()``.
1939    6. The cacher's ``lookup()`` method is called to test for presence of
1940       the derived key in the cache.
1941    7. If ``lookup()`` returns a hit, that cached result is used in place
1942       of invoking the command function. ``__exit__`` is called and the instance
1943       is discarded.
    8. If there was no cache hit, the command function is invoked.
1945    9. ``onobject()`` is called for each object emitted by the command
1946       function.
1947    10. After the final object is seen, ``onfinished()`` is called.
1948    11. ``__exit__`` is called to signal the end of use of the instance.
1949
1950    Cache *key* derivation can be influenced by the instance.
1951
1952    Cache keys are initially derived by a deterministic representation of
1953    the command request. This includes the command name, arguments, protocol
1954    version, etc. This initial key derivation is performed by CBOR-encoding a
1955    data structure and feeding that output into a hasher.
1956
1957    Instances of this interface can influence this initial key derivation
1958    via ``adjustcachekeystate()``.
1959
1960    The instance is informed of the derived cache key via a call to
1961    ``setcachekey()``. The instance must store the key locally so it can
1962    be consulted on subsequent operations that may require it.
1963
1964    When constructed, the instance has access to a callable that can be used
1965    for encoding response objects. This callable receives as its single
1966    argument an object emitted by a command function. It returns an iterable
1967    of bytes chunks representing the encoded object. Unless the cacher is
1968    caching native Python objects in memory or has a way of reconstructing
1969    the original Python objects, implementations typically call this function
1970    to produce bytes from the output objects and then store those bytes in
1971    the cache. When it comes time to re-emit those bytes, they are wrapped
1972    in a ``wireprototypes.encodedresponse`` instance to tell the output
1973    layer that they are pre-encoded.
1974
1975    When receiving the objects emitted by the command function, instances
1976    can choose what to do with those objects. The simplest thing to do is
1977    re-emit the original objects. They will be forwarded to the output
1978    layer and will be processed as if the cacher did not exist.
1979
    Implementations could also choose not to emit objects, instead locally
    buffering objects or their encoded representation. They could then emit
    a single "coalesced" object when ``onfinished()`` is called. In this
    way, the implementation would function as a filtering layer of sorts.
1985
1986    When caching objects, typically the encoded form of the object will
1987    be stored. Keep in mind that if the original object is forwarded to
1988    the output layer, it will need to be encoded there as well. For large
1989    output, this redundant encoding could add overhead. Implementations
1990    could wrap the encoded object data in ``wireprototypes.encodedresponse``
1991    instances to avoid this overhead.
1992    """
1993
1994    def __enter__():
1995        """Marks the instance as active.
1996
1997        Should return self.
1998        """
1999
2000    def __exit__(exctype, excvalue, exctb):
2001        """Called when cacher is no longer used.
2002
2003        This can be used by implementations to perform cleanup actions (e.g.
        disconnecting network sockets, aborting a partially cached response).
2005        """
2006
2007    def adjustcachekeystate(state):
2008        """Influences cache key derivation by adjusting state to derive key.
2009
2010        A dict defining the state used to derive the cache key is passed.
2011
2012        Implementations can modify this dict to record additional state that
2013        is wanted to influence key derivation.
2014
2015        Implementations are *highly* encouraged to not modify or delete
2016        existing keys.
2017        """
2018
2019    def setcachekey(key):
2020        """Record the derived cache key for this request.
2021
2022        Instances may mutate the key for internal usage, as desired. e.g.
2023        instances may wish to prepend the repo name, introduce path
2024        components for filesystem or URL addressing, etc. Behavior is up to
2025        the cache.
2026
2027        Returns a bool indicating if the request is cacheable by this
2028        instance.
2029        """
2030
2031    def lookup():
2032        """Attempt to resolve an entry in the cache.
2033
2034        The instance is instructed to look for the cache key that it was
2035        informed about via the call to ``setcachekey()``.
2036
2037        If there's no cache hit or the cacher doesn't wish to use the cached
2038        entry, ``None`` should be returned.
2039
2040        Else, a dict defining the cached result should be returned. The
2041        dict may have the following keys:
2042
2043        objs
2044           An iterable of objects that should be sent to the client. That
2045           iterable of objects is expected to be what the command function
2046           would return if invoked or an equivalent representation thereof.
2047        """
2048
2049    def onobject(obj):
2050        """Called when a new object is emitted from the command function.
2051
2052        Receives as its argument the object that was emitted from the
2053        command function.
2054
2055        This method returns an iterator of objects to forward to the output
2056        layer. The easiest implementation is a generator that just
2057        ``yield obj``.
2058        """
2059
2060    def onfinished():
2061        """Called after all objects have been emitted from the command function.
2062
2063        Implementations should return an iterator of objects to forward to
2064        the output layer.
2065
2066        This method can be a generator.
2067        """
2068
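
# The class below is an illustrative, hedged sketch and not part of the
# interface definitions above: a minimal cacher-style object that never
# serves from cache and simply forwards command output objects. A real
# implementation would also store encoded responses under the derived
# cache key.
class _examplenoopcacher(object):
    """Example cacher that forwards every emitted object unchanged."""

    def __init__(self):
        # The derived cache key recorded via ``setcachekey()``.
        self.key = None

    def __enter__(self):
        # Mark the instance as active; the interface expects ``self`` back.
        return self

    def __exit__(self, exctype, excvalue, exctb):
        # Nothing to clean up for this no-op example.
        pass

    def adjustcachekeystate(self, state):
        # Leave the cache key derivation state untouched.
        pass

    def setcachekey(self, key):
        self.key = key
        # Claim the request is cacheable even though nothing is stored.
        return True

    def lookup(self):
        # Never report a cache hit.
        return None

    def onobject(self, obj):
        # Forward the original object to the output layer unchanged.
        yield obj

    def onfinished(self):
        # Nothing was buffered, so there is nothing extra to emit.
        return []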