1# hg.py - repository classes for mercurial
2#
3# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5#
6# This software may be used and distributed according to the terms of the
7# GNU General Public License version 2 or any later version.
8
9from __future__ import absolute_import
10
11import errno
12import os
13import shutil
14import stat
15
16from .i18n import _
17from .node import (
18    hex,
19    sha1nodeconstants,
20    short,
21)
22from .pycompat import getattr
23
24from . import (
25    bookmarks,
26    bundlerepo,
27    cmdutil,
28    destutil,
29    discovery,
30    error,
31    exchange,
32    extensions,
33    graphmod,
34    httppeer,
35    localrepo,
36    lock,
37    logcmdutil,
38    logexchange,
39    merge as mergemod,
40    mergestate as mergestatemod,
41    narrowspec,
42    phases,
43    requirements,
44    scmutil,
45    sshpeer,
46    statichttprepo,
47    ui as uimod,
48    unionrepo,
49    url,
50    util,
51    verify as verifymod,
52    vfs as vfsmod,
53)
54from .interfaces import repository as repositorymod
55from .utils import (
56    hashutil,
57    stringutil,
58    urlutil,
59)
60
61
62release = lock.release
63
64# shared features
65sharedbookmarks = b'bookmarks'
66
67
def _local(path):
    """return the repository module for a local ``path``

    ``bundlerepo`` is selected when the path names a regular file (a
    bundle); otherwise ``localrepo`` is selected.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # os.stat() is used directly instead of os.path.isfile() because
        # the latter started returning `False` on invalid path exceptions
        # starting in Python 3.8, and invalid paths must be handled
        # specially here.
        mode = os.stat(path).st_mode
        isfile = stat.S_ISREG(mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    return bundlerepo if isfile else localrepo
87
88
def addbranchrevs(lrepo, other, branches, revs):
    """resolve a branch specification against the peer ``other``

    ``lrepo`` is a local repository used only to resolve the b'.'
    branch from its dirstate (may be falsy, in which case b'.' aborts).
    ``other`` is a repo or peer object; its peer is used.
    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    urlutil.parseurl().
    ``revs`` is an optional iterable of extra revisions to include.

    Returns a ``(revs, checkout)`` pair: the resolved revisions and the
    revision to check out (possibly None).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve: pass the caller's revs straight through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # can't resolve branch names without the branchmap command;
        # treat the URL fragment as a plain revision instead
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # append the heads of ``branch`` to revs; return whether the
        # branch is known to the remote
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # the fragment may name a branch or be a raw revision identifier
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
131
132
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))

    Deprecated forwarding shim; the implementation lives in
    mercurial.utils.urlutil.
    '''
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
138
139
# map a URL scheme to the module (or callable returning one) implementing
# repositories/peers for it; _peerlookup() falls back to the b'file'
# entry for unknown schemes
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149
150
def _peerlookup(path):
    """return the module (or object) able to handle repositories at ``path``"""
    scheme = urlutil.url(path).scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # callable(handler) cannot be tested instead, because 'handler'
        # can be an unloaded module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
163
164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal() -> not a local scheme
        return False
173
174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    parsed = urlutil.url(path, parsequery=False, parsefragment=False)
    if not parsed.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(parsed.localpath(), b'rb')
182
183
# a list of (ui, repo) functions called for wire peer initialization;
# each is invoked from _peerorrepo() when the opened object is not local
wirepeersetupfuncs = []
186
187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Instantiates the repo/peer via the scheme handler from
    _peerlookup(), runs the ``presetupfuncs`` callbacks and every loaded
    extension's ``reposetup`` hook, and — for non-local objects — the
    registered ``wirepeersetupfuncs``.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # use the instantiated object's own ui if it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    # time both the whole reposetup phase and each individual hook
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get additional setup from registered callbacks
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
214
215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts if the path refers to a non-local repository.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    local_instance = obj.local()
    if not local_instance:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    # hand back the 'visible' filtered view of the repository
    return local_instance.filtered(b'visible')
239
240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    remote_ui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        remote_ui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
247
248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    if path:
        # last path component, ignoring any trailing slash
        return os.path.basename(os.path.normpath(path))
    return b''
269
270
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share
        return None

    # reuse a previously resolved source repository if present
    cached = util.safehasattr(repo, b'srcrepo') and repo.srcrepo
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
288
289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is a local path (bytes) or a repo/peer object.
    ``dest`` defaults to the base name of ``source``.
    ``update`` updates the working directory after sharing; it may also
    be a revision to check out.
    ``bookmarks`` adds bookmarks to the set of shared items.
    ``defaultpath`` is forwarded to postshare() as the [paths] default.
    ``relative`` is passed through as the ``sharedrelative`` creation
    option.

    Returns the newly created shared repository object.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path(ui, dest)[1]

    if isinstance(source, bytes):
        origsource, source, branches = urlutil.get_clone_path(ui, source)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open the destination so it picks up the post-share configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
336
337
def _prependsourcehgrc(repo):
    """copy the share source's config in front of this repo's .hg/hgrc

    Used on unshare. This is only done if the share was performed using
    the share-safe method, where the source's config is shared with
    shares.
    """
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # source has no config to carry over
        return

    existingconfig = (
        dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''
    )

    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(existingconfig)
358
359
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for reference/recovery
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            # drop the share-related requirements and persist the change
            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    #       removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make the old repo object raise on further use
    localrepo.poisonrepository(repo)

    return newrepo
406
407
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record the source as the default pull/push path of the share
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow shares also need the narrowspec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
424
425
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # a specific revision was requested
        checkout = update
    # try the requested revision first, then fall back to default/tip
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
446
447
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts as None so the first util.copyfiles() call
        # decides whether hardlinking works; the decision is then reused
        # for subsequent files
        hardlink = None
        # NOTE: hardlink is always None at this point, so the progress
        # topic is effectively always b'copying'
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # skip copying phase data from publishing source repos
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # the caller never sees destlock on failure, so release it here
        release(destlock)
        raise
486
487
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a ``(srcpeer, destpeer)`` pair.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        # another client may have created it concurrently
        if e.errno != errno.EEXIST:
            raise
    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
594
595
596# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    if not os.path.exists(srcfname):
        # source cache missing; nothing to copy
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfname, os.path.join(dstcachedir, fname))
605
606
607def clone(
608    ui,
609    peeropts,
610    source,
611    dest=None,
612    pull=False,
613    revs=None,
614    update=True,
615    stream=False,
616    branch=None,
617    shareopts=None,
618    storeincludepats=None,
619    storeexcludepats=None,
620    depth=None,
621):
622    """Make a copy of an existing repository.
623
624    Create a copy of an existing repository in a new directory.  The
625    source and destination are URLs, as passed to the repository
626    function.  Returns a pair of repository peers, the source and
627    newly created destination.
628
629    The location of the source is added to the new repository's
630    .hg/hgrc file, as the default to be used for future pulls and
631    pushes.
632
633    If an exception is raised, the partly cloned/updated destination
634    repository will be deleted.
635
636    Arguments:
637
638    source: repository object or URL
639
640    dest: URL of destination repository to create (defaults to base
641    name of source repository)
642
643    pull: always pull from source repository, even in local case or if the
644    server prefers streaming
645
646    stream: stream raw data uncompressed from repository (fast over
647    LAN, slow over WAN)
648
649    revs: revision to clone up to (implies pull=True)
650
651    update: update working directory after clone completes, if
652    destination is local repository (True means update to default rev,
653    anything else is treated as a revision)
654
655    branch: branches to clone
656
657    shareopts: dict of options to control auto sharing behavior. The "pool" key
658    activates auto sharing mode and defines the directory for stores. The
659    "mode" key determines how to construct the directory name of the shared
660    repository. "identity" means the name is derived from the node of the first
661    changeset in the repository. "remote" means the name is derived from the
662    remote's path/URL. Defaults to "identity."
663
664    storeincludepats and storeexcludepats: sets of file patterns to include and
665    exclude in the repository copy, respectively. If not defined, all files
666    will be included (a "full" clone). Otherwise a "narrow" clone containing
667    only the requested files will be performed. If ``storeincludepats`` is not
668    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669    ``path:.``. If both are empty sets, no files will be cloned.
670    """
671
672    if isinstance(source, bytes):
673        src = urlutil.get_clone_path(ui, source, branch)
674        origsource, source, branches = src
675        srcpeer = peer(ui, peeropts, source)
676    else:
677        srcpeer = source.peer()  # in case we were called with a localrepo
678        branches = (None, branch or [])
679        origsource = source = srcpeer.url()
680    srclock = destlock = cleandir = None
681    destpeer = None
682    try:
683        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
684
685        if dest is None:
686            dest = defaultdest(source)
687            if dest:
688                ui.status(_(b"destination directory: %s\n") % dest)
689        else:
690            dest = urlutil.get_clone_path(ui, dest)[0]
691
692        dest = urlutil.urllocalpath(dest)
693        source = urlutil.urllocalpath(source)
694
695        if not dest:
696            raise error.InputError(_(b"empty destination path is not valid"))
697
698        destvfs = vfsmod.vfs(dest, expandpath=True)
699        if destvfs.lexists():
700            if not destvfs.isdir():
701                raise error.InputError(
702                    _(b"destination '%s' already exists") % dest
703                )
704            elif destvfs.listdir():
705                raise error.InputError(
706                    _(b"destination '%s' is not empty") % dest
707                )
708
709        createopts = {}
710        narrow = False
711
712        if storeincludepats is not None:
713            narrowspec.validatepatterns(storeincludepats)
714            narrow = True
715
716        if storeexcludepats is not None:
717            narrowspec.validatepatterns(storeexcludepats)
718            narrow = True
719
720        if narrow:
721            # Include everything by default if only exclusion patterns defined.
722            if storeexcludepats and not storeincludepats:
723                storeincludepats = {b'path:.'}
724
725            createopts[b'narrowfiles'] = True
726
727        if depth:
728            createopts[b'shallowfilestore'] = True
729
730        if srcpeer.capable(b'lfs-serve'):
731            # Repository creation honors the config if it disabled the extension, so
732            # we can't just announce that lfs will be enabled.  This check avoids
733            # saying that lfs will be enabled, and then saying it's an unknown
734            # feature.  The lfs creation option is set in either case so that a
735            # requirement is added.  If the extension is explicitly disabled but the
736            # requirement is set, the clone aborts early, before transferring any
737            # data.
738            createopts[b'lfs'] = True
739
740            if extensions.disabled_help(b'lfs'):
741                ui.status(
742                    _(
743                        b'(remote is using large file support (lfs), but it is '
744                        b'explicitly disabled in the local configuration)\n'
745                    )
746                )
747            else:
748                ui.status(
749                    _(
750                        b'(remote is using large file support (lfs); lfs will '
751                        b'be enabled for this repository)\n'
752                    )
753                )
754
755        shareopts = shareopts or {}
756        sharepool = shareopts.get(b'pool')
757        sharenamemode = shareopts.get(b'mode')
758        if sharepool and islocal(dest):
759            sharepath = None
760            if sharenamemode == b'identity':
761                # Resolve the name from the initial changeset in the remote
762                # repository. This returns nullid when the remote is empty. It
763                # raises RepoLookupError if revision 0 is filtered or otherwise
764                # not available. If we fail to resolve, sharing is not enabled.
765                try:
766                    with srcpeer.commandexecutor() as e:
767                        rootnode = e.callcommand(
768                            b'lookup',
769                            {
770                                b'key': b'0',
771                            },
772                        ).result()
773
774                    if rootnode != sha1nodeconstants.nullid:
775                        sharepath = os.path.join(sharepool, hex(rootnode))
776                    else:
777                        ui.status(
778                            _(
779                                b'(not using pooled storage: '
780                                b'remote appears to be empty)\n'
781                            )
782                        )
783                except error.RepoLookupError:
784                    ui.status(
785                        _(
786                            b'(not using pooled storage: '
787                            b'unable to resolve identity of remote)\n'
788                        )
789                    )
790            elif sharenamemode == b'remote':
791                sharepath = os.path.join(
792                    sharepool, hex(hashutil.sha1(source).digest())
793                )
794            else:
795                raise error.Abort(
796                    _(b'unknown share naming mode: %s') % sharenamemode
797                )
798
799            # TODO this is a somewhat arbitrary restriction.
800            if narrow:
801                ui.status(
802                    _(b'(pooled storage not supported for narrow clones)\n')
803                )
804                sharepath = None
805
806            if sharepath:
807                return clonewithshare(
808                    ui,
809                    peeropts,
810                    sharepath,
811                    source,
812                    srcpeer,
813                    dest,
814                    pull=pull,
815                    rev=revs,
816                    update=update,
817                    stream=stream,
818                )
819
820        srcrepo = srcpeer.local()
821
822        abspath = origsource
823        if islocal(origsource):
824            abspath = util.abspath(urlutil.urllocalpath(origsource))
825
826        if islocal(dest):
827            if os.path.exists(dest):
828                # only clean up directories we create ourselves
829                hgdir = os.path.realpath(os.path.join(dest, b".hg"))
830                cleandir = hgdir
831            else:
832                cleandir = dest
833
834        copy = False
835        if (
836            srcrepo
837            and srcrepo.cancopy()
838            and islocal(dest)
839            and not phases.hassecret(srcrepo)
840        ):
841            copy = not pull and not revs
842
843        # TODO this is a somewhat arbitrary restriction.
844        if narrow:
845            copy = False
846
847        if copy:
848            try:
849                # we use a lock here because if we race with commit, we
850                # can end up with extra data in the cloned revlogs that's
851                # not pointed to by changesets, thus causing verify to
852                # fail
853                srclock = srcrepo.lock(wait=False)
854            except error.LockError:
855                copy = False
856
857        if copy:
858            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
859
860            destrootpath = urlutil.urllocalpath(dest)
861            dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
862            localrepo.createrepository(
863                ui,
864                destrootpath,
865                requirements=dest_reqs,
866            )
867            destrepo = localrepo.makelocalrepository(ui, destrootpath)
868            destlock = destrepo.lock()
869            from . import streamclone  # avoid cycle
870
871            streamclone.local_copy(srcrepo, destrepo)
872
873            # we need to re-init the repo after manually copying the data
874            # into it
875            destpeer = peer(srcrepo, peeropts, dest)
876            srcrepo.hook(
877                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
878            )
879        else:
880            try:
881                # only pass ui when no srcrepo
882                destpeer = peer(
883                    srcrepo or ui,
884                    peeropts,
885                    dest,
886                    create=True,
887                    createopts=createopts,
888                )
889            except OSError as inst:
890                if inst.errno == errno.EEXIST:
891                    cleandir = None
892                    raise error.Abort(
893                        _(b"destination '%s' already exists") % dest
894                    )
895                raise
896
897            if revs:
898                if not srcpeer.capable(b'lookup'):
899                    raise error.Abort(
900                        _(
901                            b"src repository does not support "
902                            b"revision lookup and so doesn't "
903                            b"support clone by revision"
904                        )
905                    )
906
907                # TODO this is batchable.
908                remoterevs = []
909                for rev in revs:
910                    with srcpeer.commandexecutor() as e:
911                        remoterevs.append(
912                            e.callcommand(
913                                b'lookup',
914                                {
915                                    b'key': rev,
916                                },
917                            ).result()
918                        )
919                revs = remoterevs
920
921                checkout = revs[0]
922            else:
923                revs = None
924            local = destpeer.local()
925            if local:
926                if narrow:
927                    with local.wlock(), local.lock():
928                        local.setnarrowpats(storeincludepats, storeexcludepats)
929                        narrowspec.copytoworkingcopy(local)
930
931                u = urlutil.url(abspath)
932                defaulturl = bytes(u)
933                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
934                if not stream:
935                    if pull:
936                        stream = False
937                    else:
938                        stream = None
939                # internal config: ui.quietbookmarkmove
940                overrides = {(b'ui', b'quietbookmarkmove'): True}
941                with local.ui.configoverride(overrides, b'clone'):
942                    exchange.pull(
943                        local,
944                        srcpeer,
945                        heads=revs,
946                        streamclonerequested=stream,
947                        includepats=storeincludepats,
948                        excludepats=storeexcludepats,
949                        depth=depth,
950                    )
951            elif srcrepo:
952                # TODO lift restriction once exchange.push() accepts narrow
953                # push.
954                if narrow:
955                    raise error.Abort(
956                        _(
957                            b'narrow clone not available for '
958                            b'remote destinations'
959                        )
960                    )
961
962                exchange.push(
963                    srcrepo,
964                    destpeer,
965                    revs=revs,
966                    bookmarks=srcrepo._bookmarks.keys(),
967                )
968            else:
969                raise error.Abort(
970                    _(b"clone from remote to remote not supported")
971                )
972
973        cleandir = None
974
975        destrepo = destpeer.local()
976        if destrepo:
977            template = uimod.samplehgrcs[b'cloned']
978            u = urlutil.url(abspath)
979            u.passwd = None
980            defaulturl = bytes(u)
981            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
982            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
983
984            if ui.configbool(b'experimental', b'remotenames'):
985                logexchange.pullremotenames(destrepo, srcpeer)
986
987            if update:
988                if update is not True:
989                    with srcpeer.commandexecutor() as e:
990                        checkout = e.callcommand(
991                            b'lookup',
992                            {
993                                b'key': update,
994                            },
995                        ).result()
996
997                uprev = None
998                status = None
999                if checkout is not None:
1000                    # Some extensions (at least hg-git and hg-subversion) have
1001                    # a peer.lookup() implementation that returns a name instead
1002                    # of a nodeid. We work around it here until we've figured
1003                    # out a better solution.
1004                    if len(checkout) == 20 and checkout in destrepo:
1005                        uprev = checkout
1006                    elif scmutil.isrevsymbol(destrepo, checkout):
1007                        uprev = scmutil.revsymbol(destrepo, checkout).node()
1008                    else:
1009                        if update is not True:
1010                            try:
1011                                uprev = destrepo.lookup(update)
1012                            except error.RepoLookupError:
1013                                pass
1014                if uprev is None:
1015                    try:
1016                        if destrepo._activebookmark:
1017                            uprev = destrepo.lookup(destrepo._activebookmark)
1018                            update = destrepo._activebookmark
1019                        else:
1020                            uprev = destrepo._bookmarks[b'@']
1021                            update = b'@'
1022                        bn = destrepo[uprev].branch()
1023                        if bn == b'default':
1024                            status = _(b"updating to bookmark %s\n" % update)
1025                        else:
1026                            status = (
1027                                _(b"updating to bookmark %s on branch %s\n")
1028                            ) % (update, bn)
1029                    except KeyError:
1030                        try:
1031                            uprev = destrepo.branchtip(b'default')
1032                        except error.RepoLookupError:
1033                            uprev = destrepo.lookup(b'tip')
1034                if not status:
1035                    bn = destrepo[uprev].branch()
1036                    status = _(b"updating to branch %s\n") % bn
1037                destrepo.ui.status(status)
1038                _update(destrepo, uprev)
1039                if update in destrepo._bookmarks:
1040                    bookmarks.activate(destrepo, update)
1041            if destlock is not None:
1042                release(destlock)
            # There is a tiny window here where someone could end up writing
            # to the repository before the caches are sure to be warm. This is
            # "fine" as the only "bad" outcome would be some slowness; that
            # potential slowness already affects readers.
1047            with destrepo.lock():
1048                destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1049    finally:
1050        release(srclock, destlock)
1051        if cleandir is not None:
1052            shutil.rmtree(cleandir, True)
1053        if srcpeer is not None:
1054            srcpeer.close()
1055        if destpeer and destpeer.local() is None:
1056            destpeer.close()
1057    return srcpeer, destpeer
1058
1059
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge file statistics.

    When ``quietempty`` is true and ``stats`` reports no activity at all,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1075
1076
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node`` (deprecated entry point).

    With ``overwrite`` set, local changes are discarded; otherwise they are
    merged into the destination.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    # thin forwarding shim kept for backward compatibility
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=[b'working copy', b'destination'],
    )
1095
1096
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when any file merges were left unresolved.
    """
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1104
1105
# naming conflict in clone(): clone() uses a local name `update`, so it
# calls the function above through this module-level alias instead
_update = update
1108
1109
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Always returns False, since a forced update cannot stop on conflicts.
    """
    ctx = repo[node]
    stats = mergemod.clean_update(ctx)
    # a clean update discards local changes, so nothing can be unresolved
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False
1117
1118
# naming conflict in updatetotally(): its `clean` parameter shadows the
# function above, which it therefore calls through this alias
_clean = clean
1121
# The full set of values accepted for the `updatecheck` argument of
# updatetotally() (and for the `commands.update.check` config option).
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1128
1129
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

     * ABORT: abort if the working directory is dirty
     * NONE: don't check (merge working directory changes into destination)
     * LINEAR: check that update is linear before merging working directory
               changes into destination
     * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        # read the configured policy; quietly fall back to LINEAR when the
        # configuration is missing or holds an invalid value
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # an explicitly passed value, however, must be valid
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movefrom = None
        warn_other_dests = False
        if checkout is None:
            # no explicit destination: let destutil pick one, possibly with
            # a bookmark to drag along
            checkout, movefrom, brev = destutil.destupdate(repo, clean=clean)
            warn_other_dests = True

        if clean:
            conflicts = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            conflicts = _update(repo, checkout, updatecheck=updatecheck)

        if not conflicts and movefrom:
            wcnode = repo[b'.'].node()
            if movefrom == wcnode:
                pass  # no-op update
            elif bookmarks.update(repo, [movefrom], wcnode):
                labeled = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % labeled)
            else:
                # this can happen with a non-linear update
                labeled = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % labeled)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                labeled = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % labeled)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                labeled = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % labeled)
            bookmarks.deactivate(repo)

        if warn_other_dests:
            destutil.statusotherdests(ui, repo)

    return conflicts
1208
1209
def merge(ctx, force=False, remind=True, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount
    if unresolved:
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved > 0
1231
1232
def abortmerge(ui, repo):
    """Abort an in-progress merge and update back to the pre-merge parent."""
    state = mergestatemod.mergestate.read(repo)
    if state.active():
        # conflicts were recorded; return to the local side of the merge
        node = state.localctx.hex()
    else:
        # there were no conflicts, so no merge state was stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (forced) update can never leave files unresolved
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1246
1247
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    srcs = list(urlutil.get_pull_paths(repo, ui, [source]))
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        raise error.Abort(msg % len(srcs))
    path = srcs[0]
    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
    if subpath is not None:
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            source = bytes(subpath)
        else:
            parent = urlutil.url(source)
            parent.path = os.path.normpath(b'%s/%s' % (parent.path, subpath))
            source = bytes(parent)
    remote = peer(repo, opts, source)
    cleanup = remote.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, remote, branches, opts.get(b'rev'))
        if revs:
            revs = [remote.lookup(rev) for rev in revs]
        # may replace `remote` with a repo wrapping the incoming bundle and
        # `cleanup` with a function that also removes that bundle
        remote, chlist, cleanup = bundlerepo.getremotechanges(
            ui, repo, remote, revs, opts.get(b"bundle"), opts.get(b"force")
        )
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, remote, opts, buffered=buffered
        )
        displaychlist(remote, chlist, displayer)
        displayer.close()
    finally:
        cleanup()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1305
1306
def incoming(ui, repo, source, opts, subpath=None):
    """Show new changesets found in ``source``; return 0 if any were found.

    Recurses into subrepositories when the `subrepos` option is set.
    """

    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            # optionally skip merge changesets (those with two parents)
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1336
1337
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Return ``(outgoing_revs, others)``: the nodes missing from the
    destinations in ``dests`` and the (still open) peers contacted.

    ``subpath``, when given, is joined onto each destination URL before the
    comparison. Callers must close the returned peers.
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.pushloc or path.loc
        if subpath is not None:
            # Parse into a local instead of rebinding the `subpath`
            # parameter: rebinding made every iteration after the first call
            # urlutil.url() on an already-parsed url object when several
            # destinations were configured.
            sub_url = urlutil.url(subpath)
            if sub_url.isabs():
                dest = bytes(sub_url)
            else:
                p = urlutil.url(dest)
                p.path = os.path.normpath(b'%s/%s' % (p.path, sub_url))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            others.append(other)
        except:  # re-raises
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1376
1377
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on each subrepo when requested; return the min exit code."""
    ret = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for subpath in sorted(wctx.substate):
            ret = min(ret, wctx.sub(subpath).outgoing(ui, dests, opts))
    return ret
1386
1387
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # nothing to filter; emit everything unchanged
        for r in revs:
            yield r
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        # a changeset with two (real) parents is a merge
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1409
1410
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not present in ``dests``; return 0 when some exist."""
    if opts.get(b'graph'):
        logcmdutil.checkunsupportedgraphflags([], opts)
    missing, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if missing:
            ret = 0

            if opts.get(b'graph'):
                revdag = logcmdutil.graphrevs(repo, missing, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for node in _outgoing_filter(repo, missing, opts):
                    displayer.show(repo[node])
                displayer.close()
        # run hooks and subrepo recursion even when nothing was found
        for other in others:
            cmdutil.outgoinghooks(ui, repo, other, opts, missing)
            ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for other in others:
            other.close()
1442
1443
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies referenced subrepositories when .hgsubstate exists in any
    revision. Returns a non-zero value when problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    substate = util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    revs = repo.revs(b"filelog(%s)", substate)

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        sub = ctx.sub(subpath, allowcreate=False)
                        ret = sub.verify() or ret
                    except error.RepoError as inst:
                        repo.ui.warn(b'%d: %s\n' % (rev, inst))
            except Exception:
                # reading .hgsubstate itself blew up
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1476
1477
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):
        # a repository: start from its base ui to drop repo-specific config
        dst = src.baseui.copy()
        src = src.ui  # copy target options from the repo's ui below
    else:
        # a plain (global) ui object: keep all of its options
        dst = src.copy()

    # ssh-specific options; command-line values win over source config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    root = src.config(b'bundle', b'mainreporoot')
    if root:
        dst.setconfig(b'bundle', b'mainreporoot', root, b'copied')

    # selected local settings that matter when talking to the remote
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1506
1507
# Files of interest: (repository attribute naming a directory, filename)
# pairs whose mtime and size are compared to decide whether a repository
# has changed (see cachedlocalrepo._repostate).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1517
1518
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # The mtime+size comparison is a heuristic: clock skew, coarse
        # filesystem timestamps, and same-size content changes can fool it.
        newstate, newmtime = self._repostate()
        if newstate == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = newstate
        self.mtime = newmtime

        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) tuple per file of interest, newest mtime)."""
        entries = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            try:
                st = os.stat(os.path.join(prefix, fname))
            except OSError:
                # file missing: fall back to its containing directory
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            if mtime > maxmtime:
                maxmtime = mtime

        return tuple(entries), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        dup = cachedlocalrepo(repo)
        dup._state = self._state
        dup.mtime = self.mtime
        return dup
1592