51 from .interfaces import repository as repositorymod |
51 from .interfaces import repository as repositorymod |
52 |
52 |
53 release = lock.release |
53 release = lock.release |
54 |
54 |
55 # shared features |
55 # shared features |
56 sharedbookmarks = 'bookmarks' |
56 sharedbookmarks = b'bookmarks' |
57 |
57 |
58 |
58 |
59 def _local(path): |
59 def _local(path): |
60 path = util.expandpath(util.urllocalpath(path)) |
60 path = util.expandpath(util.urllocalpath(path)) |
61 |
61 |
62 try: |
62 try: |
63 isfile = os.path.isfile(path) |
63 isfile = os.path.isfile(path) |
64 # Python 2 raises TypeError, Python 3 ValueError. |
64 # Python 2 raises TypeError, Python 3 ValueError. |
65 except (TypeError, ValueError) as e: |
65 except (TypeError, ValueError) as e: |
66 raise error.Abort( |
66 raise error.Abort( |
67 _('invalid path %s: %s') % (path, pycompat.bytestr(e)) |
67 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e)) |
68 ) |
68 ) |
69 |
69 |
70 return isfile and bundlerepo or localrepo |
70 return isfile and bundlerepo or localrepo |
71 |
71 |
72 |
72 |
83 if revs: |
83 if revs: |
84 revs = list(revs) |
84 revs = list(revs) |
85 else: |
85 else: |
86 revs = [] |
86 revs = [] |
87 |
87 |
88 if not peer.capable('branchmap'): |
88 if not peer.capable(b'branchmap'): |
89 if branches: |
89 if branches: |
90 raise error.Abort(_("remote branch lookup not supported")) |
90 raise error.Abort(_(b"remote branch lookup not supported")) |
91 revs.append(hashbranch) |
91 revs.append(hashbranch) |
92 return revs, revs[0] |
92 return revs, revs[0] |
93 |
93 |
94 with peer.commandexecutor() as e: |
94 with peer.commandexecutor() as e: |
95 branchmap = e.callcommand('branchmap', {}).result() |
95 branchmap = e.callcommand(b'branchmap', {}).result() |
96 |
96 |
97 def primary(branch): |
97 def primary(branch): |
98 if branch == '.': |
98 if branch == b'.': |
99 if not lrepo: |
99 if not lrepo: |
100 raise error.Abort(_("dirstate branch not accessible")) |
100 raise error.Abort(_(b"dirstate branch not accessible")) |
101 branch = lrepo.dirstate.branch() |
101 branch = lrepo.dirstate.branch() |
102 if branch in branchmap: |
102 if branch in branchmap: |
103 revs.extend(node.hex(r) for r in reversed(branchmap[branch])) |
103 revs.extend(node.hex(r) for r in reversed(branchmap[branch])) |
104 return True |
104 return True |
105 else: |
105 else: |
106 return False |
106 return False |
107 |
107 |
108 for branch in branches: |
108 for branch in branches: |
109 if not primary(branch): |
109 if not primary(branch): |
110 raise error.RepoLookupError(_("unknown branch '%s'") % branch) |
110 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) |
111 if hashbranch: |
111 if hashbranch: |
112 if not primary(hashbranch): |
112 if not primary(hashbranch): |
113 revs.append(hashbranch) |
113 revs.append(hashbranch) |
114 return revs, revs[0] |
114 return revs, revs[0] |
115 |
115 |
124 u.fragment = None |
124 u.fragment = None |
125 return bytes(u), (branch, branches or []) |
125 return bytes(u), (branch, branches or []) |
126 |
126 |
127 |
127 |
128 schemes = { |
128 schemes = { |
129 'bundle': bundlerepo, |
129 b'bundle': bundlerepo, |
130 'union': unionrepo, |
130 b'union': unionrepo, |
131 'file': _local, |
131 b'file': _local, |
132 'http': httppeer, |
132 b'http': httppeer, |
133 'https': httppeer, |
133 b'https': httppeer, |
134 'ssh': sshpeer, |
134 b'ssh': sshpeer, |
135 'static-http': statichttprepo, |
135 b'static-http': statichttprepo, |
136 } |
136 } |
137 |
137 |
138 |
138 |
139 def _peerlookup(path): |
139 def _peerlookup(path): |
140 u = util.url(path) |
140 u = util.url(path) |
141 scheme = u.scheme or 'file' |
141 scheme = u.scheme or b'file' |
142 thing = schemes.get(scheme) or schemes['file'] |
142 thing = schemes.get(scheme) or schemes[b'file'] |
143 try: |
143 try: |
144 return thing(path) |
144 return thing(path) |
145 except TypeError: |
145 except TypeError: |
146 # we can't test callable(thing) because 'thing' can be an unloaded |
146 # we can't test callable(thing) because 'thing' can be an unloaded |
147 # module that implements __call__ |
147 # module that implements __call__ |
148 if not util.safehasattr(thing, 'instance'): |
148 if not util.safehasattr(thing, b'instance'): |
149 raise |
149 raise |
150 return thing |
150 return thing |
151 |
151 |
152 |
152 |
153 def islocal(repo): |
153 def islocal(repo): |
162 |
162 |
163 def openpath(ui, path, sendaccept=True): |
163 def openpath(ui, path, sendaccept=True): |
164 '''open path with open if local, url.open if remote''' |
164 '''open path with open if local, url.open if remote''' |
165 pathurl = util.url(path, parsequery=False, parsefragment=False) |
165 pathurl = util.url(path, parsequery=False, parsefragment=False) |
166 if pathurl.islocal(): |
166 if pathurl.islocal(): |
167 return util.posixfile(pathurl.localpath(), 'rb') |
167 return util.posixfile(pathurl.localpath(), b'rb') |
168 else: |
168 else: |
169 return url.open(ui, path, sendaccept=sendaccept) |
169 return url.open(ui, path, sendaccept=sendaccept) |
170 |
170 |
171 |
171 |
172 # a list of (ui, repo) functions called for wire peer initialization |
172 # a list of (ui, repo) functions called for wire peer initialization |
182 ) |
182 ) |
183 ui = getattr(obj, "ui", ui) |
183 ui = getattr(obj, "ui", ui) |
184 for f in presetupfuncs or []: |
184 for f in presetupfuncs or []: |
185 f(ui, obj) |
185 f(ui, obj) |
186 ui.log(b'extension', b'- executing reposetup hooks\n') |
186 ui.log(b'extension', b'- executing reposetup hooks\n') |
187 with util.timedcm('all reposetup') as allreposetupstats: |
187 with util.timedcm(b'all reposetup') as allreposetupstats: |
188 for name, module in extensions.extensions(ui): |
188 for name, module in extensions.extensions(ui): |
189 ui.log(b'extension', b' - running reposetup for %s\n', name) |
189 ui.log(b'extension', b' - running reposetup for %s\n', name) |
190 hook = getattr(module, 'reposetup', None) |
190 hook = getattr(module, 'reposetup', None) |
191 if hook: |
191 if hook: |
192 with util.timedcm('reposetup %r', name) as stats: |
192 with util.timedcm(b'reposetup %r', name) as stats: |
193 hook(ui, obj) |
193 hook(ui, obj) |
194 ui.log( |
194 ui.log( |
195 b'extension', b' > reposetup for %s took %s\n', name, stats |
195 b'extension', b' > reposetup for %s took %s\n', name, stats |
196 ) |
196 ) |
197 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) |
197 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) |
336 # not pointed to by changesets, thus causing verify to |
341 # not pointed to by changesets, thus causing verify to |
337 # fail |
342 # fail |
338 destlock = copystore(ui, repo, repo.path) |
343 destlock = copystore(ui, repo, repo.path) |
339 with destlock or util.nullcontextmanager(): |
344 with destlock or util.nullcontextmanager(): |
340 |
345 |
341 sharefile = repo.vfs.join('sharedpath') |
346 sharefile = repo.vfs.join(b'sharedpath') |
342 util.rename(sharefile, sharefile + '.old') |
347 util.rename(sharefile, sharefile + b'.old') |
343 |
348 |
344 repo.requirements.discard('shared') |
349 repo.requirements.discard(b'shared') |
345 repo.requirements.discard('relshared') |
350 repo.requirements.discard(b'relshared') |
346 repo._writerequirements() |
351 repo._writerequirements() |
347 |
352 |
348 # Removing share changes some fundamental properties of the repo instance. |
353 # Removing share changes some fundamental properties of the repo instance. |
349 # So we instantiate a new repo object and operate on it rather than |
354 # So we instantiate a new repo object and operate on it rather than |
350 # try to keep the existing repo usable. |
355 # try to keep the existing repo usable. |
351 newrepo = repository(repo.baseui, repo.root, create=False) |
356 newrepo = repository(repo.baseui, repo.root, create=False) |
352 |
357 |
353 # TODO: figure out how to access subrepos that exist, but were previously |
358 # TODO: figure out how to access subrepos that exist, but were previously |
354 # removed from .hgsub |
359 # removed from .hgsub |
355 c = newrepo['.'] |
360 c = newrepo[b'.'] |
356 subs = c.substate |
361 subs = c.substate |
357 for s in sorted(subs): |
362 for s in sorted(subs): |
358 c.sub(s).unshare() |
363 c.sub(s).unshare() |
359 |
364 |
360 localrepo.poisonrepository(repo) |
365 localrepo.poisonrepository(repo) |
369 This function configures additional shared data. |
374 This function configures additional shared data. |
370 |
375 |
371 Extensions can wrap this function and write additional entries to |
376 Extensions can wrap this function and write additional entries to |
372 destrepo/.hg/shared to indicate additional pieces of data to be shared. |
377 destrepo/.hg/shared to indicate additional pieces of data to be shared. |
373 """ |
378 """ |
374 default = defaultpath or sourcerepo.ui.config('paths', 'default') |
379 default = defaultpath or sourcerepo.ui.config(b'paths', b'default') |
375 if default: |
380 if default: |
376 template = '[paths]\n' 'default = %s\n' |
381 template = b'[paths]\n' b'default = %s\n' |
377 destrepo.vfs.write('hgrc', util.tonativeeol(template % default)) |
382 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default)) |
378 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements: |
383 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements: |
379 with destrepo.wlock(): |
384 with destrepo.wlock(): |
380 narrowspec.copytoworkingcopy(destrepo) |
385 narrowspec.copytoworkingcopy(destrepo) |
381 |
386 |
382 |
387 |
408 returns destlock |
413 returns destlock |
409 ''' |
414 ''' |
410 destlock = None |
415 destlock = None |
411 try: |
416 try: |
412 hardlink = None |
417 hardlink = None |
413 topic = _('linking') if hardlink else _('copying') |
418 topic = _(b'linking') if hardlink else _(b'copying') |
414 with ui.makeprogress(topic, unit=_('files')) as progress: |
419 with ui.makeprogress(topic, unit=_(b'files')) as progress: |
415 num = 0 |
420 num = 0 |
416 srcpublishing = srcrepo.publishing() |
421 srcpublishing = srcrepo.publishing() |
417 srcvfs = vfsmod.vfs(srcrepo.sharedpath) |
422 srcvfs = vfsmod.vfs(srcrepo.sharedpath) |
418 dstvfs = vfsmod.vfs(destpath) |
423 dstvfs = vfsmod.vfs(destpath) |
419 for f in srcrepo.store.copylist(): |
424 for f in srcrepo.store.copylist(): |
420 if srcpublishing and f.endswith('phaseroots'): |
425 if srcpublishing and f.endswith(b'phaseroots'): |
421 continue |
426 continue |
422 dstbase = os.path.dirname(f) |
427 dstbase = os.path.dirname(f) |
423 if dstbase and not dstvfs.exists(dstbase): |
428 if dstbase and not dstvfs.exists(dstbase): |
424 dstvfs.mkdir(dstbase) |
429 dstvfs.mkdir(dstbase) |
425 if srcvfs.exists(f): |
430 if srcvfs.exists(f): |
426 if f.endswith('data'): |
431 if f.endswith(b'data'): |
427 # 'dstbase' may be empty (e.g. revlog format 0) |
432 # 'dstbase' may be empty (e.g. revlog format 0) |
428 lockfile = os.path.join(dstbase, "lock") |
433 lockfile = os.path.join(dstbase, b"lock") |
429 # lock to avoid premature writing to the target |
434 # lock to avoid premature writing to the target |
430 destlock = lock.lock(dstvfs, lockfile) |
435 destlock = lock.lock(dstvfs, lockfile) |
431 hardlink, n = util.copyfiles( |
436 hardlink, n = util.copyfiles( |
432 srcvfs.join(f), dstvfs.join(f), hardlink, progress |
437 srcvfs.join(f), dstvfs.join(f), hardlink, progress |
433 ) |
438 ) |
434 num += n |
439 num += n |
435 if hardlink: |
440 if hardlink: |
436 ui.debug("linked %d files\n" % num) |
441 ui.debug(b"linked %d files\n" % num) |
437 else: |
442 else: |
438 ui.debug("copied %d files\n" % num) |
443 ui.debug(b"copied %d files\n" % num) |
439 return destlock |
444 return destlock |
440 except: # re-raises |
445 except: # re-raises |
441 release(destlock) |
446 release(destlock) |
442 raise |
447 raise |
443 |
448 |
461 will be created at "dest" and a working copy will be created if "update" is |
466 will be created at "dest" and a working copy will be created if "update" is |
462 True. |
467 True. |
463 """ |
468 """ |
464 revs = None |
469 revs = None |
465 if rev: |
470 if rev: |
466 if not srcpeer.capable('lookup'): |
471 if not srcpeer.capable(b'lookup'): |
467 raise error.Abort( |
472 raise error.Abort( |
468 _( |
473 _( |
469 "src repository does not support " |
474 b"src repository does not support " |
470 "revision lookup and so doesn't " |
475 b"revision lookup and so doesn't " |
471 "support clone by revision" |
476 b"support clone by revision" |
472 ) |
477 ) |
473 ) |
478 ) |
474 |
479 |
475 # TODO this is batchable. |
480 # TODO this is batchable. |
476 remoterevs = [] |
481 remoterevs = [] |
477 for r in rev: |
482 for r in rev: |
478 with srcpeer.commandexecutor() as e: |
483 with srcpeer.commandexecutor() as e: |
479 remoterevs.append(e.callcommand('lookup', {'key': r,}).result()) |
484 remoterevs.append( |
|
485 e.callcommand(b'lookup', {b'key': r,}).result() |
|
486 ) |
480 revs = remoterevs |
487 revs = remoterevs |
481 |
488 |
482 # Obtain a lock before checking for or cloning the pooled repo otherwise |
489 # Obtain a lock before checking for or cloning the pooled repo otherwise |
483 # 2 clients may race creating or populating it. |
490 # 2 clients may race creating or populating it. |
484 pooldir = os.path.dirname(sharepath) |
491 pooldir = os.path.dirname(sharepath) |
543 |
552 |
544 # Recomputing branch cache might be slow on big repos, |
553 # Recomputing branch cache might be slow on big repos, |
545 # so just copy it |
554 # so just copy it |
546 def _copycache(srcrepo, dstcachedir, fname): |
555 def _copycache(srcrepo, dstcachedir, fname): |
547 """copy a cache from srcrepo to destcachedir (if it exists)""" |
556 """copy a cache from srcrepo to destcachedir (if it exists)""" |
548 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname) |
557 srcbranchcache = srcrepo.vfs.join(b'cache/%s' % fname) |
549 dstbranchcache = os.path.join(dstcachedir, fname) |
558 dstbranchcache = os.path.join(dstcachedir, fname) |
550 if os.path.exists(srcbranchcache): |
559 if os.path.exists(srcbranchcache): |
551 if not os.path.exists(dstcachedir): |
560 if not os.path.exists(dstcachedir): |
552 os.mkdir(dstcachedir) |
561 os.mkdir(dstcachedir) |
553 util.copyfile(srcbranchcache, dstbranchcache) |
562 util.copyfile(srcbranchcache, dstbranchcache) |
629 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) |
638 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) |
630 |
639 |
631 if dest is None: |
640 if dest is None: |
632 dest = defaultdest(source) |
641 dest = defaultdest(source) |
633 if dest: |
642 if dest: |
634 ui.status(_("destination directory: %s\n") % dest) |
643 ui.status(_(b"destination directory: %s\n") % dest) |
635 else: |
644 else: |
636 dest = ui.expandpath(dest) |
645 dest = ui.expandpath(dest) |
637 |
646 |
638 dest = util.urllocalpath(dest) |
647 dest = util.urllocalpath(dest) |
639 source = util.urllocalpath(source) |
648 source = util.urllocalpath(source) |
640 |
649 |
641 if not dest: |
650 if not dest: |
642 raise error.Abort(_("empty destination path is not valid")) |
651 raise error.Abort(_(b"empty destination path is not valid")) |
643 |
652 |
644 destvfs = vfsmod.vfs(dest, expandpath=True) |
653 destvfs = vfsmod.vfs(dest, expandpath=True) |
645 if destvfs.lexists(): |
654 if destvfs.lexists(): |
646 if not destvfs.isdir(): |
655 if not destvfs.isdir(): |
647 raise error.Abort(_("destination '%s' already exists") % dest) |
656 raise error.Abort(_(b"destination '%s' already exists") % dest) |
648 elif destvfs.listdir(): |
657 elif destvfs.listdir(): |
649 raise error.Abort(_("destination '%s' is not empty") % dest) |
658 raise error.Abort(_(b"destination '%s' is not empty") % dest) |
650 |
659 |
651 createopts = {} |
660 createopts = {} |
652 narrow = False |
661 narrow = False |
653 |
662 |
654 if storeincludepats is not None: |
663 if storeincludepats is not None: |
660 narrow = True |
669 narrow = True |
661 |
670 |
662 if narrow: |
671 if narrow: |
663 # Include everything by default if only exclusion patterns defined. |
672 # Include everything by default if only exclusion patterns defined. |
664 if storeexcludepats and not storeincludepats: |
673 if storeexcludepats and not storeincludepats: |
665 storeincludepats = {'path:.'} |
674 storeincludepats = {b'path:.'} |
666 |
675 |
667 createopts['narrowfiles'] = True |
676 createopts[b'narrowfiles'] = True |
668 |
677 |
669 if depth: |
678 if depth: |
670 createopts['shallowfilestore'] = True |
679 createopts[b'shallowfilestore'] = True |
671 |
680 |
672 if srcpeer.capable(b'lfs-serve'): |
681 if srcpeer.capable(b'lfs-serve'): |
673 # Repository creation honors the config if it disabled the extension, so |
682 # Repository creation honors the config if it disabled the extension, so |
674 # we can't just announce that lfs will be enabled. This check avoids |
683 # we can't just announce that lfs will be enabled. This check avoids |
675 # saying that lfs will be enabled, and then saying it's an unknown |
684 # saying that lfs will be enabled, and then saying it's an unknown |
676 # feature. The lfs creation option is set in either case so that a |
685 # feature. The lfs creation option is set in either case so that a |
677 # requirement is added. If the extension is explicitly disabled but the |
686 # requirement is added. If the extension is explicitly disabled but the |
678 # requirement is set, the clone aborts early, before transferring any |
687 # requirement is set, the clone aborts early, before transferring any |
679 # data. |
688 # data. |
680 createopts['lfs'] = True |
689 createopts[b'lfs'] = True |
681 |
690 |
682 if extensions.disabledext('lfs'): |
691 if extensions.disabledext(b'lfs'): |
683 ui.status( |
692 ui.status( |
684 _( |
693 _( |
685 '(remote is using large file support (lfs), but it is ' |
694 b'(remote is using large file support (lfs), but it is ' |
686 'explicitly disabled in the local configuration)\n' |
695 b'explicitly disabled in the local configuration)\n' |
687 ) |
696 ) |
688 ) |
697 ) |
689 else: |
698 else: |
690 ui.status( |
699 ui.status( |
691 _( |
700 _( |
692 '(remote is using large file support (lfs); lfs will ' |
701 b'(remote is using large file support (lfs); lfs will ' |
693 'be enabled for this repository)\n' |
702 b'be enabled for this repository)\n' |
694 ) |
703 ) |
695 ) |
704 ) |
696 |
705 |
697 shareopts = shareopts or {} |
706 shareopts = shareopts or {} |
698 sharepool = shareopts.get('pool') |
707 sharepool = shareopts.get(b'pool') |
699 sharenamemode = shareopts.get('mode') |
708 sharenamemode = shareopts.get(b'mode') |
700 if sharepool and islocal(dest): |
709 if sharepool and islocal(dest): |
701 sharepath = None |
710 sharepath = None |
702 if sharenamemode == 'identity': |
711 if sharenamemode == b'identity': |
703 # Resolve the name from the initial changeset in the remote |
712 # Resolve the name from the initial changeset in the remote |
704 # repository. This returns nullid when the remote is empty. It |
713 # repository. This returns nullid when the remote is empty. It |
705 # raises RepoLookupError if revision 0 is filtered or otherwise |
714 # raises RepoLookupError if revision 0 is filtered or otherwise |
706 # not available. If we fail to resolve, sharing is not enabled. |
715 # not available. If we fail to resolve, sharing is not enabled. |
707 try: |
716 try: |
708 with srcpeer.commandexecutor() as e: |
717 with srcpeer.commandexecutor() as e: |
709 rootnode = e.callcommand('lookup', {'key': '0',}).result() |
718 rootnode = e.callcommand( |
|
719 b'lookup', {b'key': b'0',} |
|
720 ).result() |
710 |
721 |
711 if rootnode != node.nullid: |
722 if rootnode != node.nullid: |
712 sharepath = os.path.join(sharepool, node.hex(rootnode)) |
723 sharepath = os.path.join(sharepool, node.hex(rootnode)) |
713 else: |
724 else: |
714 ui.status( |
725 ui.status( |
715 _( |
726 _( |
716 '(not using pooled storage: ' |
727 b'(not using pooled storage: ' |
717 'remote appears to be empty)\n' |
728 b'remote appears to be empty)\n' |
718 ) |
729 ) |
719 ) |
730 ) |
720 except error.RepoLookupError: |
731 except error.RepoLookupError: |
721 ui.status( |
732 ui.status( |
722 _( |
733 _( |
723 '(not using pooled storage: ' |
734 b'(not using pooled storage: ' |
724 'unable to resolve identity of remote)\n' |
735 b'unable to resolve identity of remote)\n' |
725 ) |
736 ) |
726 ) |
737 ) |
727 elif sharenamemode == 'remote': |
738 elif sharenamemode == b'remote': |
728 sharepath = os.path.join( |
739 sharepath = os.path.join( |
729 sharepool, node.hex(hashlib.sha1(source).digest()) |
740 sharepool, node.hex(hashlib.sha1(source).digest()) |
730 ) |
741 ) |
731 else: |
742 else: |
732 raise error.Abort( |
743 raise error.Abort( |
733 _('unknown share naming mode: %s') % sharenamemode |
744 _(b'unknown share naming mode: %s') % sharenamemode |
734 ) |
745 ) |
735 |
746 |
736 # TODO this is a somewhat arbitrary restriction. |
747 # TODO this is a somewhat arbitrary restriction. |
737 if narrow: |
748 if narrow: |
738 ui.status(_('(pooled storage not supported for narrow clones)\n')) |
749 ui.status(_(b'(pooled storage not supported for narrow clones)\n')) |
739 sharepath = None |
750 sharepath = None |
740 |
751 |
741 if sharepath: |
752 if sharepath: |
742 return clonewithshare( |
753 return clonewithshare( |
743 ui, |
754 ui, |
798 util.makedir(destpath, notindexed=True) |
809 util.makedir(destpath, notindexed=True) |
799 except OSError as inst: |
810 except OSError as inst: |
800 if inst.errno == errno.EEXIST: |
811 if inst.errno == errno.EEXIST: |
801 cleandir = None |
812 cleandir = None |
802 raise error.Abort( |
813 raise error.Abort( |
803 _("destination '%s' already exists") % dest |
814 _(b"destination '%s' already exists") % dest |
804 ) |
815 ) |
805 raise |
816 raise |
806 |
817 |
807 destlock = copystore(ui, srcrepo, destpath) |
818 destlock = copystore(ui, srcrepo, destpath) |
808 # copy bookmarks over |
819 # copy bookmarks over |
809 srcbookmarks = srcrepo.vfs.join('bookmarks') |
820 srcbookmarks = srcrepo.vfs.join(b'bookmarks') |
810 dstbookmarks = os.path.join(destpath, 'bookmarks') |
821 dstbookmarks = os.path.join(destpath, b'bookmarks') |
811 if os.path.exists(srcbookmarks): |
822 if os.path.exists(srcbookmarks): |
812 util.copyfile(srcbookmarks, dstbookmarks) |
823 util.copyfile(srcbookmarks, dstbookmarks) |
813 |
824 |
814 dstcachedir = os.path.join(destpath, 'cache') |
825 dstcachedir = os.path.join(destpath, b'cache') |
815 for cache in cacheutil.cachetocopy(srcrepo): |
826 for cache in cacheutil.cachetocopy(srcrepo): |
816 _copycache(srcrepo, dstcachedir, cache) |
827 _copycache(srcrepo, dstcachedir, cache) |
817 |
828 |
818 # we need to re-init the repo after manually copying the data |
829 # we need to re-init the repo after manually copying the data |
819 # into it |
830 # into it |
820 destpeer = peer(srcrepo, peeropts, dest) |
831 destpeer = peer(srcrepo, peeropts, dest) |
821 srcrepo.hook('outgoing', source='clone', node=node.hex(node.nullid)) |
832 srcrepo.hook( |
|
833 b'outgoing', source=b'clone', node=node.hex(node.nullid) |
|
834 ) |
822 else: |
835 else: |
823 try: |
836 try: |
824 # only pass ui when no srcrepo |
837 # only pass ui when no srcrepo |
825 destpeer = peer( |
838 destpeer = peer( |
826 srcrepo or ui, |
839 srcrepo or ui, |
866 local.setnarrowpats(storeincludepats, storeexcludepats) |
879 local.setnarrowpats(storeincludepats, storeexcludepats) |
867 narrowspec.copytoworkingcopy(local) |
880 narrowspec.copytoworkingcopy(local) |
868 |
881 |
869 u = util.url(abspath) |
882 u = util.url(abspath) |
870 defaulturl = bytes(u) |
883 defaulturl = bytes(u) |
871 local.ui.setconfig('paths', 'default', defaulturl, 'clone') |
884 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
872 if not stream: |
885 if not stream: |
873 if pull: |
886 if pull: |
874 stream = False |
887 stream = False |
875 else: |
888 else: |
876 stream = None |
889 stream = None |
877 # internal config: ui.quietbookmarkmove |
890 # internal config: ui.quietbookmarkmove |
878 overrides = {('ui', 'quietbookmarkmove'): True} |
891 overrides = {(b'ui', b'quietbookmarkmove'): True} |
879 with local.ui.configoverride(overrides, 'clone'): |
892 with local.ui.configoverride(overrides, b'clone'): |
880 exchange.pull( |
893 exchange.pull( |
881 local, |
894 local, |
882 srcpeer, |
895 srcpeer, |
883 revs, |
896 revs, |
884 streamclonerequested=stream, |
897 streamclonerequested=stream, |
903 revs=revs, |
916 revs=revs, |
904 bookmarks=srcrepo._bookmarks.keys(), |
917 bookmarks=srcrepo._bookmarks.keys(), |
905 ) |
918 ) |
906 else: |
919 else: |
907 raise error.Abort( |
920 raise error.Abort( |
908 _("clone from remote to remote not supported") |
921 _(b"clone from remote to remote not supported") |
909 ) |
922 ) |
910 |
923 |
911 cleandir = None |
924 cleandir = None |
912 |
925 |
913 destrepo = destpeer.local() |
926 destrepo = destpeer.local() |
914 if destrepo: |
927 if destrepo: |
915 template = uimod.samplehgrcs['cloned'] |
928 template = uimod.samplehgrcs[b'cloned'] |
916 u = util.url(abspath) |
929 u = util.url(abspath) |
917 u.passwd = None |
930 u.passwd = None |
918 defaulturl = bytes(u) |
931 defaulturl = bytes(u) |
919 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl)) |
932 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl)) |
920 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone') |
933 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone') |
921 |
934 |
922 if ui.configbool('experimental', 'remotenames'): |
935 if ui.configbool(b'experimental', b'remotenames'): |
923 logexchange.pullremotenames(destrepo, srcpeer) |
936 logexchange.pullremotenames(destrepo, srcpeer) |
924 |
937 |
925 if update: |
938 if update: |
926 if update is not True: |
939 if update is not True: |
927 with srcpeer.commandexecutor() as e: |
940 with srcpeer.commandexecutor() as e: |
928 checkout = e.callcommand( |
941 checkout = e.callcommand( |
929 'lookup', {'key': update,} |
942 b'lookup', {b'key': update,} |
930 ).result() |
943 ).result() |
931 |
944 |
932 uprev = None |
945 uprev = None |
933 status = None |
946 status = None |
934 if checkout is not None: |
947 if checkout is not None: |
946 uprev = destrepo.lookup(update) |
959 uprev = destrepo.lookup(update) |
947 except error.RepoLookupError: |
960 except error.RepoLookupError: |
948 pass |
961 pass |
949 if uprev is None: |
962 if uprev is None: |
950 try: |
963 try: |
951 uprev = destrepo._bookmarks['@'] |
964 uprev = destrepo._bookmarks[b'@'] |
952 update = '@' |
965 update = b'@' |
953 bn = destrepo[uprev].branch() |
966 bn = destrepo[uprev].branch() |
954 if bn == 'default': |
967 if bn == b'default': |
955 status = _("updating to bookmark @\n") |
968 status = _(b"updating to bookmark @\n") |
956 else: |
969 else: |
957 status = ( |
970 status = ( |
958 _("updating to bookmark @ on branch %s\n") % bn |
971 _(b"updating to bookmark @ on branch %s\n") % bn |
959 ) |
972 ) |
960 except KeyError: |
973 except KeyError: |
961 try: |
974 try: |
962 uprev = destrepo.branchtip('default') |
975 uprev = destrepo.branchtip(b'default') |
963 except error.RepoLookupError: |
976 except error.RepoLookupError: |
964 uprev = destrepo.lookup('tip') |
977 uprev = destrepo.lookup(b'tip') |
965 if not status: |
978 if not status: |
966 bn = destrepo[uprev].branch() |
979 bn = destrepo[uprev].branch() |
967 status = _("updating to branch %s\n") % bn |
980 status = _(b"updating to branch %s\n") % bn |
968 destrepo.ui.status(status) |
981 destrepo.ui.status(status) |
969 _update(destrepo, uprev) |
982 _update(destrepo, uprev) |
970 if update in destrepo._bookmarks: |
983 if update in destrepo._bookmarks: |
971 bookmarks.activate(destrepo, update) |
984 bookmarks.activate(destrepo, update) |
972 finally: |
985 finally: |
1004 return mergemod.update( |
1017 return mergemod.update( |
1005 repo, |
1018 repo, |
1006 node, |
1019 node, |
1007 branchmerge=False, |
1020 branchmerge=False, |
1008 force=overwrite, |
1021 force=overwrite, |
1009 labels=['working copy', 'destination'], |
1022 labels=[b'working copy', b'destination'], |
1010 updatecheck=updatecheck, |
1023 updatecheck=updatecheck, |
1011 ) |
1024 ) |
1012 |
1025 |
1013 |
1026 |
1014 def update(repo, node, quietempty=False, updatecheck=None): |
1027 def update(repo, node, quietempty=False, updatecheck=None): |
1015 """update the working directory to node""" |
1028 """update the working directory to node""" |
1016 stats = updaterepo(repo, node, False, updatecheck=updatecheck) |
1029 stats = updaterepo(repo, node, False, updatecheck=updatecheck) |
1017 _showstats(repo, stats, quietempty) |
1030 _showstats(repo, stats, quietempty) |
1018 if stats.unresolvedcount: |
1031 if stats.unresolvedcount: |
1019 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n")) |
1032 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n")) |
1020 return stats.unresolvedcount > 0 |
1033 return stats.unresolvedcount > 0 |
1021 |
1034 |
1022 |
1035 |
1023 # naming conflict in clone() |
1036 # naming conflict in clone() |
1024 _update = update |
1037 _update = update |
1025 |
1038 |
1026 |
1039 |
1027 def clean(repo, node, show_stats=True, quietempty=False): |
1040 def clean(repo, node, show_stats=True, quietempty=False): |
1028 """forcibly switch the working directory to node, clobbering changes""" |
1041 """forcibly switch the working directory to node, clobbering changes""" |
1029 stats = updaterepo(repo, node, True) |
1042 stats = updaterepo(repo, node, True) |
1030 repo.vfs.unlinkpath('graftstate', ignoremissing=True) |
1043 repo.vfs.unlinkpath(b'graftstate', ignoremissing=True) |
1031 if show_stats: |
1044 if show_stats: |
1032 _showstats(repo, stats, quietempty) |
1045 _showstats(repo, stats, quietempty) |
1033 return stats.unresolvedcount > 0 |
1046 return stats.unresolvedcount > 0 |
1034 |
1047 |
1035 |
1048 |
1095 cmdutil.bailifchanged(repo, merge=False) |
1108 cmdutil.bailifchanged(repo, merge=False) |
1096 updatecheck = mergemod.UPDATECHECK_NONE |
1109 updatecheck = mergemod.UPDATECHECK_NONE |
1097 ret = _update(repo, checkout, updatecheck=updatecheck) |
1110 ret = _update(repo, checkout, updatecheck=updatecheck) |
1098 |
1111 |
1099 if not ret and movemarkfrom: |
1112 if not ret and movemarkfrom: |
1100 if movemarkfrom == repo['.'].node(): |
1113 if movemarkfrom == repo[b'.'].node(): |
1101 pass # no-op update |
1114 pass # no-op update |
1102 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()): |
1115 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()): |
1103 b = ui.label(repo._activebookmark, 'bookmarks.active') |
1116 b = ui.label(repo._activebookmark, b'bookmarks.active') |
1104 ui.status(_("updating bookmark %s\n") % b) |
1117 ui.status(_(b"updating bookmark %s\n") % b) |
1105 else: |
1118 else: |
1106 # this can happen with a non-linear update |
1119 # this can happen with a non-linear update |
1107 b = ui.label(repo._activebookmark, 'bookmarks') |
1120 b = ui.label(repo._activebookmark, b'bookmarks') |
1108 ui.status(_("(leaving bookmark %s)\n") % b) |
1121 ui.status(_(b"(leaving bookmark %s)\n") % b) |
1109 bookmarks.deactivate(repo) |
1122 bookmarks.deactivate(repo) |
1110 elif brev in repo._bookmarks: |
1123 elif brev in repo._bookmarks: |
1111 if brev != repo._activebookmark: |
1124 if brev != repo._activebookmark: |
1112 b = ui.label(brev, 'bookmarks.active') |
1125 b = ui.label(brev, b'bookmarks.active') |
1113 ui.status(_("(activating bookmark %s)\n") % b) |
1126 ui.status(_(b"(activating bookmark %s)\n") % b) |
1114 bookmarks.activate(repo, brev) |
1127 bookmarks.activate(repo, brev) |
1115 elif brev: |
1128 elif brev: |
1116 if repo._activebookmark: |
1129 if repo._activebookmark: |
1117 b = ui.label(repo._activebookmark, 'bookmarks') |
1130 b = ui.label(repo._activebookmark, b'bookmarks') |
1118 ui.status(_("(leaving bookmark %s)\n") % b) |
1131 ui.status(_(b"(leaving bookmark %s)\n") % b) |
1119 bookmarks.deactivate(repo) |
1132 bookmarks.deactivate(repo) |
1120 |
1133 |
1121 if warndest: |
1134 if warndest: |
1122 destutil.statusotherdests(ui, repo) |
1135 destutil.statusotherdests(ui, repo) |
1123 |
1136 |
1148 ) |
1161 ) |
1149 _showstats(repo, stats) |
1162 _showstats(repo, stats) |
1150 if stats.unresolvedcount: |
1163 if stats.unresolvedcount: |
1151 repo.ui.status( |
1164 repo.ui.status( |
1152 _( |
1165 _( |
1153 "use 'hg resolve' to retry unresolved file merges " |
1166 b"use 'hg resolve' to retry unresolved file merges " |
1154 "or 'hg merge --abort' to abandon\n" |
1167 b"or 'hg merge --abort' to abandon\n" |
1155 ) |
1168 ) |
1156 ) |
1169 ) |
1157 elif remind: |
1170 elif remind: |
1158 repo.ui.status(_("(branch merge, don't forget to commit)\n")) |
1171 repo.ui.status(_(b"(branch merge, don't forget to commit)\n")) |
1159 return stats.unresolvedcount > 0 |
1172 return stats.unresolvedcount > 0 |
1160 |
1173 |
1161 |
1174 |
def abortmerge(ui, repo):
    """Abort an in-progress merge by updating back to the first parent.

    If a merge state with conflicts exists, updates back to the local
    (first-parent) side recorded in that state; otherwise falls back to
    the working directory parent ``.``.

    Returns True if unresolved files remain after the update, else False.
    """
    ms = mergemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts; use the local side recorded in merge state
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no mergestate was stored; the
        # working directory parent is the node to return to
        node = repo[b'.'].hex()

    repo.ui.status(
        _(b"aborting the merge, updating back to %s\n") % node[:12]
    )
    # force=True discards the uncommitted merge changes
    stats = mergemod.update(repo, node, branchmerge=False, force=True)
    _showstats(repo, stats)
    return stats.unresolvedcount > 0
1177 |
1190 |
1183 Helper for incoming / gincoming. |
1196 Helper for incoming / gincoming. |
1184 displaychlist gets called with |
1197 displaychlist gets called with |
1185 (remoterepo, incomingchangesetlist, displayer) parameters, |
1198 (remoterepo, incomingchangesetlist, displayer) parameters, |
1186 and is supposed to contain only code that can't be unified. |
1199 and is supposed to contain only code that can't be unified. |
1187 """ |
1200 """ |
1188 source, branches = parseurl(ui.expandpath(source), opts.get('branch')) |
1201 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch')) |
1189 other = peer(repo, opts, source) |
1202 other = peer(repo, opts, source) |
1190 ui.status(_('comparing with %s\n') % util.hidepassword(source)) |
1203 ui.status(_(b'comparing with %s\n') % util.hidepassword(source)) |
1191 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev')) |
1204 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev')) |
1192 |
1205 |
1193 if revs: |
1206 if revs: |
1194 revs = [other.lookup(rev) for rev in revs] |
1207 revs = [other.lookup(rev) for rev in revs] |
1195 other, chlist, cleanupfn = bundlerepo.getremotechanges( |
1208 other, chlist, cleanupfn = bundlerepo.getremotechanges( |
1196 ui, repo, other, revs, opts["bundle"], opts["force"] |
1209 ui, repo, other, revs, opts[b"bundle"], opts[b"force"] |
1197 ) |
1210 ) |
1198 try: |
1211 try: |
1199 if not chlist: |
1212 if not chlist: |
1200 ui.status(_("no changes found\n")) |
1213 ui.status(_(b"no changes found\n")) |
1201 return subreporecurse() |
1214 return subreporecurse() |
1202 ui.pager('incoming') |
1215 ui.pager(b'incoming') |
1203 displayer = logcmdutil.changesetdisplayer( |
1216 displayer = logcmdutil.changesetdisplayer( |
1204 ui, other, opts, buffered=buffered |
1217 ui, other, opts, buffered=buffered |
1205 ) |
1218 ) |
1206 displaychlist(other, chlist, displayer) |
1219 displaychlist(other, chlist, displayer) |
1207 displayer.close() |
1220 displayer.close() |
1212 |
1225 |
1213 |
1226 |
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from ``source``.

    Builds the two callbacks ``_incoming`` needs — one to recurse into
    subrepositories, one to display the incoming changeset list — and
    delegates the actual work to ``_incoming``.

    Returns the exit status produced by ``_incoming`` (presumably 0 when
    incoming changes were found — confirm against ``_incoming``).
    """

    def subreporecurse():
        # Recurse into each subrepo when --subrepos was given; the overall
        # result is the minimum (i.e. "best") status of all subrepos.
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        # Show up to --limit changesets from chlist, honoring
        # --newest-first and --no-merges.
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                # two real parents == merge changeset; skip it
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1240 |
1253 |
def _outgoing(ui, repo, dest, opts):
    """Compute the changesets missing from ``dest``.

    Resolves ``dest`` through the configured paths (``default-push``
    falling back to ``default``), optionally restricts the computation to
    ``--rev``/``--branch``, and runs outgoing discovery against the remote.

    Returns a ``(missing_nodes, other_peer)`` pair, where
    ``missing_nodes`` is the (possibly empty) list of nodes not present
    remotely and ``other_peer`` is the peer object for ``dest``.

    Raises error.Abort when no destination is configured.
    """
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        # expand revsets locally before discovery
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, other, revs, force=opts.get(b'force')
    )
    o = outgoing.missing
    if not o:
        # nothing to push; report secret/excluded changesets if any
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
1264 |
1277 |
1265 |
1278 |
1266 def outgoing(ui, repo, dest, opts): |
1279 def outgoing(ui, repo, dest, opts): |
1267 def recurse(): |
1280 def recurse(): |
1268 ret = 1 |
1281 ret = 1 |
1269 if opts.get('subrepos'): |
1282 if opts.get(b'subrepos'): |
1270 ctx = repo[None] |
1283 ctx = repo[None] |
1271 for subpath in sorted(ctx.substate): |
1284 for subpath in sorted(ctx.substate): |
1272 sub = ctx.sub(subpath) |
1285 sub = ctx.sub(subpath) |
1273 ret = min(ret, sub.outgoing(ui, dest, opts)) |
1286 ret = min(ret, sub.outgoing(ui, dest, opts)) |
1274 return ret |
1287 return ret |
1277 o, other = _outgoing(ui, repo, dest, opts) |
1290 o, other = _outgoing(ui, repo, dest, opts) |
1278 if not o: |
1291 if not o: |
1279 cmdutil.outgoinghooks(ui, repo, other, opts, o) |
1292 cmdutil.outgoinghooks(ui, repo, other, opts, o) |
1280 return recurse() |
1293 return recurse() |
1281 |
1294 |
1282 if opts.get('newest_first'): |
1295 if opts.get(b'newest_first'): |
1283 o.reverse() |
1296 o.reverse() |
1284 ui.pager('outgoing') |
1297 ui.pager(b'outgoing') |
1285 displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
1298 displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
1286 count = 0 |
1299 count = 0 |
1287 for n in o: |
1300 for n in o: |
1288 if limit is not None and count >= limit: |
1301 if limit is not None and count >= limit: |
1289 break |
1302 break |
1290 parents = [p for p in repo.changelog.parents(n) if p != nullid] |
1303 parents = [p for p in repo.changelog.parents(n) if p != nullid] |
1291 if opts.get('no_merges') and len(parents) == 2: |
1304 if opts.get(b'no_merges') and len(parents) == 2: |
1292 continue |
1305 continue |
1293 count += 1 |
1306 count += 1 |
1294 displayer.show(repo[n]) |
1307 displayer.show(repo[n]) |
1295 displayer.close() |
1308 displayer.close() |
1296 cmdutil.outgoinghooks(ui, repo, other, opts, o) |
1309 cmdutil.outgoinghooks(ui, repo, other, opts, o) |
1306 # since they can't be pushed/pulled, and --hidden can be used if they are a |
1319 # since they can't be pushed/pulled, and --hidden can be used if they are a |
1307 # concern. |
1320 # concern. |
1308 |
1321 |
1309 # pathto() is needed for -R case |
1322 # pathto() is needed for -R case |
1310 revs = repo.revs( |
1323 revs = repo.revs( |
1311 "filelog(%s)", util.pathto(repo.root, repo.getcwd(), '.hgsubstate') |
1324 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate') |
1312 ) |
1325 ) |
1313 |
1326 |
1314 if revs: |
1327 if revs: |
1315 repo.ui.status(_('checking subrepo links\n')) |
1328 repo.ui.status(_(b'checking subrepo links\n')) |
1316 for rev in revs: |
1329 for rev in revs: |
1317 ctx = repo[rev] |
1330 ctx = repo[rev] |
1318 try: |
1331 try: |
1319 for subpath in ctx.substate: |
1332 for subpath in ctx.substate: |
1320 try: |
1333 try: |
1321 ret = ( |
1334 ret = ( |
1322 ctx.sub(subpath, allowcreate=False).verify() or ret |
1335 ctx.sub(subpath, allowcreate=False).verify() or ret |
1323 ) |
1336 ) |
1324 except error.RepoError as e: |
1337 except error.RepoError as e: |
1325 repo.ui.warn('%d: %s\n' % (rev, e)) |
1338 repo.ui.warn(b'%d: %s\n' % (rev, e)) |
1326 except Exception: |
1339 except Exception: |
1327 repo.ui.warn( |
1340 repo.ui.warn( |
1328 _('.hgsubstate is corrupt in revision %s\n') |
1341 _(b'.hgsubstate is corrupt in revision %s\n') |
1329 % node.short(ctx.node()) |
1342 % node.short(ctx.node()) |
1330 ) |
1343 ) |
1331 |
1344 |
1332 return ret |
1345 return ret |
1333 |
1346 |
1334 |
1347 |
def remoteui(src, opts):
    """Build a ui suitable for talking to a remote, from a ui or repo.

    ``src`` may be a repository (anything with a ``baseui`` attribute) or
    a plain ui object. Repo-specific configuration is dropped, then the
    ssh-, bundle- and security-related settings that must reach the
    remote side are copied over from ``src``/``opts``.

    Returns the new ui object.
    """
    # NOTE: a bytes literal here would not become __doc__; use a real
    # str docstring (behavior of the function itself is unchanged).
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options; command-line opts win over config
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui (auth and TLS/proxy
    # configuration must be visible when contacting the remote)
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
1363 |
1376 |
1364 |
1377 |
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (vfs attribute name, relative file path).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1374 |
1387 |
1375 |
1388 |
1376 class cachedlocalrepo(object): |
1389 class cachedlocalrepo(object): |
1377 """Holds a localrepository that can be cached and reused.""" |
1390 """Holds a localrepository that can be cached and reused.""" |