comparison mercurial/localrepo.py @ 43076:2372284d9457

formatting: blacken the codebase

This is using my patch to black (https://github.com/psf/black/pull/826)
so we don't un-wrap collection literals.

Done with:

  hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S

# skip-blame mass-reformatting only

# no-check-commit reformats foo_bar functions

Differential Revision: https://phab.mercurial-scm.org/D6971
author Augie Fackler <augie@google.com>
date Sun, 06 Oct 2019 09:45:02 -0400
parents 827cb4fe62a3
children 687b865b95ad
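Most hunks below are the product of three mechanical transformations: argument lists and collection literals are exploded one element per line with a trailing comma, wrapped string concatenations are joined onto single lines without merging the literals, and top-level definitions get two surrounding blank lines. As a minimal sketch (assuming the behavior of the patched black from PR 826, not re-run here), the moderncaps literal from this file goes from

    # Before: hand-wrapped at column 80, pre-black Mercurial style.
    moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'}

to

    # After black -S with the PR 826 patch: the already-wrapped literal
    # stays multi-line, one element per line, with a trailing comma.
    moderncaps = {
        'lookup',
        'branchmap',
        'pushkey',
        'known',
        'getbundle',
        'unbundle',
    }

where -S (--skip-string-normalization) also leaves the single-quoted strings as they are.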
--- 43075:57875cf423c9
+++ 43076:2372284d9457
@@ -72,26 +72,26 @@
 from .utils import (
     procutil,
     stringutil,
 )

-from .revlogutils import (
-    constants as revlogconst,
-)
+from .revlogutils import constants as revlogconst

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()

+
 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
+
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
@@ -102,32 +102,38 @@
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)

+
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
+
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)

+
 class storecache(_basefilecache):
     """filecache for files in the store"""
+
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))

     def join(self, obj, fname):
         return obj.sjoin(fname)

+
 class mixedrepostorecache(_basefilecache):
     """filecache for a mix files in .hg/store and outside"""
+
     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)
@@ -136,13 +142,15 @@
         fname, location = fnameandlocation
         if location == 'plain':
             return obj.vfs.join(fname)
         else:
             if location != '':
-                raise error.ProgrammingError('unexpected location: %s' %
-                                             location)
+                raise error.ProgrammingError(
+                    'unexpected location: %s' % location
+                )
             return obj.sjoin(fname)
+

 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
@@ -150,19 +158,21 @@
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True

+
 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

+
 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
@@ -170,19 +180,30 @@

 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

+
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
+
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
+
     return wrapper

-moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
-              'unbundle'}
+
+moderncaps = {
+    'lookup',
+    'branchmap',
+    'pushkey',
+    'known',
+    'getbundle',
+    'unbundle',
+}
 legacycaps = moderncaps.union({'changegroupsubset'})
+

 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
@@ -195,16 +216,18 @@
     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
-            raise error.ProgrammingError('callcommand() cannot be used after '
-                                         'sendcommands()')
+            raise error.ProgrammingError(
+                'callcommand() cannot be used after ' 'sendcommands()'
+            )

         if self._closed:
-            raise error.ProgrammingError('callcommand() cannot be used after '
-                                         'close()')
+            raise error.ProgrammingError(
+                'callcommand() cannot be used after ' 'close()'
+            )

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

@@ -223,10 +246,11 @@
         self._sent = True

     def close(self):
         self._closed = True

+
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=None):
@@ -268,19 +292,29 @@
     def clonebundles(self):
         return self._repo.tryread('clonebundles.manifest')

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
-        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
-                                   pycompat.bytestr(four),
-                                   pycompat.bytestr(five))
-
-    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
-                  **kwargs):
-        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
-                                          common=common, bundlecaps=bundlecaps,
-                                          **kwargs)[1]
+        return "%s %s %s %s %s" % (
+            one,
+            two,
+            pycompat.bytestr(three),
+            pycompat.bytestr(four),
+            pycompat.bytestr(five),
+        )
+
+    def getbundle(
+        self, source, heads=None, common=None, bundlecaps=None, **kwargs
+    ):
+        chunks = exchange.getbundlechunks(
+            self._repo,
+            source,
+            heads=heads,
+            common=common,
+            bundlecaps=bundlecaps,
+            **kwargs
+        )[1]
         cb = util.chunkbuffer(chunks)

         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
@@ -303,12 +337,13 @@

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def stream_out(self):
-        raise error.Abort(_('cannot perform stream clone against local '
-                            'peer'))
+        raise error.Abort(
+            _('cannot perform stream clone against local ' 'peer')
+        )

     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
@@ -339,21 +374,23 @@
             stream = util.chunkbuffer(bundler.getchunks())
             b = bundle2.getunbundler(self.ui, stream)
             bundle2.processbundle(self._repo, b)
             raise
         except error.PushRaced as exc:
-            raise error.ResponseError(_('push failed:'),
-                                      stringutil.forcebytestr(exc))
+            raise error.ResponseError(
+                _('push failed:'), stringutil.forcebytestr(exc)
+            )

     # End of _basewirecommands interface.

     # Begin of peer interface.

     def commandexecutor(self):
         return localcommandexecutor(self)

     # End of peer interface.
+

 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
@@ -368,20 +405,23 @@

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def changegroup(self, nodes, source):
-        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
-                                      missingheads=self._repo.heads())
+        outgoing = discovery.outgoing(
+            self._repo, missingroots=nodes, missingheads=self._repo.heads()
+        )
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)

     def changegroupsubset(self, bases, heads, source):
-        outgoing = discovery.outgoing(self._repo, missingroots=bases,
-                                      missingheads=heads)
+        outgoing = discovery.outgoing(
+            self._repo, missingroots=bases, missingheads=heads
+        )
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)

     # End of baselegacywirecommands interface.
+

 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

@@ -404,10 +444,11 @@
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle that requirements.
 featuresetupfuncs = set()
+

 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.

     Given arguments needed to construct a local repository, this function
@@ -520,28 +561,32 @@
         sharedpath = hgvfs.join(sharedpath)

         sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

         if not sharedvfs.exists():
-            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
-                                    b'directory %s') % sharedvfs.base)
+            raise error.RepoError(
+                _(b'.hg/sharedpath points to nonexistent ' b'directory %s')
+                % sharedvfs.base
+            )

         features.add(repository.REPO_FEATURE_SHARED_STORAGE)

         storebasepath = sharedvfs.base
         cachepath = sharedvfs.join(b'cache')
     else:
         storebasepath = hgvfs.base
         cachepath = hgvfs.join(b'cache')
     wcachepath = hgvfs.join(b'wcache')

-
     # The store has changed over time and the exact layout is dictated by
     # requirements. The store interface abstracts differences across all
     # of them.
-    store = makestore(requirements, storebasepath,
-                      lambda base: vfsmod.vfs(base, cacheaudited=True))
+    store = makestore(
+        requirements,
+        storebasepath,
+        lambda base: vfsmod.vfs(base, cacheaudited=True),
+    )
     hgvfs.createmode = store.createmode

     storevfs = store.vfs
     storevfs.options = resolvestorevfsoptions(ui, requirements, features)

@@ -561,37 +606,40 @@
     extrastate = {}

     for iface, fn in REPO_INTERFACES:
         # We pass all potentially useful state to give extensions tons of
         # flexibility.
-        typ = fn()(ui=ui,
-                   intents=intents,
-                   requirements=requirements,
-                   features=features,
-                   wdirvfs=wdirvfs,
-                   hgvfs=hgvfs,
-                   store=store,
-                   storevfs=storevfs,
-                   storeoptions=storevfs.options,
-                   cachevfs=cachevfs,
-                   wcachevfs=wcachevfs,
-                   extensionmodulenames=extensionmodulenames,
-                   extrastate=extrastate,
-                   baseclasses=bases)
+        typ = fn()(
+            ui=ui,
+            intents=intents,
+            requirements=requirements,
+            features=features,
+            wdirvfs=wdirvfs,
+            hgvfs=hgvfs,
+            store=store,
+            storevfs=storevfs,
+            storeoptions=storevfs.options,
+            cachevfs=cachevfs,
+            wcachevfs=wcachevfs,
+            extensionmodulenames=extensionmodulenames,
+            extrastate=extrastate,
+            baseclasses=bases,
+        )

         if not isinstance(typ, type):
-            raise error.ProgrammingError('unable to construct type for %s' %
-                                         iface)
+            raise error.ProgrammingError(
+                'unable to construct type for %s' % iface
+            )

         bases.append(typ)

     # type() allows you to use characters in type names that wouldn't be
     # recognized as Python symbols in source code. We abuse that to add
     # rich information about our constructed repo.
-    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
-        wdirvfs.base,
-        b','.join(sorted(requirements))))
+    name = pycompat.sysstr(
+        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
+    )

     cls = type(name, tuple(bases), {})

     return cls(
         baseui=baseui,
@@ -604,11 +652,13 @@
         sharedpath=storebasepath,
         store=store,
         cachevfs=cachevfs,
         wcachevfs=wcachevfs,
         features=features,
-        intents=intents)
+        intents=intents,
+    )
+

 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
     """Load hgrc files/content into a ui instance.

     This is called during repository opening to load any additional
@@ -624,10 +674,11 @@
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
         return True
     except IOError:
         return False

+
 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
     """Perform additional actions after .hg/hgrc is loaded.

     This function is called during repository loading immediately after
     the .hg/hgrc file is loaded and before per-repo extensions are loaded.
@@ -648,10 +699,11 @@
             continue

         for name in names:
             if not ui.hasconfig(b'extensions', name):
                 ui.setconfig(b'extensions', name, b'', source='autoload')
+

 def gathersupportedrequirements(ui):
     """Determine the complete set of recognized requirements."""
     # Start with all requirements supported by this file.
     supported = set(localrepository._basesupported)
@@ -672,10 +724,11 @@
         if engine.name() == 'zstd':
             supported.add(b'revlog-compression-zstd')

     return supported

+
 def ensurerequirementsrecognized(requirements, supported):
     """Validate that a set of local requirements is recognized.

     Receives a set of requirements. Raises an ``error.RepoError`` if there
     exists any requirement in that set that currently loaded code doesn't
@@ -694,14 +747,18 @@

         missing.add(requirement)

     if missing:
         raise error.RequirementError(
-            _(b'repository requires features unknown to this Mercurial: %s') %
-            b' '.join(sorted(missing)),
-            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
-                   b'for more information'))
+            _(b'repository requires features unknown to this Mercurial: %s')
+            % b' '.join(sorted(missing)),
+            hint=_(
+                b'see https://mercurial-scm.org/wiki/MissingRequirement '
+                b'for more information'
+            ),
+        )
+

 def ensurerequirementscompatible(ui, requirements):
     """Validates that a set of recognized requirements is mutually compatible.

     Some requirements may not be compatible with others or require
@@ -713,24 +770,31 @@
     checking.

     ``error.RepoError`` should be raised on failure.
     """
     if b'exp-sparse' in requirements and not sparse.enabled:
-        raise error.RepoError(_(b'repository is using sparse feature but '
-                                b'sparse is not enabled; enable the '
-                                b'"sparse" extensions to access'))
+        raise error.RepoError(
+            _(
+                b'repository is using sparse feature but '
+                b'sparse is not enabled; enable the '
+                b'"sparse" extensions to access'
+            )
+        )
+

 def makestore(requirements, path, vfstype):
     """Construct a storage object for a repository."""
     if b'store' in requirements:
         if b'fncache' in requirements:
-            return storemod.fncachestore(path, vfstype,
-                                         b'dotencode' in requirements)
+            return storemod.fncachestore(
+                path, vfstype, b'dotencode' in requirements
+            )

         return storemod.encodedstore(path, vfstype)

     return storemod.basicstore(path, vfstype)
+

 def resolvestorevfsoptions(ui, requirements, features):
     """Resolve the options to pass to the store vfs opener.

     The returned dict is used to influence behavior of the storage layer.
@@ -750,19 +814,20 @@
     # This revlog format is super old and we don't bother trying to parse
     # opener options for it because those options wouldn't do anything
     # meaningful on such old repos.
     if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
         options.update(resolverevlogstorevfsoptions(ui, requirements, features))
-    else: # explicitly mark repo as using revlogv0
+    else:  # explicitly mark repo as using revlogv0
         options['revlogv0'] = True

     writecopiesto = ui.config('experimental', 'copies.write-to')
     copiesextramode = ('changeset-only', 'compatibility')
-    if (writecopiesto in copiesextramode):
+    if writecopiesto in copiesextramode:
         options['copies-storage'] = 'extra'

     return options
+

 def resolverevlogstorevfsoptions(ui, requirements, features):
     """Resolve opener options specific to revlogs."""

     options = {}
@@ -779,38 +844,39 @@
     # experimental config: format.chunkcachesize
     chunkcachesize = ui.configint(b'format', b'chunkcachesize')
     if chunkcachesize is not None:
         options[b'chunkcachesize'] = chunkcachesize

-    deltabothparents = ui.configbool(b'storage',
-                                     b'revlog.optimize-delta-parent-choice')
+    deltabothparents = ui.configbool(
+        b'storage', b'revlog.optimize-delta-parent-choice'
+    )
     options[b'deltabothparents'] = deltabothparents

     lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
     lazydeltabase = False
     if lazydelta:
-        lazydeltabase = ui.configbool(b'storage',
-                                      b'revlog.reuse-external-delta-parent')
+        lazydeltabase = ui.configbool(
+            b'storage', b'revlog.reuse-external-delta-parent'
+        )
     if lazydeltabase is None:
         lazydeltabase = not scmutil.gddeltaconfig(ui)
     options[b'lazydelta'] = lazydelta
     options[b'lazydeltabase'] = lazydeltabase

     chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
     if 0 <= chainspan:
         options[b'maxdeltachainspan'] = chainspan

-    mmapindexthreshold = ui.configbytes(b'experimental',
-                                        b'mmapindexthreshold')
+    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
     if mmapindexthreshold is not None:
         options[b'mmapindexthreshold'] = mmapindexthreshold

     withsparseread = ui.configbool(b'experimental', b'sparse-read')
-    srdensitythres = float(ui.config(b'experimental',
-                                     b'sparse-read.density-threshold'))
-    srmingapsize = ui.configbytes(b'experimental',
-                                  b'sparse-read.min-gap-size')
+    srdensitythres = float(
+        ui.config(b'experimental', b'sparse-read.density-threshold')
+    )
+    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
     options[b'with-sparse-read'] = withsparseread
     options[b'sparse-read-density-threshold'] = srdensitythres
     options[b'sparse-read-min-gap-size'] = srmingapsize

     sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
@@ -852,53 +918,59 @@
     if repository.NARROW_REQUIREMENT in requirements:
         options[b'enableellipsis'] = True

     return options

+
 def makemain(**kwargs):
     """Produce a type conforming to ``ilocalrepositorymain``."""
     return localrepository

+
 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
 class revlogfilestorage(object):
     """File storage when using revlogs."""

     def file(self, path):
         if path[0] == b'/':
             path = path[1:]

         return filelog.filelog(self.svfs, path)

+
 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
 class revlognarrowfilestorage(object):
     """File storage when using revlogs and narrow files."""

     def file(self, path):
         if path[0] == b'/':
             path = path[1:]

         return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
+

 def makefilestorage(requirements, features, **kwargs):
     """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
     features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
     features.add(repository.REPO_FEATURE_STREAM_CLONE)

     if repository.NARROW_REQUIREMENT in requirements:
         return revlognarrowfilestorage
     else:
         return revlogfilestorage
+

 # List of repository interfaces and factory functions for them. Each
 # will be called in order during ``makelocalrepository()`` to iteratively
 # derive the final type for a local repository instance. We capture the
 # function as a lambda so we don't hold a reference and the module-level
 # functions can be wrapped.
 REPO_INTERFACES = [
     (repository.ilocalrepositorymain, lambda: makemain),
     (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
 ]
+

 @interfaceutil.implementer(repository.ilocalrepositorymain)
 class localrepository(object):
     """Main class for representing local repositories.

@@ -933,11 +1005,11 @@
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
-        'internal-phase'
+        'internal-phase',
     }

     # list of prefix for file which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
@@ -956,13 +1028,26 @@
         # this changeset was introduced. Someone should fix
         # the remainig bit and drop this line
         'bisect.state',
     }

-    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
-                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
-                 features, intents=None):
+    def __init__(
+        self,
+        baseui,
+        ui,
+        origroot,
+        wdirvfs,
+        hgvfs,
+        requirements,
+        supportedrequirements,
+        sharedpath,
+        store,
+        cachevfs,
+        wcachevfs,
+        features,
+        intents=None,
+    ):
         """Create a new local repository instance.

         Most callers should use ``hg.repository()``, ``localrepo.instance()``,
         or ``localrepo.makelocalrepository()`` for obtaining a new repository
         object.
@@ -1030,12 +1115,13 @@
         self.wcachevfs = wcachevfs
         self.features = features

         self.filtername = None

-        if (self.ui.configbool('devel', 'all-warnings') or
-            self.ui.configbool('devel', 'check-locks')):
+        if self.ui.configbool('devel', 'all-warnings') or self.ui.configbool(
+            'devel', 'check-locks'
+        ):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
@@ -1043,15 +1129,16 @@
         color.setup(self.ui)

         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
-        if (self.ui.configbool('devel', 'all-warnings') or
-            self.ui.configbool('devel', 'check-locks')):
-            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
+        if self.ui.configbool('devel', 'all-warnings') or self.ui.configbool(
+            'devel', 'check-locks'
+        ):
+            if util.safehasattr(self.svfs, 'vfs'):  # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
-            else: # standard vfs
+            else:  # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)

         self._dirstatevalidatewarned = False

         self._branchcaches = branchmap.BranchMapCache()
@@ -1089,59 +1176,72 @@
         self._extrafilterid = repoview.extrafilter(ui)

     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
         rref = weakref.ref(self)
+
         def checkvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
-            if (repo is None
+            if (
+                repo is None
                 or not util.safehasattr(repo, '_wlockref')
-                or not util.safehasattr(repo, '_lockref')):
+                or not util.safehasattr(repo, '_lockref')
+            ):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.path):
                 # truncate name relative to the repository (.hg)
-                path = path[len(repo.path) + 1:]
+                path = path[len(repo.path) + 1 :]
             if path.startswith('cache/'):
                 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
             if path.startswith('journal.') or path.startswith('undo.'):
                 # journal is covered by 'lock'
                 if repo._currentlock(repo._lockref) is None:
-                    repo.ui.develwarn('write with no lock: "%s"' % path,
-                                      stacklevel=3, config='check-locks')
+                    repo.ui.develwarn(
+                        'write with no lock: "%s"' % path,
+                        stacklevel=3,
+                        config='check-locks',
+                    )
                 elif repo._currentlock(repo._wlockref) is None:
                     # rest of vfs files are covered by 'wlock'
                     #
                     # exclude special files
                     for prefix in self._wlockfreeprefix:
                         if path.startswith(prefix):
                             return
-                    repo.ui.develwarn('write with no wlock: "%s"' % path,
-                                      stacklevel=3, config='check-locks')
+                    repo.ui.develwarn(
+                        'write with no wlock: "%s"' % path,
+                        stacklevel=3,
+                        config='check-locks',
+                    )
             return ret
+
         return checkvfs

     def _getsvfsward(self, origfunc):
         """build a ward for self.svfs"""
         rref = weakref.ref(self)
+
         def checksvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if repo is None or not util.safehasattr(repo, '_lockref'):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.sharedpath):
                 # truncate name relative to the repository (.hg)
-                path = path[len(repo.sharedpath) + 1:]
+                path = path[len(repo.sharedpath) + 1 :]
             if repo._currentlock(repo._lockref) is None:
-                repo.ui.develwarn('write with no lock: "%s"' % path,
-                                  stacklevel=4)
+                repo.ui.develwarn(
+                    'write with no lock: "%s"' % path, stacklevel=4
+                )
             return ret
+
         return checksvfs

     def close(self):
         self._writecaches()

@@ -1150,12 +1250,13 @@
             self._revbranchcache.write()

     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise'):
             caps = set(caps)
-            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
-                                                              role='client'))
+            capsblob = bundle2.encodecaps(
+                bundle2.getrepocaps(self, role='client')
+            )
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps

     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)
@@ -1171,18 +1272,19 @@

     @property
     def nofsauditor(self):
         # This is only used by context.basectx.match in order to detect
         # files in subrepos.
-        return pathutil.pathauditor(self.root, callback=self._checknested,
-                                    realfs=False, cached=True)
+        return pathutil.pathauditor(
+            self.root, callback=self._checknested, realfs=False, cached=True
+        )

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
-        subpath = path[len(self.root) + 1:]
+        subpath = path[len(self.root) + 1 :]
         normsubpath = util.pconvert(subpath)

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
@@ -1207,17 +1309,17 @@
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
-                    return sub.checknested(subpath[len(prefix) + 1:])
+                    return sub.checknested(subpath[len(prefix) + 1 :])
             else:
                 parts.pop()
         return False

     def peer(self):
-        return localpeer(self) # not cached to avoid reference cycle
+        return localpeer(self)  # not cached to avoid reference cycle

     def unfiltered(self):
         """Return unfiltered version of the repository

         Intended to be overwritten by filtered repo."""
1234 the "served" view, regardless of the initial view used by `repo`. 1336 the "served" view, regardless of the initial view used by `repo`.
1235 1337
1236 In other word, there is always only one level of `repoview` "filtering". 1338 In other word, there is always only one level of `repoview` "filtering".
1237 """ 1339 """
1238 if self._extrafilterid is not None and '%' not in name: 1340 if self._extrafilterid is not None and '%' not in name:
1239 name = name + '%' + self._extrafilterid 1341 name = name + '%' + self._extrafilterid
1240 1342
1241 cls = repoview.newtype(self.unfiltered().__class__) 1343 cls = repoview.newtype(self.unfiltered().__class__)
1242 return cls(self, name, visibilityexceptions) 1344 return cls(self, name, visibilityexceptions)
1243 1345
1244 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'), 1346 @mixedrepostorecache(
1245 ('bookmarks', ''), ('00changelog.i', '')) 1347 ('bookmarks', 'plain'),
1348 ('bookmarks.current', 'plain'),
1349 ('bookmarks', ''),
1350 ('00changelog.i', ''),
1351 )
1246 def _bookmarks(self): 1352 def _bookmarks(self):
1247 # Since the multiple files involved in the transaction cannot be 1353 # Since the multiple files involved in the transaction cannot be
1248 # written atomically (with current repository format), there is a race 1354 # written atomically (with current repository format), there is a race
1249 # condition here. 1355 # condition here.
1250 # 1356 #
@@ -1295,11 +1401,11 @@
             self._refreshchangelog()
         return bookmarks.bmstore(self)

     def _refreshchangelog(self):
         """make sure the in memory changelog match the on-disk one"""
-        if ('changelog' in vars(self) and self.currenttransaction() is None):
+        if 'changelog' in vars(self) and self.currenttransaction() is None:
             del self.changelog

     @property
     def _activebookmark(self):
         return self._bookmarks.active
@@ -1329,22 +1435,25 @@

     def _makedirstate(self):
         """Extension point for wrapping the dirstate per-repo."""
         sparsematchfn = lambda: sparse.matcher(self)

-        return dirstate.dirstate(self.vfs, self.ui, self.root,
-                                 self._dirstatevalidate, sparsematchfn)
+        return dirstate.dirstate(
+            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
+        )

     def _dirstatevalidate(self, node):
         try:
             self.changelog.rev(node)
             return node
         except error.LookupError:
             if not self._dirstatevalidatewarned:
                 self._dirstatevalidatewarned = True
-                self.ui.warn(_("warning: ignoring unknown"
-                               " working parent %s!\n") % short(node))
+                self.ui.warn(
+                    _("warning: ignoring unknown" " working parent %s!\n")
+                    % short(node)
+                )
             return nullid

     @storecache(narrowspec.FILENAME)
     def narrowpats(self):
         """matcher patterns for this repository's narrowspec
@@ -1396,13 +1505,15 @@
             return context.workingctx(self)
         if isinstance(changeid, context.basectx):
             return changeid
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
-            return [self[i]
-                    for i in pycompat.xrange(*changeid.indices(len(self)))
-                    if i not in self.changelog.filteredrevs]
+            return [
+                self[i]
+                for i in pycompat.xrange(*changeid.indices(len(self)))
+                if i not in self.changelog.filteredrevs
+            ]
         try:
             if isinstance(changeid, int):
                 node = self.changelog.node(changeid)
                 rev = changeid
             elif changeid == 'null':
@@ -1419,40 +1530,45 @@
             elif len(changeid) == 20:
                 try:
                     node = changeid
                     rev = self.changelog.rev(changeid)
                 except error.FilteredLookupError:
-                    changeid = hex(changeid) # for the error message
+                    changeid = hex(changeid)  # for the error message
                     raise
                 except LookupError:
                     # check if it might have come from damaged dirstate
                     #
                     # XXX we could avoid the unfiltered if we had a recognizable
                     # exception for filtered changeset access
-                    if (self.local()
-                        and changeid in self.unfiltered().dirstate.parents()):
+                    if (
+                        self.local()
+                        and changeid in self.unfiltered().dirstate.parents()
+                    ):
                         msg = _("working directory has unknown parent '%s'!")
                         raise error.Abort(msg % short(changeid))
-                    changeid = hex(changeid) # for the error message
+                    changeid = hex(changeid)  # for the error message
                     raise

             elif len(changeid) == 40:
                 node = bin(changeid)
                 rev = self.changelog.rev(node)
             else:
                 raise error.ProgrammingError(
-                    "unsupported changeid '%s' of type %s" %
-                    (changeid, type(changeid)))
+                    "unsupported changeid '%s' of type %s"
+                    % (changeid, type(changeid))
+                )

             return context.changectx(self, rev, node)

         except (error.FilteredIndexError, error.FilteredLookupError):
-            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
-                                                % pycompat.bytestr(changeid))
+            raise error.FilteredRepoLookupError(
+                _("filtered revision '%s'") % pycompat.bytestr(changeid)
+            )
         except (IndexError, LookupError):
             raise error.RepoLookupError(
-                _("unknown revision '%s'") % pycompat.bytestr(changeid))
+                _("unknown revision '%s'") % pycompat.bytestr(changeid)
+            )
         except error.WdirUnsupported:
             return context.workingctx(self)

     def __contains__(self, changeid):
         """True if the given changeid exists
@@ -1514,13 +1630,16 @@
         expand user aliases, specify ``user=True``. To provide some local
         definitions overriding user aliases, set ``localalias`` to
         ``{name: definitionstring}``.
         '''
         if user:
-            m = revset.matchany(self.ui, specs,
-                                lookup=revset.lookupfn(self),
-                                localalias=localalias)
+            m = revset.matchany(
+                self.ui,
+                specs,
+                lookup=revset.lookupfn(self),
+                localalias=localalias,
+            )
         else:
             m = revset.matchany(None, specs, localalias=localalias)
         return m(self)

     def url(self):
@@ -1587,11 +1706,10 @@
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?

-
         # map tag name to (node, hist)
         alltags = tagsmod.findglobaltags(self.ui, self)
         # map tag name to tag type
         tagtypes = dict((tag, 'global') for tag in alltags)

@@ -1604,12 +1722,16 @@
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
-        tagtypes = dict([(encoding.tolocal(name), value)
-                         for (name, value) in tagtypes.iteritems()])
+        tagtypes = dict(
+            [
+                (encoding.tolocal(name), value)
+                for (name, value) in tagtypes.iteritems()
+            ]
+        )
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
@@ -1739,12 +1861,13 @@
             self.dirstate.copy(None, f)

     def filectx(self, path, changeid=None, fileid=None, changectx=None):
         """changeid must be a changeset revision, if specified.
         fileid can be a file revision or node."""
-        return context.filectx(self, path, changeid, fileid,
-                               changectx=changectx)
+        return context.filectx(
+            self, path, changeid, fileid, changectx=changectx
+        )

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
@@ -1760,11 +1883,11 @@
         fn = None
         params = cmd
         for name, filterfn in self._datafilters.iteritems():
             if cmd.startswith(name):
                 fn = filterfn
-                params = cmd[len(name):].lstrip()
+                params = cmd[len(name) :].lstrip()
                 break
         if not fn:
             fn = lambda s, c, **kwargs: procutil.filter(s, c)
         # Wrap old filters not supporting keyword arguments
         if not pycompat.getargspec(fn)[2]:
1808 """ 1931 """
1809 data = self._filter(self._decodefilterpats, filename, data) 1932 data = self._filter(self._decodefilterpats, filename, data)
1810 if 'l' in flags: 1933 if 'l' in flags:
1811 self.wvfs.symlink(data, filename) 1934 self.wvfs.symlink(data, filename)
1812 else: 1935 else:
1813 self.wvfs.write(filename, data, backgroundclose=backgroundclose, 1936 self.wvfs.write(
1814 **kwargs) 1937 filename, data, backgroundclose=backgroundclose, **kwargs
1938 )
1815 if 'x' in flags: 1939 if 'x' in flags:
1816 self.wvfs.setflags(filename, False, True) 1940 self.wvfs.setflags(filename, False, True)
1817 else: 1941 else:
1818 self.wvfs.setflags(filename, False, False) 1942 self.wvfs.setflags(filename, False, False)
1819 return len(data) 1943 return len(data)
@@ -1831,23 +1955,25 @@
         if tr and tr.running():
            return tr
         return None

     def transaction(self, desc, report=None):
-        if (self.ui.configbool('devel', 'all-warnings')
-            or self.ui.configbool('devel', 'check-locks')):
+        if self.ui.configbool('devel', 'all-warnings') or self.ui.configbool(
+            'devel', 'check-locks'
+        ):
             if self._currentlock(self._lockref) is None:
                 raise error.ProgrammingError('transaction requires locking')
         tr = self.currenttransaction()
         if tr is not None:
             return tr.nest(name=desc)

         # abort here if the journal already exists
         if self.svfs.exists("journal"):
             raise error.RepoError(
                 _("abandoned transaction found"),
-                hint=_("run 'hg recover' to clean up transaction"))
+                hint=_("run 'hg recover' to clean up transaction"),
+            )

         idbase = "%.40f#%f" % (random.random(), time.time())
         ha = hex(hashlib.sha1(idbase).digest())
         txnid = 'TXN:' + ha
         self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1856 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()] 1982 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1857 if report: 1983 if report:
1858 rp = report 1984 rp = report
1859 else: 1985 else:
1860 rp = self.ui.warn 1986 rp = self.ui.warn
1861 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/ 1987 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1862 # we must avoid cyclic reference between repo and transaction. 1988 # we must avoid cyclic reference between repo and transaction.
1863 reporef = weakref.ref(self) 1989 reporef = weakref.ref(self)
1864 # Code to track tag movement 1990 # Code to track tag movement
1865 # 1991 #
1866 # Since tags are all handled as file content, it is actually quite hard 1992 # Since tags are all handled as file content, it is actually quite hard
1897 tracktags = lambda x: None 2023 tracktags = lambda x: None
1898 # experimental config: experimental.hook-track-tags 2024 # experimental config: experimental.hook-track-tags
1899 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags') 2025 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1900 if desc != 'strip' and shouldtracktags: 2026 if desc != 'strip' and shouldtracktags:
1901 oldheads = self.changelog.headrevs() 2027 oldheads = self.changelog.headrevs()
2028
1902 def tracktags(tr2): 2029 def tracktags(tr2):
1903 repo = reporef() 2030 repo = reporef()
1904 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads) 2031 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1905 newheads = repo.changelog.headrevs() 2032 newheads = repo.changelog.headrevs()
1906 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads) 2033 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1907 # notes: we compare lists here. 2034 # notes: we compare lists here.
1908 # As we do it only once, building a set would not be cheaper 2035 # As we do it only once, building a set would not be cheaper
1909 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes) 2036 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1910 if changes: 2037 if changes:
1911 tr2.hookargs['tag_moved'] = '1' 2038 tr2.hookargs['tag_moved'] = '1'
1912 with repo.vfs('changes/tags.changes', 'w', 2039 with repo.vfs(
1913 atomictemp=True) as changesfile: 2040 'changes/tags.changes', 'w', atomictemp=True
2041 ) as changesfile:
1914 # note: we do not register the file to the transaction 2042 # note: we do not register the file to the transaction
1915 # because we need it to still exist when the transaction 2043 # because we need it to still exist when the transaction
1916 # is closed (for txnclose hooks) 2044 # is closed (for txnclose hooks)
1917 tagsmod.writediff(changesfile, changes) 2045 tagsmod.writediff(changesfile, changes)
2046
1918 def validate(tr2): 2047 def validate(tr2):
1919 """will run pre-closing hooks""" 2048 """will run pre-closing hooks"""
1920 # XXX the transaction API is a bit lacking here so we take a hacky 2049 # XXX the transaction API is a bit lacking here so we take a hacky
1921 # path for now 2050 # path for now
1922 # 2051 #
1934 # This will have to be fixed before we remove the experimental 2063 # This will have to be fixed before we remove the experimental
1935 # gating. 2064 # gating.
1936 tracktags(tr2) 2065 tracktags(tr2)
1937 repo = reporef() 2066 repo = reporef()
1938 2067
1939 r = repo.ui.configsuboptions('experimental', 2068 r = repo.ui.configsuboptions(
1940 'single-head-per-branch') 2069 'experimental', 'single-head-per-branch'
2070 )
1941 singlehead, singleheadsub = r 2071 singlehead, singleheadsub = r
1942 if singlehead: 2072 if singlehead:
1943 accountclosed = singleheadsub.get("account-closed-heads", False) 2073 accountclosed = singleheadsub.get("account-closed-heads", False)
1944 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed) 2074 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
1945 if hook.hashook(repo.ui, 'pretxnclose-bookmark'): 2075 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1946 for name, (old, new) in sorted(tr.changes['bookmarks'].items()): 2076 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1947 args = tr.hookargs.copy() 2077 args = tr.hookargs.copy()
1948 args.update(bookmarks.preparehookargs(name, old, new)) 2078 args.update(bookmarks.preparehookargs(name, old, new))
1949 repo.hook('pretxnclose-bookmark', throw=True, 2079 repo.hook(
1950 **pycompat.strkwargs(args)) 2080 'pretxnclose-bookmark',
2081 throw=True,
2082 **pycompat.strkwargs(args)
2083 )
1951 if hook.hashook(repo.ui, 'pretxnclose-phase'): 2084 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1952 cl = repo.unfiltered().changelog 2085 cl = repo.unfiltered().changelog
1953 for rev, (old, new) in tr.changes['phases'].items(): 2086 for rev, (old, new) in tr.changes['phases'].items():
1954 args = tr.hookargs.copy() 2087 args = tr.hookargs.copy()
1955 node = hex(cl.node(rev)) 2088 node = hex(cl.node(rev))
1956 args.update(phases.preparehookargs(node, old, new)) 2089 args.update(phases.preparehookargs(node, old, new))
1957 repo.hook('pretxnclose-phase', throw=True, 2090 repo.hook(
1958 **pycompat.strkwargs(args)) 2091 'pretxnclose-phase',
1959 2092 throw=True,
1960 repo.hook('pretxnclose', throw=True, 2093 **pycompat.strkwargs(args)
1961 **pycompat.strkwargs(tr.hookargs)) 2094 )
2095
2096 repo.hook(
2097 'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2098 )
2099
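For context on the pretxnclose calls above, a hedged sketch of an in-process Python hook; the module path and hook name are invented:

    # hgrc wiring (assumed):
    #   [hooks]
    #   pretxnclose.logtxn = python:myhooks.logtxn
    def logtxn(ui, repo, hooktype, txnname=None, txnid=None, **kwargs):
        ui.status(b'about to close %s (%s)\n' % (txnname, txnid))
        return False  # a truthy return vetoes the transaction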
1962 def releasefn(tr, success): 2100 def releasefn(tr, success):
1963 repo = reporef() 2101 repo = reporef()
1964 if repo is None: 2102 if repo is None:
1965 # If the repo has been GC'd (and this release function is being 2103 # If the repo has been GC'd (and this release function is being
1966 # called from transaction.__del__), there's not much we can do, 2104 # called from transaction.__del__), there's not much we can do,
1981 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate') 2119 narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
1982 repo.dirstate.restorebackup(None, 'journal.dirstate') 2120 repo.dirstate.restorebackup(None, 'journal.dirstate')
1983 2121
1984 repo.invalidate(clearfilecache=True) 2122 repo.invalidate(clearfilecache=True)
1985 2123
1986 tr = transaction.transaction(rp, self.svfs, vfsmap, 2124 tr = transaction.transaction(
1987 "journal", 2125 rp,
1988 "undo", 2126 self.svfs,
1989 aftertrans(renames), 2127 vfsmap,
1990 self.store.createmode, 2128 "journal",
1991 validator=validate, 2129 "undo",
1992 releasefn=releasefn, 2130 aftertrans(renames),
1993 checkambigfiles=_cachedfiles, 2131 self.store.createmode,
1994 name=desc) 2132 validator=validate,
2133 releasefn=releasefn,
2134 checkambigfiles=_cachedfiles,
2135 name=desc,
2136 )
1995 tr.changes['origrepolen'] = len(self) 2137 tr.changes['origrepolen'] = len(self)
1996 tr.changes['obsmarkers'] = set() 2138 tr.changes['obsmarkers'] = set()
1997 tr.changes['phases'] = {} 2139 tr.changes['phases'] = {}
1998 tr.changes['bookmarks'] = {} 2140 tr.changes['bookmarks'] = {}
1999 2141
2001 tr.hookargs['txnname'] = desc 2143 tr.hookargs['txnname'] = desc
2002 # note: writing the fncache only during finalize means that the file is 2144 # note: writing the fncache only during finalize means that the file is
2003 # outdated when running hooks. As fncache is used for streaming clone, 2145 # outdated when running hooks. As fncache is used for streaming clone,
2004 # this is not expected to break anything that happens during the hooks. 2146 # this is not expected to break anything that happens during the hooks.
2005 tr.addfinalize('flush-fncache', self.store.write) 2147 tr.addfinalize('flush-fncache', self.store.write)
2148
2006 def txnclosehook(tr2): 2149 def txnclosehook(tr2):
2007 """To be run if transaction is successful, will schedule a hook run 2150 """To be run if transaction is successful, will schedule a hook run
2008 """ 2151 """
2009 # Don't reference tr2 in hook() so we don't hold a reference. 2152 # Don't reference tr2 in hook() so we don't hold a reference.
2010 # This reduces memory consumption when there are multiple 2153 # This reduces memory consumption when there are multiple
2017 if hook.hashook(repo.ui, 'txnclose-bookmark'): 2160 if hook.hashook(repo.ui, 'txnclose-bookmark'):
2018 bmchanges = sorted(tr.changes['bookmarks'].items()) 2161 bmchanges = sorted(tr.changes['bookmarks'].items())
2019 for name, (old, new) in bmchanges: 2162 for name, (old, new) in bmchanges:
2020 args = tr.hookargs.copy() 2163 args = tr.hookargs.copy()
2021 args.update(bookmarks.preparehookargs(name, old, new)) 2164 args.update(bookmarks.preparehookargs(name, old, new))
2022 repo.hook('txnclose-bookmark', throw=False, 2165 repo.hook(
2023 **pycompat.strkwargs(args)) 2166 'txnclose-bookmark',
2167 throw=False,
2168 **pycompat.strkwargs(args)
2169 )
2024 2170
2025 if hook.hashook(repo.ui, 'txnclose-phase'): 2171 if hook.hashook(repo.ui, 'txnclose-phase'):
2026 cl = repo.unfiltered().changelog 2172 cl = repo.unfiltered().changelog
2027 phasemv = sorted(tr.changes['phases'].items()) 2173 phasemv = sorted(tr.changes['phases'].items())
2028 for rev, (old, new) in phasemv: 2174 for rev, (old, new) in phasemv:
2029 args = tr.hookargs.copy() 2175 args = tr.hookargs.copy()
2030 node = hex(cl.node(rev)) 2176 node = hex(cl.node(rev))
2031 args.update(phases.preparehookargs(node, old, new)) 2177 args.update(phases.preparehookargs(node, old, new))
2032 repo.hook('txnclose-phase', throw=False, 2178 repo.hook(
2033 **pycompat.strkwargs(args)) 2179 'txnclose-phase',
2034 2180 throw=False,
2035 repo.hook('txnclose', throw=False, 2181 **pycompat.strkwargs(args)
2036 **pycompat.strkwargs(hookargs)) 2182 )
2183
2184 repo.hook(
2185 'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2186 )
2187
2037 reporef()._afterlock(hookfunc) 2188 reporef()._afterlock(hookfunc)
2189
2038 tr.addfinalize('txnclose-hook', txnclosehook) 2190 tr.addfinalize('txnclose-hook', txnclosehook)
2039 # Include a leading "-" to make it happen before the transaction summary 2191 # Include a leading "-" to make it happen before the transaction summary
2040 # reports registered via scmutil.registersummarycallback() whose names 2192 # reports registered via scmutil.registersummarycallback() whose names
2041 # are 00-txnreport etc. That way, the caches will be warm when the 2193 # are 00-txnreport etc. That way, the caches will be warm when the
2042 # callbacks run. 2194 # callbacks run.
2043 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr)) 2195 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
2196
2044 def txnaborthook(tr2): 2197 def txnaborthook(tr2):
2045 """To be run if transaction is aborted 2198 """To be run if transaction is aborted
2046 """ 2199 """
2047 reporef().hook('txnabort', throw=False, 2200 reporef().hook(
2048 **pycompat.strkwargs(tr2.hookargs)) 2201 'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2202 )
2203
2049 tr.addabort('txnabort-hook', txnaborthook) 2204 tr.addabort('txnabort-hook', txnaborthook)
2050 # avoid eager cache invalidation. in-memory data should be identical 2205 # avoid eager cache invalidation. in-memory data should be identical
2051 # to stored data if transaction has no error. 2206 # to stored data if transaction has no error.
2052 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats) 2207 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
2053 self._transref = weakref.ref(tr) 2208 self._transref = weakref.ref(tr)
2054 scmutil.registersummarycallback(self, tr, desc) 2209 scmutil.registersummarycallback(self, tr, desc)
2055 return tr 2210 return tr
2056 2211
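A hedged sketch of how callers drive transaction(): the with-statement form matches the transaction('commit') use later in this file, and the surrounding locks satisfy the devel check at the top of the method; repo is assumed:

    with repo.wlock(), repo.lock():
        with repo.transaction(b'my-operation') as tr:
            tr.hookargs['source'] = 'example'  # extra data for txn hooks
            # journaled writes go here; an unhandled exception aborts
            # the transaction and fires the txnabort hook above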
2057 def _journalfiles(self): 2212 def _journalfiles(self):
2058 return ((self.svfs, 'journal'), 2213 return (
2059 (self.svfs, 'journal.narrowspec'), 2214 (self.svfs, 'journal'),
2060 (self.vfs, 'journal.narrowspec.dirstate'), 2215 (self.svfs, 'journal.narrowspec'),
2061 (self.vfs, 'journal.dirstate'), 2216 (self.vfs, 'journal.narrowspec.dirstate'),
2062 (self.vfs, 'journal.branch'), 2217 (self.vfs, 'journal.dirstate'),
2063 (self.vfs, 'journal.desc'), 2218 (self.vfs, 'journal.branch'),
2064 (bookmarks.bookmarksvfs(self), 'journal.bookmarks'), 2219 (self.vfs, 'journal.desc'),
2065 (self.svfs, 'journal.phaseroots')) 2220 (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
2221 (self.svfs, 'journal.phaseroots'),
2222 )
2066 2223
2067 def undofiles(self): 2224 def undofiles(self):
2068 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] 2225 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2069 2226
2070 @unfilteredmethod 2227 @unfilteredmethod
2071 def _writejournal(self, desc): 2228 def _writejournal(self, desc):
2072 self.dirstate.savebackup(None, 'journal.dirstate') 2229 self.dirstate.savebackup(None, 'journal.dirstate')
2073 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate') 2230 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
2074 narrowspec.savebackup(self, 'journal.narrowspec') 2231 narrowspec.savebackup(self, 'journal.narrowspec')
2075 self.vfs.write("journal.branch", 2232 self.vfs.write(
2076 encoding.fromlocal(self.dirstate.branch())) 2233 "journal.branch", encoding.fromlocal(self.dirstate.branch())
2077 self.vfs.write("journal.desc", 2234 )
2078 "%d\n%s\n" % (len(self), desc)) 2235 self.vfs.write("journal.desc", "%d\n%s\n" % (len(self), desc))
2079 bookmarksvfs = bookmarks.bookmarksvfs(self) 2236 bookmarksvfs = bookmarks.bookmarksvfs(self)
2080 bookmarksvfs.write("journal.bookmarks", 2237 bookmarksvfs.write(
2081 bookmarksvfs.tryread("bookmarks")) 2238 "journal.bookmarks", bookmarksvfs.tryread("bookmarks")
2082 self.svfs.write("journal.phaseroots", 2239 )
2083 self.svfs.tryread("phaseroots")) 2240 self.svfs.write("journal.phaseroots", self.svfs.tryread("phaseroots"))
2084 2241
2085 def recover(self): 2242 def recover(self):
2086 with self.lock(): 2243 with self.lock():
2087 if self.svfs.exists("journal"): 2244 if self.svfs.exists("journal"):
2088 self.ui.status(_("rolling back interrupted transaction\n")) 2245 self.ui.status(_("rolling back interrupted transaction\n"))
2089 vfsmap = {'': self.svfs, 2246 vfsmap = {
2090 'plain': self.vfs,} 2247 '': self.svfs,
2091 transaction.rollback(self.svfs, vfsmap, "journal", 2248 'plain': self.vfs,
2092 self.ui.warn, 2249 }
2093 checkambigfiles=_cachedfiles) 2250 transaction.rollback(
2251 self.svfs,
2252 vfsmap,
2253 "journal",
2254 self.ui.warn,
2255 checkambigfiles=_cachedfiles,
2256 )
2094 self.invalidate() 2257 self.invalidate()
2095 return True 2258 return True
2096 else: 2259 else:
2097 self.ui.warn(_("no interrupted transaction available\n")) 2260 self.ui.warn(_("no interrupted transaction available\n"))
2098 return False 2261 return False
2110 self.ui.warn(_("no rollback information available\n")) 2273 self.ui.warn(_("no rollback information available\n"))
2111 return 1 2274 return 1
2112 finally: 2275 finally:
2113 release(dsguard, lock, wlock) 2276 release(dsguard, lock, wlock)
2114 2277
2115 @unfilteredmethod # Until we get smarter cache management 2278 @unfilteredmethod # Until we get smarter cache management
2116 def _rollback(self, dryrun, force, dsguard): 2279 def _rollback(self, dryrun, force, dsguard):
2117 ui = self.ui 2280 ui = self.ui
2118 try: 2281 try:
2119 args = self.vfs.read('undo.desc').splitlines() 2282 args = self.vfs.read('undo.desc').splitlines()
2120 (oldlen, desc, detail) = (int(args[0]), args[1], None) 2283 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2121 if len(args) >= 3: 2284 if len(args) >= 3:
2122 detail = args[2] 2285 detail = args[2]
2123 oldtip = oldlen - 1 2286 oldtip = oldlen - 1
2124 2287
2125 if detail and ui.verbose: 2288 if detail and ui.verbose:
2126 msg = (_('repository tip rolled back to revision %d' 2289 msg = _(
2127 ' (undo %s: %s)\n') 2290 'repository tip rolled back to revision %d'
2128 % (oldtip, desc, detail)) 2291 ' (undo %s: %s)\n'
2292 ) % (oldtip, desc, detail)
2129 else: 2293 else:
2130 msg = (_('repository tip rolled back to revision %d' 2294 msg = _(
2131 ' (undo %s)\n') 2295 'repository tip rolled back to revision %d' ' (undo %s)\n'
2132 % (oldtip, desc)) 2296 ) % (oldtip, desc)
2133 except IOError: 2297 except IOError:
2134 msg = _('rolling back unknown transaction\n') 2298 msg = _('rolling back unknown transaction\n')
2135 desc = None 2299 desc = None
2136 2300
2137 if not force and self['.'] != self['tip'] and desc == 'commit': 2301 if not force and self['.'] != self['tip'] and desc == 'commit':
2138 raise error.Abort( 2302 raise error.Abort(
2139 _('rollback of last commit while not checked out ' 2303 _(
2140 'may lose data'), hint=_('use -f to force')) 2304 'rollback of last commit while not checked out '
2305 'may lose data'
2306 ),
2307 hint=_('use -f to force'),
2308 )
2141 2309
2142 ui.status(msg) 2310 ui.status(msg)
2143 if dryrun: 2311 if dryrun:
2144 return 0 2312 return 0
2145 2313
2146 parents = self.dirstate.parents() 2314 parents = self.dirstate.parents()
2147 self.destroying() 2315 self.destroying()
2148 vfsmap = {'plain': self.vfs, '': self.svfs} 2316 vfsmap = {'plain': self.vfs, '': self.svfs}
2149 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn, 2317 transaction.rollback(
2150 checkambigfiles=_cachedfiles) 2318 self.svfs, vfsmap, 'undo', ui.warn, checkambigfiles=_cachedfiles
2319 )
2151 bookmarksvfs = bookmarks.bookmarksvfs(self) 2320 bookmarksvfs = bookmarks.bookmarksvfs(self)
2152 if bookmarksvfs.exists('undo.bookmarks'): 2321 if bookmarksvfs.exists('undo.bookmarks'):
2153 bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True) 2322 bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2154 if self.svfs.exists('undo.phaseroots'): 2323 if self.svfs.exists('undo.phaseroots'):
2155 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True) 2324 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2165 self.dirstate.restorebackup(None, 'undo.dirstate') 2334 self.dirstate.restorebackup(None, 'undo.dirstate')
2166 try: 2335 try:
2167 branch = self.vfs.read('undo.branch') 2336 branch = self.vfs.read('undo.branch')
2168 self.dirstate.setbranch(encoding.tolocal(branch)) 2337 self.dirstate.setbranch(encoding.tolocal(branch))
2169 except IOError: 2338 except IOError:
2170 ui.warn(_('named branch could not be reset: ' 2339 ui.warn(
2171 'current branch is still \'%s\'\n') 2340 _(
2172 % self.dirstate.branch()) 2341 'named branch could not be reset: '
2342 'current branch is still \'%s\'\n'
2343 )
2344 % self.dirstate.branch()
2345 )
2173 2346
2174 parents = tuple([p.rev() for p in self[None].parents()]) 2347 parents = tuple([p.rev() for p in self[None].parents()])
2175 if len(parents) > 1: 2348 if len(parents) > 1:
2176 ui.status(_('working directory now based on ' 2349 ui.status(
2177 'revisions %d and %d\n') % parents) 2350 _('working directory now based on ' 'revisions %d and %d\n')
2351 % parents
2352 )
2178 else: 2353 else:
2179 ui.status(_('working directory now based on ' 2354 ui.status(
2180 'revision %d\n') % parents) 2355 _('working directory now based on ' 'revision %d\n')
2356 % parents
2357 )
2181 mergemod.mergestate.clean(self, self['.'].node()) 2358 mergemod.mergestate.clean(self, self['.'].node())
2182 2359
2183 # TODO: if we know which new heads may result from this rollback, pass 2360 # TODO: if we know which new heads may result from this rollback, pass
2184 # them to destroy(), which will prevent the branchhead cache from being 2361 # them to destroy(), which will prevent the branchhead cache from being
2185 # invalidated. 2362 # invalidated.
2193 this logic. For this purpose, the created transaction is passed to the 2370 this logic. For this purpose, the created transaction is passed to the
2194 method. 2371 method.
2195 """ 2372 """
2196 # we must avoid cyclic reference between repo and transaction. 2373 # we must avoid cyclic reference between repo and transaction.
2197 reporef = weakref.ref(self) 2374 reporef = weakref.ref(self)
2375
2198 def updater(tr): 2376 def updater(tr):
2199 repo = reporef() 2377 repo = reporef()
2200 repo.updatecaches(tr) 2378 repo.updatecaches(tr)
2379
2201 return updater 2380 return updater
2202 2381
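The reporef pattern above avoids a repo<->transaction reference cycle; a minimal standalone sketch of the same idiom (all names invented):

    import weakref

    class cacheowner(object):
        # stand-in for the repository in this illustration
        def updatecaches(self, tr):
            print('warming caches for %r' % (tr,))

        def makeupdater(self):
            ref = weakref.ref(self)  # the closure keeps no strong reference

            def updater(tr):
                owner = ref()        # None once the owner has been collected
                if owner is not None:
                    owner.updatecaches(tr)

            return updater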
2203 @unfilteredmethod 2382 @unfilteredmethod
2204 def updatecaches(self, tr=None, full=False): 2383 def updatecaches(self, tr=None, full=False):
2205 """warm appropriate caches 2384 """warm appropriate caches
2286 If a transaction is running, invalidation of store is omitted, 2465 If a transaction is running, invalidation of store is omitted,
2287 because discarding in-memory changes might cause inconsistency 2466 because discarding in-memory changes might cause inconsistency
2288 (e.g. an incomplete fncache causes unintentional failure, but a 2467 (e.g. an incomplete fncache causes unintentional failure, but a
2289 redundant one doesn't). 2468 redundant one doesn't).
2290 ''' 2469 '''
2291 unfiltered = self.unfiltered() # all file caches are stored unfiltered 2470 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2292 for k in list(self._filecache.keys()): 2471 for k in list(self._filecache.keys()):
2293 # dirstate is invalidated separately in invalidatedirstate() 2472 # dirstate is invalidated separately in invalidatedirstate()
2294 if k == 'dirstate': 2473 if k == 'dirstate':
2295 continue 2474 continue
2296 if (k == 'changelog' and 2475 if (
2297 self.currenttransaction() and 2476 k == 'changelog'
2298 self.changelog._delayed): 2477 and self.currenttransaction()
2478 and self.changelog._delayed
2479 ):
2299 # The changelog object may store unwritten revisions. We don't 2480 # The changelog object may store unwritten revisions. We don't
2300 # want to lose them. 2481 # want to lose them.
2301 # TODO: Solve the problem instead of working around it. 2482 # TODO: Solve the problem instead of working around it.
2302 continue 2483 continue
2303 2484
2328 k = pycompat.sysstr(k) 2509 k = pycompat.sysstr(k)
2329 if k == r'dirstate' or k not in self.__dict__: 2510 if k == r'dirstate' or k not in self.__dict__:
2330 continue 2511 continue
2331 ce.refresh() 2512 ce.refresh()
2332 2513
2333 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc, 2514 def _lock(
2334 inheritchecker=None, parentenvvar=None): 2515 self,
2516 vfs,
2517 lockname,
2518 wait,
2519 releasefn,
2520 acquirefn,
2521 desc,
2522 inheritchecker=None,
2523 parentenvvar=None,
2524 ):
2335 parentlock = None 2525 parentlock = None
2336 # the contents of parentenvvar are used by the underlying lock to 2526 # the contents of parentenvvar are used by the underlying lock to
2337 # determine whether it can be inherited 2527 # determine whether it can be inherited
2338 if parentenvvar is not None: 2528 if parentenvvar is not None:
2339 parentlock = encoding.environ.get(parentenvvar) 2529 parentlock = encoding.environ.get(parentenvvar)
2344 timeout = self.ui.configint("ui", "timeout") 2534 timeout = self.ui.configint("ui", "timeout")
2345 warntimeout = self.ui.configint("ui", "timeout.warn") 2535 warntimeout = self.ui.configint("ui", "timeout.warn")
2346 # internal config: ui.signal-safe-lock 2536 # internal config: ui.signal-safe-lock
2347 signalsafe = self.ui.configbool('ui', 'signal-safe-lock') 2537 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2348 2538
2349 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout, 2539 l = lockmod.trylock(
2350 releasefn=releasefn, 2540 self.ui,
2351 acquirefn=acquirefn, desc=desc, 2541 vfs,
2352 inheritchecker=inheritchecker, 2542 lockname,
2353 parentlock=parentlock, 2543 timeout,
2354 signalsafe=signalsafe) 2544 warntimeout,
2545 releasefn=releasefn,
2546 acquirefn=acquirefn,
2547 desc=desc,
2548 inheritchecker=inheritchecker,
2549 parentlock=parentlock,
2550 signalsafe=signalsafe,
2551 )
2355 return l 2552 return l
2356 2553
2357 def _afterlock(self, callback): 2554 def _afterlock(self, callback):
2358 """add a callback to be run when the repository is fully unlocked 2555 """add a callback to be run when the repository is fully unlocked
2359 2556
2362 for ref in (self._wlockref, self._lockref): 2559 for ref in (self._wlockref, self._lockref):
2363 l = ref and ref() 2560 l = ref and ref()
2364 if l and l.held: 2561 if l and l.held:
2365 l.postrelease.append(callback) 2562 l.postrelease.append(callback)
2366 break 2563 break
2367 else: # no lock has been found. 2564 else: # no lock has been found.
2368 callback() 2565 callback()
2369 2566
2370 def lock(self, wait=True): 2567 def lock(self, wait=True):
2371 '''Lock the repository store (.hg/store) and return a weak reference 2568 '''Lock the repository store (.hg/store) and return a weak reference
2372 to the lock. Use this before modifying the store (e.g. committing or 2569 to the lock. Use this before modifying the store (e.g. committing or
2377 l = self._currentlock(self._lockref) 2574 l = self._currentlock(self._lockref)
2378 if l is not None: 2575 if l is not None:
2379 l.lock() 2576 l.lock()
2380 return l 2577 return l
2381 2578
2382 l = self._lock(vfs=self.svfs, 2579 l = self._lock(
2383 lockname="lock", 2580 vfs=self.svfs,
2384 wait=wait, 2581 lockname="lock",
2385 releasefn=None, 2582 wait=wait,
2386 acquirefn=self.invalidate, 2583 releasefn=None,
2387 desc=_('repository %s') % self.origroot) 2584 acquirefn=self.invalidate,
2585 desc=_('repository %s') % self.origroot,
2586 )
2388 self._lockref = weakref.ref(l) 2587 self._lockref = weakref.ref(l)
2389 return l 2588 return l
2390 2589
2391 def _wlockchecktransaction(self): 2590 def _wlockchecktransaction(self):
2392 if self.currenttransaction() is not None: 2591 if self.currenttransaction() is not None:
2393 raise error.LockInheritanceContractViolation( 2592 raise error.LockInheritanceContractViolation(
2394 'wlock cannot be inherited in the middle of a transaction') 2593 'wlock cannot be inherited in the middle of a transaction'
2594 )
2395 2595
2396 def wlock(self, wait=True): 2596 def wlock(self, wait=True):
2397 '''Lock the non-store parts of the repository (everything under 2597 '''Lock the non-store parts of the repository (everything under
2398 .hg except .hg/store) and return a weak reference to the lock. 2598 .hg except .hg/store) and return a weak reference to the lock.
2399 2599
2406 l.lock() 2606 l.lock()
2407 return l 2607 return l
2408 2608
2409 # We do not need to check for non-waiting lock acquisition. Such an 2609 # We do not need to check for non-waiting lock acquisition. Such an
2410 # acquisition would not cause a deadlock; it would just fail. 2610 # acquisition would not cause a deadlock; it would just fail.
2411 if wait and (self.ui.configbool('devel', 'all-warnings') 2611 if wait and (
2412 or self.ui.configbool('devel', 'check-locks')): 2612 self.ui.configbool('devel', 'all-warnings')
2613 or self.ui.configbool('devel', 'check-locks')
2614 ):
2413 if self._currentlock(self._lockref) is not None: 2615 if self._currentlock(self._lockref) is not None:
2414 self.ui.develwarn('"wlock" acquired after "lock"') 2616 self.ui.develwarn('"wlock" acquired after "lock"')
2415 2617
2416 def unlock(): 2618 def unlock():
2417 if self.dirstate.pendingparentchange(): 2619 if self.dirstate.pendingparentchange():
2419 else: 2621 else:
2420 self.dirstate.write(None) 2622 self.dirstate.write(None)
2421 2623
2422 self._filecache['dirstate'].refresh() 2624 self._filecache['dirstate'].refresh()
2423 2625
2424 l = self._lock(self.vfs, "wlock", wait, unlock, 2626 l = self._lock(
2425 self.invalidatedirstate, _('working directory of %s') % 2627 self.vfs,
2426 self.origroot, 2628 "wlock",
2427 inheritchecker=self._wlockchecktransaction, 2629 wait,
2428 parentenvvar='HG_WLOCK_LOCKER') 2630 unlock,
2631 self.invalidatedirstate,
2632 _('working directory of %s') % self.origroot,
2633 inheritchecker=self._wlockchecktransaction,
2634 parentenvvar='HG_WLOCK_LOCKER',
2635 )
2429 self._wlockref = weakref.ref(l) 2636 self._wlockref = weakref.ref(l)
2430 return l 2637 return l
2431 2638
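The devel warning above encodes the expected ordering: wlock before lock, just as the commit() method later in this file does with self.wlock(), self.lock(). A hedged sketch, repo assumed:

    with repo.wlock():     # everything under .hg except .hg/store
        with repo.lock():  # the store lock
            pass           # correct nesting
    # acquiring lock() first and wlock() second trips the
    # '"wlock" acquired after "lock"' devel warning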
2432 def _currentlock(self, lockref): 2639 def _currentlock(self, lockref):
2433 """Returns the lock if it's held, or None if it's not.""" 2640 """Returns the lock if it's held, or None if it's not."""
2440 2647
2441 def currentwlock(self): 2648 def currentwlock(self):
2442 """Returns the wlock if it's held, or None if it's not.""" 2649 """Returns the wlock if it's held, or None if it's not."""
2443 return self._currentlock(self._wlockref) 2650 return self._currentlock(self._wlockref)
2444 2651
2445 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist, 2652 def _filecommit(
2446 includecopymeta): 2653 self,
2654 fctx,
2655 manifest1,
2656 manifest2,
2657 linkrev,
2658 tr,
2659 changelist,
2660 includecopymeta,
2661 ):
2447 """ 2662 """
2448 commit an individual file as part of a larger transaction 2663 commit an individual file as part of a larger transaction
2449 """ 2664 """
2450 2665
2451 fname = fctx.path() 2666 fname = fctx.path()
2453 fparent2 = manifest2.get(fname, nullid) 2668 fparent2 = manifest2.get(fname, nullid)
2454 if isinstance(fctx, context.filectx): 2669 if isinstance(fctx, context.filectx):
2455 node = fctx.filenode() 2670 node = fctx.filenode()
2456 if node in [fparent1, fparent2]: 2671 if node in [fparent1, fparent2]:
2457 self.ui.debug('reusing %s filelog entry\n' % fname) 2672 self.ui.debug('reusing %s filelog entry\n' % fname)
2458 if ((fparent1 != nullid and 2673 if (
2459 manifest1.flags(fname) != fctx.flags()) or 2674 fparent1 != nullid
2460 (fparent2 != nullid and 2675 and manifest1.flags(fname) != fctx.flags()
2461 manifest2.flags(fname) != fctx.flags())): 2676 ) or (
2677 fparent2 != nullid
2678 and manifest2.flags(fname) != fctx.flags()
2679 ):
2462 changelist.append(fname) 2680 changelist.append(fname)
2463 return node 2681 return node
2464 2682
2465 flog = self.file(fname) 2683 flog = self.file(fname)
2466 meta = {} 2684 meta = {}
2486 # 2704 #
2487 2705
2488 cnode = manifest1.get(cfname) 2706 cnode = manifest1.get(cfname)
2489 newfparent = fparent2 2707 newfparent = fparent2
2490 2708
2491 if manifest2: # branch merge 2709 if manifest2: # branch merge
2492 if fparent2 == nullid or cnode is None: # copied on remote side 2710 if fparent2 == nullid or cnode is None: # copied on remote side
2493 if cfname in manifest2: 2711 if cfname in manifest2:
2494 cnode = manifest2[cfname] 2712 cnode = manifest2[cfname]
2495 newfparent = fparent1 2713 newfparent = fparent1
2496 2714
2497 # Here, we used to search backwards through history to try to find 2715 # Here, we used to search backwards through history to try to find
2508 if includecopymeta: 2726 if includecopymeta:
2509 meta["copy"] = cfname 2727 meta["copy"] = cfname
2510 meta["copyrev"] = hex(cnode) 2728 meta["copyrev"] = hex(cnode)
2511 fparent1, fparent2 = nullid, newfparent 2729 fparent1, fparent2 = nullid, newfparent
2512 else: 2730 else:
2513 self.ui.warn(_("warning: can't find ancestor for '%s' " 2731 self.ui.warn(
2514 "copied from '%s'!\n") % (fname, cfname)) 2732 _(
2733 "warning: can't find ancestor for '%s' "
2734 "copied from '%s'!\n"
2735 )
2736 % (fname, cfname)
2737 )
2515 2738
2516 elif fparent1 == nullid: 2739 elif fparent1 == nullid:
2517 fparent1, fparent2 = fparent2, nullid 2740 fparent1, fparent2 = fparent2, nullid
2518 elif fparent2 != nullid: 2741 elif fparent2 != nullid:
2519 # is one parent an ancestor of the other? 2742 # is one parent an ancestor of the other?
2543 f = self.dirstate.normalize(f) 2766 f = self.dirstate.normalize(f)
2544 if f == '.' or f in matched or f in wctx.substate: 2767 if f == '.' or f in matched or f in wctx.substate:
2545 continue 2768 continue
2546 if f in status.deleted: 2769 if f in status.deleted:
2547 fail(f, _('file not found!')) 2770 fail(f, _('file not found!'))
2548 if f in vdirs: # visited directory 2771 if f in vdirs: # visited directory
2549 d = f + '/' 2772 d = f + '/'
2550 for mf in matched: 2773 for mf in matched:
2551 if mf.startswith(d): 2774 if mf.startswith(d):
2552 break 2775 break
2553 else: 2776 else:
2554 fail(f, _("no match under directory!")) 2777 fail(f, _("no match under directory!"))
2555 elif f not in self.dirstate: 2778 elif f not in self.dirstate:
2556 fail(f, _("file not tracked!")) 2779 fail(f, _("file not tracked!"))
2557 2780
2558 @unfilteredmethod 2781 @unfilteredmethod
2559 def commit(self, text="", user=None, date=None, match=None, force=False, 2782 def commit(
2560 editor=False, extra=None): 2783 self,
2784 text="",
2785 user=None,
2786 date=None,
2787 match=None,
2788 force=False,
2789 editor=False,
2790 extra=None,
2791 ):
2561 """Add a new revision to current repository. 2792 """Add a new revision to current repository.
2562 2793
2563 Revision information is gathered from the working directory, 2794 Revision information is gathered from the working directory,
2564 match can be used to filter the committed files. If editor is 2795 match can be used to filter the committed files. If editor is
2565 supplied, it is called to get a commit message. 2796 supplied, it is called to get a commit message.
2582 with self.wlock(), self.lock(): 2813 with self.wlock(), self.lock():
2583 wctx = self[None] 2814 wctx = self[None]
2584 merge = len(wctx.parents()) > 1 2815 merge = len(wctx.parents()) > 1
2585 2816
2586 if not force and merge and not match.always(): 2817 if not force and merge and not match.always():
2587 raise error.Abort(_('cannot partially commit a merge ' 2818 raise error.Abort(
2588 '(do not specify files or patterns)')) 2819 _(
2820 'cannot partially commit a merge '
2821 '(do not specify files or patterns)'
2822 )
2823 )
2589 2824
2590 status = self.status(match=match, clean=force) 2825 status = self.status(match=match, clean=force)
2591 if force: 2826 if force:
2592 status.modified.extend(status.clean) # mq may commit clean files 2827 status.modified.extend(
2828 status.clean
2829 ) # mq may commit clean files
2593 2830
2594 # check subrepos 2831 # check subrepos
2595 subs, commitsubs, newstate = subrepoutil.precommit( 2832 subs, commitsubs, newstate = subrepoutil.precommit(
2596 self.ui, wctx, status, match, force=force) 2833 self.ui, wctx, status, match, force=force
2834 )
2597 2835
2598 # make sure all explicit patterns are matched 2836 # make sure all explicit patterns are matched
2599 if not force: 2837 if not force:
2600 self.checkcommitpatterns(wctx, vdirs, match, status, fail) 2838 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2601 2839
2602 cctx = context.workingcommitctx(self, status, 2840 cctx = context.workingcommitctx(
2603 text, user, date, extra) 2841 self, status, text, user, date, extra
2842 )
2604 2843
2605 # internal config: ui.allowemptycommit 2844 # internal config: ui.allowemptycommit
2606 allowemptycommit = (wctx.branch() != wctx.p1().branch() 2845 allowemptycommit = (
2607 or extra.get('close') or merge or cctx.files() 2846 wctx.branch() != wctx.p1().branch()
2608 or self.ui.configbool('ui', 'allowemptycommit')) 2847 or extra.get('close')
2848 or merge
2849 or cctx.files()
2850 or self.ui.configbool('ui', 'allowemptycommit')
2851 )
2609 if not allowemptycommit: 2852 if not allowemptycommit:
2610 return None 2853 return None
2611 2854
2612 if merge and cctx.deleted(): 2855 if merge and cctx.deleted():
2613 raise error.Abort(_("cannot commit merge with missing files")) 2856 raise error.Abort(_("cannot commit merge with missing files"))
2615 ms = mergemod.mergestate.read(self) 2858 ms = mergemod.mergestate.read(self)
2616 mergeutil.checkunresolved(ms) 2859 mergeutil.checkunresolved(ms)
2617 2860
2618 if editor: 2861 if editor:
2619 cctx._text = editor(self, cctx, subs) 2862 cctx._text = editor(self, cctx, subs)
2620 edited = (text != cctx._text) 2863 edited = text != cctx._text
2621 2864
2622 # Save commit message in case this transaction gets rolled back 2865 # Save commit message in case this transaction gets rolled back
2623 # (e.g. by a pretxncommit hook). Leave the content alone on 2866 # (e.g. by a pretxncommit hook). Leave the content alone on
2624 # the assumption that the user will use the same editor again. 2867 # the assumption that the user will use the same editor again.
2625 msgfn = self.savecommitmessage(cctx._text) 2868 msgfn = self.savecommitmessage(cctx._text)
2627 # commit subs and write new state 2870 # commit subs and write new state
2628 if subs: 2871 if subs:
2629 uipathfn = scmutil.getuipathfn(self) 2872 uipathfn = scmutil.getuipathfn(self)
2630 for s in sorted(commitsubs): 2873 for s in sorted(commitsubs):
2631 sub = wctx.sub(s) 2874 sub = wctx.sub(s)
2632 self.ui.status(_('committing subrepository %s\n') % 2875 self.ui.status(
2633 uipathfn(subrepoutil.subrelpath(sub))) 2876 _('committing subrepository %s\n')
2877 % uipathfn(subrepoutil.subrelpath(sub))
2878 )
2634 sr = sub.commit(cctx._text, user, date) 2879 sr = sub.commit(cctx._text, user, date)
2635 newstate[s] = (newstate[s][0], sr) 2880 newstate[s] = (newstate[s][0], sr)
2636 subrepoutil.writestate(self, newstate) 2881 subrepoutil.writestate(self, newstate)
2637 2882
2638 p1, p2 = self.dirstate.parents() 2883 p1, p2 = self.dirstate.parents()
2639 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '') 2884 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2640 try: 2885 try:
2641 self.hook("precommit", throw=True, parent1=hookp1, 2886 self.hook(
2642 parent2=hookp2) 2887 "precommit", throw=True, parent1=hookp1, parent2=hookp2
2888 )
2643 with self.transaction('commit'): 2889 with self.transaction('commit'):
2644 ret = self.commitctx(cctx, True) 2890 ret = self.commitctx(cctx, True)
2645 # update bookmarks, dirstate and mergestate 2891 # update bookmarks, dirstate and mergestate
2646 bookmarks.update(self, [p1, p2], ret) 2892 bookmarks.update(self, [p1, p2], ret)
2647 cctx.markcommitted(ret) 2893 cctx.markcommitted(ret)
2648 ms.reset() 2894 ms.reset()
2649 except: # re-raises 2895 except: # re-raises
2650 if edited: 2896 if edited:
2651 self.ui.write( 2897 self.ui.write(
2652 _('note: commit message saved in %s\n') % msgfn) 2898 _('note: commit message saved in %s\n') % msgfn
2899 )
2653 raise 2900 raise
2654 2901
2655 def commithook(): 2902 def commithook():
2656 # hack for commands that use a temporary commit (e.g. histedit) 2903 # hack for commands that use a temporary commit (e.g. histedit)
2657 # temporary commit got stripped before hook release 2904 # temporary commit got stripped before hook release
2658 if self.changelog.hasnode(ret): 2905 if self.changelog.hasnode(ret):
2659 self.hook("commit", node=hex(ret), parent1=hookp1, 2906 self.hook(
2660 parent2=hookp2) 2907 "commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2908 )
2909
2661 self._afterlock(commithook) 2910 self._afterlock(commithook)
2662 return ret 2911 return ret
2663 2912
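A hedged usage sketch of commit(); as the allowemptycommit branch above shows, it returns None when there is nothing to commit:

    node = repo.commit(                       # repo assumed
        text=b'example: fix typo',            # invented message
        user=b'Jane Doe <jane@example.com>',  # invented identity
    )
    if node is None:
        repo.ui.status(b'nothing changed\n')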
2664 @unfilteredmethod 2913 @unfilteredmethod
2665 def commitctx(self, ctx, error=False, origctx=None): 2914 def commitctx(self, ctx, error=False, origctx=None):
2681 p1, p2 = ctx.p1(), ctx.p2() 2930 p1, p2 = ctx.p1(), ctx.p2()
2682 user = ctx.user() 2931 user = ctx.user()
2683 2932
2684 writecopiesto = self.ui.config('experimental', 'copies.write-to') 2933 writecopiesto = self.ui.config('experimental', 'copies.write-to')
2685 writefilecopymeta = writecopiesto != 'changeset-only' 2934 writefilecopymeta = writecopiesto != 'changeset-only'
2686 writechangesetcopy = (writecopiesto in 2935 writechangesetcopy = writecopiesto in (
2687 ('changeset-only', 'compatibility')) 2936 'changeset-only',
2937 'compatibility',
2938 )
2688 p1copies, p2copies = None, None 2939 p1copies, p2copies = None, None
2689 if writechangesetcopy: 2940 if writechangesetcopy:
2690 p1copies = ctx.p1copies() 2941 p1copies = ctx.p1copies()
2691 p2copies = ctx.p2copies() 2942 p2copies = ctx.p2copies()
2692 filesadded, filesremoved = None, None 2943 filesadded, filesremoved = None, None
2723 fctx = ctx[f] 2974 fctx = ctx[f]
2724 if fctx is None: 2975 if fctx is None:
2725 removed.append(f) 2976 removed.append(f)
2726 else: 2977 else:
2727 added.append(f) 2978 added.append(f)
2728 m[f] = self._filecommit(fctx, m1, m2, linkrev, 2979 m[f] = self._filecommit(
2729 trp, changed, 2980 fctx,
2730 writefilecopymeta) 2981 m1,
2982 m2,
2983 linkrev,
2984 trp,
2985 changed,
2986 writefilecopymeta,
2987 )
2731 m.setflag(f, fctx.flags()) 2988 m.setflag(f, fctx.flags())
2732 except OSError: 2989 except OSError:
2733 self.ui.warn(_("trouble committing %s!\n") % 2990 self.ui.warn(
2734 uipathfn(f)) 2991 _("trouble committing %s!\n") % uipathfn(f)
2992 )
2735 raise 2993 raise
2736 except IOError as inst: 2994 except IOError as inst:
2737 errcode = getattr(inst, 'errno', errno.ENOENT) 2995 errcode = getattr(inst, 'errno', errno.ENOENT)
2738 if error or errcode and errcode != errno.ENOENT: 2996 if error or errcode and errcode != errno.ENOENT:
2739 self.ui.warn(_("trouble committing %s!\n") % 2997 self.ui.warn(
2740 uipathfn(f)) 2998 _("trouble committing %s!\n") % uipathfn(f)
2999 )
2741 raise 3000 raise
2742 3001
2743 # update manifest 3002 # update manifest
2744 removed = [f for f in removed if f in m1 or f in m2] 3003 removed = [f for f in removed if f in m1 or f in m2]
2745 drop = sorted([f for f in removed if f in m]) 3004 drop = sorted([f for f in removed if f in m])
2746 for f in drop: 3005 for f in drop:
2747 del m[f] 3006 del m[f]
2748 if p2.rev() != nullrev: 3007 if p2.rev() != nullrev:
3008
2749 @util.cachefunc 3009 @util.cachefunc
2750 def mas(): 3010 def mas():
2751 p1n = p1.node() 3011 p1n = p1.node()
2752 p2n = p2.node() 3012 p2n = p2.node()
2753 cahs = self.changelog.commonancestorsheads(p1n, p2n) 3013 cahs = self.changelog.commonancestorsheads(p1n, p2n)
2754 if not cahs: 3014 if not cahs:
2755 cahs = [nullrev] 3015 cahs = [nullrev]
2756 return [self[r].manifest() for r in cahs] 3016 return [self[r].manifest() for r in cahs]
3017
2757 def deletionfromparent(f): 3018 def deletionfromparent(f):
2758 # When a file is removed relative to p1 in a merge, this 3019 # When a file is removed relative to p1 in a merge, this
2759 # function determines whether the absence is due to a 3020 # function determines whether the absence is due to a
2760 # deletion from a parent, or whether the merge commit 3021 # deletion from a parent, or whether the merge commit
2761 # itself deletes the file. We decide this by doing a 3022 # itself deletes the file. We decide this by doing a
2774 # described above is not done directly in _filecommit 3035 # described above is not done directly in _filecommit
2775 # when creating the list of changed files, however 3036 # when creating the list of changed files, however
2776 # it does something very similar by comparing filelog 3037 # it does something very similar by comparing filelog
2777 # nodes. 3038 # nodes.
2778 if f in m1: 3039 if f in m1:
2779 return (f not in m2 3040 return f not in m2 and all(
2780 and all(f in ma and ma.find(f) == m1.find(f) 3041 f in ma and ma.find(f) == m1.find(f)
2781 for ma in mas())) 3042 for ma in mas()
3043 )
2782 elif f in m2: 3044 elif f in m2:
2783 return all(f in ma and ma.find(f) == m2.find(f) 3045 return all(
2784 for ma in mas()) 3046 f in ma and ma.find(f) == m2.find(f)
3047 for ma in mas()
3048 )
2785 else: 3049 else:
2786 return True 3050 return True
3051
2787 removed = [f for f in removed if not deletionfromparent(f)] 3052 removed = [f for f in removed if not deletionfromparent(f)]
2788 3053
2789 files = changed + removed 3054 files = changed + removed
2790 md = None 3055 md = None
2791 if not files: 3056 if not files:
2792 # if no "files" actually changed in terms of the changelog, 3057 # if no "files" actually changed in terms of the changelog,
2793 # try hard to detect an unmodified manifest entry so that the 3058 # try hard to detect an unmodified manifest entry so that the
2794 # exact same commit can be reproduced later on convert. 3059 # exact same commit can be reproduced later on convert.
2795 md = m1.diff(m, scmutil.matchfiles(self, ctx.files())) 3060 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2796 if not files and md: 3061 if not files and md:
2797 self.ui.debug('not reusing manifest (no file change in ' 3062 self.ui.debug(
2798 'changelog, but manifest differs)\n') 3063 'not reusing manifest (no file change in '
3064 'changelog, but manifest differs)\n'
3065 )
2799 if files or md: 3066 if files or md:
2800 self.ui.note(_("committing manifest\n")) 3067 self.ui.note(_("committing manifest\n"))
2801 # we're using narrowmatch here since it's already applied at 3068 # we're using narrowmatch here since it's already applied at
2802 # other stages (such as dirstate.walk), so we're already 3069 # other stages (such as dirstate.walk), so we're already
2803 # ignoring things outside of narrowspec in most cases. The 3070 # ignoring things outside of narrowspec in most cases. The
2804 # one case where we might have files outside the narrowspec 3071 # one case where we might have files outside the narrowspec
2805 # at this point is merges, and we already error out in the 3072 # at this point is merges, and we already error out in the
2806 # case where the merge has files outside of the narrowspec, 3073 # case where the merge has files outside of the narrowspec,
2807 # so this is safe. 3074 # so this is safe.
2808 mn = mctx.write(trp, linkrev, 3075 mn = mctx.write(
2809 p1.manifestnode(), p2.manifestnode(), 3076 trp,
2810 added, drop, match=self.narrowmatch()) 3077 linkrev,
3078 p1.manifestnode(),
3079 p2.manifestnode(),
3080 added,
3081 drop,
3082 match=self.narrowmatch(),
3083 )
2811 3084
2812 if writechangesetcopy: 3085 if writechangesetcopy:
2813 filesadded = [f for f in changed 3086 filesadded = [
2814 if not (f in m1 or f in m2)] 3087 f for f in changed if not (f in m1 or f in m2)
3088 ]
2815 filesremoved = removed 3089 filesremoved = removed
2816 else: 3090 else:
2817 self.ui.debug('reusing manifest from p1 (listed files ' 3091 self.ui.debug(
2818 'actually unchanged)\n') 3092 'reusing manifest from p1 (listed files '
3093 'actually unchanged)\n'
3094 )
2819 mn = p1.manifestnode() 3095 mn = p1.manifestnode()
2820 else: 3096 else:
2821 self.ui.debug('reusing manifest from p1 (no file change)\n') 3097 self.ui.debug('reusing manifest from p1 (no file change)\n')
2822 mn = p1.manifestnode() 3098 mn = p1.manifestnode()
2823 files = [] 3099 files = []
2836 files = origctx.files() 3112 files = origctx.files()
2837 3113
2838 # update changelog 3114 # update changelog
2839 self.ui.note(_("committing changelog\n")) 3115 self.ui.note(_("committing changelog\n"))
2840 self.changelog.delayupdate(tr) 3116 self.changelog.delayupdate(tr)
2841 n = self.changelog.add(mn, files, ctx.description(), 3117 n = self.changelog.add(
2842 trp, p1.node(), p2.node(), 3118 mn,
2843 user, ctx.date(), ctx.extra().copy(), 3119 files,
2844 p1copies, p2copies, filesadded, filesremoved) 3120 ctx.description(),
3121 trp,
3122 p1.node(),
3123 p2.node(),
3124 user,
3125 ctx.date(),
3126 ctx.extra().copy(),
3127 p1copies,
3128 p2copies,
3129 filesadded,
3130 filesremoved,
3131 )
2845 xp1, xp2 = p1.hex(), p2 and p2.hex() or '' 3132 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2846 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, 3133 self.hook(
2847 parent2=xp2) 3134 'pretxncommit',
3135 throw=True,
3136 node=hex(n),
3137 parent1=xp1,
3138 parent2=xp2,
3139 )
2848 # set the new commit is proper phase 3140 # set the new commit is proper phase
2849 targetphase = subrepoutil.newcommitphase(self.ui, ctx) 3141 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2850 if targetphase: 3142 if targetphase:
2851 # retracting the boundary does not alter the parent changeset. 3143 # retracting the boundary does not alter the parent changeset.
2852 # if a parent has a higher phase, the resulting phase will 3144 # if a parent has a higher phase, the resulting phase will
2904 # head, refresh the tag cache, then immediately add a new head. 3196 # head, refresh the tag cache, then immediately add a new head.
2905 # But I think doing it this way is necessary for the "instant 3197 # But I think doing it this way is necessary for the "instant
2906 # tag cache retrieval" case to work. 3198 # tag cache retrieval" case to work.
2907 self.invalidate() 3199 self.invalidate()
2908 3200
2909 def status(self, node1='.', node2=None, match=None, 3201 def status(
2910 ignored=False, clean=False, unknown=False, 3202 self,
2911 listsubrepos=False): 3203 node1='.',
3204 node2=None,
3205 match=None,
3206 ignored=False,
3207 clean=False,
3208 unknown=False,
3209 listsubrepos=False,
3210 ):
2912 '''a convenience method that calls node1.status(node2)''' 3211 '''a convenience method that calls node1.status(node2)'''
2913 return self[node1].status(node2, match, ignored, clean, unknown, 3212 return self[node1].status(
2914 listsubrepos) 3213 node2, match, ignored, clean, unknown, listsubrepos
3214 )
2915 3215
2916 def addpostdsstatus(self, ps): 3216 def addpostdsstatus(self, ps):
2917 """Add a callback to run within the wlock, at the point at which status 3217 """Add a callback to run within the wlock, at the point at which status
2918 fixups happen. 3218 fixups happen.
2919 3219
3037 if exc.hint: 3337 if exc.hint:
3038 self.ui.write_err(_("(%s)\n") % exc.hint) 3338 self.ui.write_err(_("(%s)\n") % exc.hint)
3039 return False 3339 return False
3040 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key)) 3340 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
3041 ret = pushkey.push(self, namespace, key, old, new) 3341 ret = pushkey.push(self, namespace, key, old, new)
3342
3042 def runhook(): 3343 def runhook():
3043 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new, 3344 self.hook(
3044 ret=ret) 3345 'pushkey',
3346 namespace=namespace,
3347 key=key,
3348 old=old,
3349 new=new,
3350 ret=ret,
3351 )
3352
3045 self._afterlock(runhook) 3353 self._afterlock(runhook)
3046 return ret 3354 return ret
3047 3355
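pushkey() is the generic namespace/key store behind bookmark and phase exchange; a hedged sketch, where old=b'' conventionally requests creation (the bookmark name is invented; hex is mercurial.node.hex):

    newnode = repo[b'tip'].node()
    ok = repo.pushkey(b'bookmarks', b'feature-x', b'', hex(newnode))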
3048 def listkeys(self, namespace): 3356 def listkeys(self, namespace):
3049 self.hook('prelistkeys', throw=True, namespace=namespace) 3357 self.hook('prelistkeys', throw=True, namespace=namespace)
3052 self.hook('listkeys', namespace=namespace, values=values) 3360 self.hook('listkeys', namespace=namespace, values=values)
3053 return values 3361 return values
3054 3362
3055 def debugwireargs(self, one, two, three=None, four=None, five=None): 3363 def debugwireargs(self, one, two, three=None, four=None, five=None):
3056 '''used to test argument passing over the wire''' 3364 '''used to test argument passing over the wire'''
3057 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three), 3365 return "%s %s %s %s %s" % (
3058 pycompat.bytestr(four), 3366 one,
3059 pycompat.bytestr(five)) 3367 two,
3368 pycompat.bytestr(three),
3369 pycompat.bytestr(four),
3370 pycompat.bytestr(five),
3371 )
3060 3372
3061 def savecommitmessage(self, text): 3373 def savecommitmessage(self, text):
3062 fp = self.vfs('last-message.txt', 'wb') 3374 fp = self.vfs('last-message.txt', 'wb')
3063 try: 3375 try:
3064 fp.write(text) 3376 fp.write(text)
3065 finally: 3377 finally:
3066 fp.close() 3378 fp.close()
3067 return self.pathto(fp.name[len(self.root) + 1:]) 3379 return self.pathto(fp.name[len(self.root) + 1 :])
3380
3068 3381
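A hedged sketch of savecommitmessage() above: the text lands in .hg/last-message.txt and the return value is a cwd-relative path, suitable for messages like the 'note: commit message saved in %s' output in commit():

    msgpath = repo.savecommitmessage(b'WIP: refactor dirstate')  # repo assumed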
3069 # used to avoid circular references so destructors work 3382 # used to avoid circular references so destructors work
3070 def aftertrans(files): 3383 def aftertrans(files):
3071 renamefiles = [tuple(t) for t in files] 3384 renamefiles = [tuple(t) for t in files]
3385
3072 def a(): 3386 def a():
3073 for vfs, src, dest in renamefiles: 3387 for vfs, src, dest in renamefiles:
3074 # if src and dest refer to the same file, vfs.rename is a no-op, 3388 # if src and dest refer to the same file, vfs.rename is a no-op,
3075 # leaving both src and dest on disk. delete dest to make sure 3389 # leaving both src and dest on disk. delete dest to make sure
3076 # the rename couldn't be such a no-op. 3390 # the rename couldn't be such a no-op.
3077 vfs.tryunlink(dest) 3391 vfs.tryunlink(dest)
3078 try: 3392 try:
3079 vfs.rename(src, dest) 3393 vfs.rename(src, dest)
3080 except OSError: # journal file does not yet exist 3394 except OSError: # journal file does not yet exist
3081 pass 3395 pass
3396
3082 return a 3397 return a
3398
3083 3399
3084 def undoname(fn): 3400 def undoname(fn):
3085 base, name = os.path.split(fn) 3401 base, name = os.path.split(fn)
3086 assert name.startswith('journal') 3402 assert name.startswith('journal')
3087 return os.path.join(base, name.replace('journal', 'undo', 1)) 3403 return os.path.join(base, name.replace('journal', 'undo', 1))
3088 3404
3405
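aftertrans() and undoname() together promote journal files to undo files once a transaction succeeds; illustrative inputs and expected results:

    undoname(b'.hg/store/journal')     # -> b'.hg/store/undo'
    undoname(b'.hg/journal.dirstate')  # -> b'.hg/undo.dirstate'
    # a name not starting with 'journal' trips the assertion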
3089 def instance(ui, path, create, intents=None, createopts=None): 3406 def instance(ui, path, create, intents=None, createopts=None):
3090 localpath = util.urllocalpath(path) 3407 localpath = util.urllocalpath(path)
3091 if create: 3408 if create:
3092 createrepository(ui, localpath, createopts=createopts) 3409 createrepository(ui, localpath, createopts=createopts)
3093 3410
3094 return makelocalrepository(ui, localpath, intents=intents) 3411 return makelocalrepository(ui, localpath, intents=intents)
3095 3412
3413
3096 def islocal(path): 3414 def islocal(path):
3097 return True 3415 return True
3416
3098 3417
3099 def defaultcreateopts(ui, createopts=None): 3418 def defaultcreateopts(ui, createopts=None):
3100 """Populate the default creation options for a repository. 3419 """Populate the default creation options for a repository.
3101 3420
3102 A dictionary of explicitly requested creation options can be passed 3421 A dictionary of explicitly requested creation options can be passed
3107 if 'backend' not in createopts: 3426 if 'backend' not in createopts:
3108 # experimental config: storage.new-repo-backend 3427 # experimental config: storage.new-repo-backend
3109 createopts['backend'] = ui.config('storage', 'new-repo-backend') 3428 createopts['backend'] = ui.config('storage', 'new-repo-backend')
3110 3429
3111 return createopts 3430 return createopts
3431
3112 3432
3113 def newreporequirements(ui, createopts): 3433 def newreporequirements(ui, createopts):
3114 """Determine the set of requirements for a new local repository. 3434 """Determine the set of requirements for a new local repository.
3115 3435
3116 Extensions can wrap this function to specify custom requirements for 3436 Extensions can wrap this function to specify custom requirements for
3126 requirements.add('shared') 3446 requirements.add('shared')
3127 3447
3128 return requirements 3448 return requirements
3129 3449
3130 if 'backend' not in createopts: 3450 if 'backend' not in createopts:
3131 raise error.ProgrammingError('backend key not present in createopts; ' 3451 raise error.ProgrammingError(
3132 'was defaultcreateopts() called?') 3452 'backend key not present in createopts; '
3453 'was defaultcreateopts() called?'
3454 )
3133 3455
3134 if createopts['backend'] != 'revlogv1': 3456 if createopts['backend'] != 'revlogv1':
3135 raise error.Abort(_('unable to determine repository requirements for ' 3457 raise error.Abort(
3136 'storage backend: %s') % createopts['backend']) 3458 _(
3459 'unable to determine repository requirements for '
3460 'storage backend: %s'
3461 )
3462 % createopts['backend']
3463 )
3137 3464
3138 requirements = {'revlogv1'} 3465 requirements = {'revlogv1'}
3139 if ui.configbool('format', 'usestore'): 3466 if ui.configbool('format', 'usestore'):
3140 requirements.add('store') 3467 requirements.add('store')
3141 if ui.configbool('format', 'usefncache'): 3468 if ui.configbool('format', 'usefncache'):
3143 if ui.configbool('format', 'dotencode'): 3470 if ui.configbool('format', 'dotencode'):
3144 requirements.add('dotencode') 3471 requirements.add('dotencode')
3145 3472
3146 compengine = ui.config('format', 'revlog-compression') 3473 compengine = ui.config('format', 'revlog-compression')
3147 if compengine not in util.compengines: 3474 if compengine not in util.compengines:
3148 raise error.Abort(_('compression engine %s defined by ' 3475 raise error.Abort(
3149 'format.revlog-compression not available') % 3476 _(
3150 compengine, 3477 'compression engine %s defined by '
3151 hint=_('run "hg debuginstall" to list available ' 3478 'format.revlog-compression not available'
3152 'compression engines')) 3479 )
3480 % compengine,
3481 hint=_(
3482 'run "hg debuginstall" to list available ' 'compression engines'
3483 ),
3484 )
3153 3485
3154 # zlib is the historical default and doesn't need an explicit requirement. 3486 # zlib is the historical default and doesn't need an explicit requirement.
3155 elif compengine == 'zstd': 3487 elif compengine == 'zstd':
3156 requirements.add('revlog-compression-zstd') 3488 requirements.add('revlog-compression-zstd')
3157 elif compengine != 'zlib': 3489 elif compengine != 'zlib':
3186 3518
3187 if ui.configbool('format', 'bookmarks-in-store'): 3519 if ui.configbool('format', 'bookmarks-in-store'):
3188 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) 3520 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3189 3521
3190 return requirements 3522 return requirements
3523
3191 3524
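
As the docstring above notes, extensions can wrap newreporequirements() to inject their own requirements. A minimal sketch of such an extension; the requirement name is hypothetical, and the wrapping uses the stock extensions.wrapfunction API:

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, ui, createopts):
        # Let core compute its set, then add ours so clients without
        # this extension refuse to open newly created repositories.
        requirements = orig(ui, createopts)
        requirements.add(b'exp-frobnicate')
        return requirements

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo, 'newreporequirements', _newreporequirements
        )
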
3192 def filterknowncreateopts(ui, createopts): 3525 def filterknowncreateopts(ui, createopts):
3193 """Filters a dict of repo creation options against options that are known. 3526 """Filters a dict of repo creation options against options that are known.
3194 3527
3195 Receives a dict of repo creation options and returns a dict of those 3528 Receives a dict of repo creation options and returns a dict of those
3212 'shareditems', 3545 'shareditems',
3213 'shallowfilestore', 3546 'shallowfilestore',
3214 } 3547 }
3215 3548
3216 return {k: v for k, v in createopts.items() if k not in known} 3549 return {k: v for k, v in createopts.items() if k not in known}
3550
3217 3551
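
filterknowncreateopts() returns only the options it does not recognize, so an empty result means everything the caller asked for was understood. A sketch with one known key and one made-up option, again assuming Python 3 bytes keys:

    from mercurial import ui as uimod, localrepo

    ui = uimod.ui.load()
    unknown = localrepo.filterknowncreateopts(
        ui, {b'backend': b'revlogv1', b'frobnicate': True}
    )
    # 'backend' is known; only the made-up option is reported back,
    # which createrepository() below turns into an Abort.
    assert unknown == {b'frobnicate': True}
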
3218 def createrepository(ui, path, createopts=None): 3552 def createrepository(ui, path, createopts=None):
3219 """Create a new repository in a vfs. 3553 """Create a new repository in a vfs.
3220 3554
3221 ``path`` path to the new repo's working directory. 3555 ``path`` path to the new repo's working directory.
3245 createopts = defaultcreateopts(ui, createopts=createopts) 3579 createopts = defaultcreateopts(ui, createopts=createopts)
3246 3580
3247 unknownopts = filterknowncreateopts(ui, createopts) 3581 unknownopts = filterknowncreateopts(ui, createopts)
3248 3582
3249 if not isinstance(unknownopts, dict): 3583 if not isinstance(unknownopts, dict):
3250 raise error.ProgrammingError('filterknowncreateopts() did not return ' 3584 raise error.ProgrammingError(
3251 'a dict') 3585 'filterknowncreateopts() did not return ' 'a dict'
3586 )
3252 3587
3253 if unknownopts: 3588 if unknownopts:
3254 raise error.Abort(_('unable to create repository because of unknown ' 3589 raise error.Abort(
3255 'creation option: %s') % 3590 _(
3256 ', '.join(sorted(unknownopts)), 3591 'unable to create repository because of unknown '
3257 hint=_('is a required extension not loaded?')) 3592 'creation option: %s'
3593 )
3594 % ', '.join(sorted(unknownopts)),
3595 hint=_('is a required extension not loaded?'),
3596 )
3258 3597
3259 requirements = newreporequirements(ui, createopts=createopts) 3598 requirements = newreporequirements(ui, createopts=createopts)
3260 3599
3261 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True) 3600 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3262 3601
3271 try: 3610 try:
3272 sharedpath = os.path.relpath(sharedpath, hgvfs.base) 3611 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3273 except (IOError, ValueError) as e: 3612 except (IOError, ValueError) as e:
3274 # ValueError is raised on Windows if the drive letters differ 3613 # ValueError is raised on Windows if the drive letters differ
3275 # on each path. 3614 # on each path.
3276 raise error.Abort(_('cannot calculate relative path'), 3615 raise error.Abort(
3277 hint=stringutil.forcebytestr(e)) 3616 _('cannot calculate relative path'),
3617 hint=stringutil.forcebytestr(e),
3618 )
3278 3619
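
The ValueError guarded against here is Windows-specific: os.path.relpath() refuses to relate paths that live on different drives. A standalone reproduction:

    import os.path

    try:
        # On Windows this raises ValueError because the drive letters
        # differ; on POSIX there is a single root, so it cannot happen.
        os.path.relpath('D:\\shared\\store', 'C:\\repo\\.hg')
    except ValueError as e:
        print('cannot calculate relative path:', e)
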
3279 if not wdirvfs.exists(): 3620 if not wdirvfs.exists():
3280 wdirvfs.makedirs() 3621 wdirvfs.makedirs()
3281 3622
3282 hgvfs.makedir(notindexed=True) 3623 hgvfs.makedir(notindexed=True)
3293 # effectively locks out old clients and prevents them from 3634 # effectively locks out old clients and prevents them from
3294 # mucking with a repo in an unknown format. 3635 # mucking with a repo in an unknown format.
3295 # 3636 #
3296 # The revlog header has version 2, which won't be recognized by 3637 # The revlog header has version 2, which won't be recognized by
3297 # such old clients. 3638 # such old clients.
3298 hgvfs.append(b'00changelog.i', 3639 hgvfs.append(
3299 b'\0\0\0\2 dummy changelog to prevent using the old repo ' 3640 b'00changelog.i',
3300 b'layout') 3641 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3642 b'layout',
3643 )
3301 3644
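
The lock-out described in the comments works because the first four bytes of a revlog are a big-endian 32-bit integer whose low 16 bits hold the format version; b'\0\0\0\2' decodes to version 2, which such old clients do not recognize. A quick standalone check of that encoding:

    import struct

    header = b'\0\0\0\2 dummy changelog to prevent using the old repo layout'
    (versionflags,) = struct.unpack('>I', header[:4])
    flags = versionflags & 0xFFFF0000    # high 16 bits: feature flags
    version = versionflags & 0x0000FFFF  # low 16 bits: revlog version
    assert (flags, version) == (0, 2)    # v2 is unknown to old clients
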
3302 scmutil.writerequires(hgvfs, requirements) 3645 scmutil.writerequires(hgvfs, requirements)
3303 3646
3304 # Write out file telling readers where to find the shared store. 3647 # Write out file telling readers where to find the shared store.
3305 if 'sharedrepo' in createopts: 3648 if 'sharedrepo' in createopts:
3306 hgvfs.write(b'sharedpath', sharedpath) 3649 hgvfs.write(b'sharedpath', sharedpath)
3307 3650
3308 if createopts.get('shareditems'): 3651 if createopts.get('shareditems'):
3309 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n' 3652 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3310 hgvfs.write(b'shared', shared) 3653 hgvfs.write(b'shared', shared)
3654
3311 3655
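
For a plain (non-shared) repository, the net effect of createrepository() is the .hg directory, the dummy changelog above and the requires file. A sketch that pokes at the result; the path is an assumption, and `hg init` remains the supported interface:

    from mercurial import ui as uimod, localrepo

    ui = uimod.ui.load()
    localrepo.createrepository(ui, b'/tmp/demo-repo')

    with open('/tmp/demo-repo/.hg/requires', 'rb') as fp:
        print(fp.read())   # one requirement per line, e.g. b'revlogv1\n...'
    with open('/tmp/demo-repo/.hg/00changelog.i', 'rb') as fp:
        print(fp.read(4))  # b'\x00\x00\x00\x02' -- the version-2 marker
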
3312 def poisonrepository(repo): 3656 def poisonrepository(repo):
3313 """Poison a repository instance so it can no longer be used.""" 3657 """Poison a repository instance so it can no longer be used."""
3314 # Perform any cleanup on the instance. 3658 # Perform any cleanup on the instance.
3315 repo.close() 3659 repo.close()
3322 class poisonedrepository(object): 3666 class poisonedrepository(object):
3323 def __getattribute__(self, item): 3667 def __getattribute__(self, item):
3324 if item == r'close': 3668 if item == r'close':
3325 return object.__getattribute__(self, item) 3669 return object.__getattribute__(self, item)
3326 3670
3327 raise error.ProgrammingError('repo instances should not be used ' 3671 raise error.ProgrammingError(
3328 'after unshare') 3672 'repo instances should not be used ' 'after unshare'
3673 )
3329 3674
3330 def close(self): 3675 def close(self):
3331 pass 3676 pass
3332 3677
3333 # We may have a repoview, which intercepts __setattr__. So be sure 3678 # We may have a repoview, which intercepts __setattr__. So be sure