comparison mercurial/obsolete.py @ 43076:2372284d9457

formatting: blacken the codebase This is using my patch to black (https://github.com/psf/black/pull/826) so we don't un-wrap collection literals. Done with: hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S # skip-blame mass-reformatting only # no-check-commit reformats foo_bar functions Differential Revision: https://phab.mercurial-scm.org/D6971
author Augie Fackler <augie@google.com>
date Sun, 06 Oct 2019 09:45:02 -0400
parents ca762c2bbe6b
children 687b865b95ad
comparison
equal deleted inserted replaced
43075:57875cf423c9 43076:2372284d9457
96 # Options for obsolescence 96 # Options for obsolescence
97 createmarkersopt = 'createmarkers' 97 createmarkersopt = 'createmarkers'
98 allowunstableopt = 'allowunstable' 98 allowunstableopt = 'allowunstable'
99 exchangeopt = 'exchange' 99 exchangeopt = 'exchange'
100 100
101
101 def _getoptionvalue(repo, option): 102 def _getoptionvalue(repo, option):
102 """Returns True if the given repository has the given obsolete option 103 """Returns True if the given repository has the given obsolete option
103 enabled. 104 enabled.
104 """ 105 """
105 configkey = 'evolution.%s' % option 106 configkey = 'evolution.%s' % option
125 if newconfig: 126 if newconfig:
126 result.add('createmarkers') 127 result.add('createmarkers')
127 128
128 return option in result 129 return option in result
129 130
131
def getoptions(repo):
    """Returns dicts showing state of obsolescence features."""

    # Query each evolution sub-option once.
    createmarkers = _getoptionvalue(repo, createmarkersopt)
    allowunstable = _getoptionvalue(repo, allowunstableopt)
    exchange = _getoptionvalue(repo, exchangeopt)

    # The other options are meaningless without marker creation, so
    # refuse inconsistent configurations up front.
    if not createmarkers and (allowunstable or exchange):
        raise error.Abort(
            _(
                "'createmarkers' obsolete option must be enabled "
                "if other obsolete options are enabled"
            )
        )

    return {
        createmarkersopt: createmarkers,
        allowunstableopt: allowunstable,
        exchangeopt: exchange,
    }
147 153
154
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    options = getoptions(repo)
    return options[option]
153 160
161
154 # Creating aliases for marker flags because evolve extension looks for 162 # Creating aliases for marker flags because evolve extension looks for
155 # bumpedfix in obsolete.py 163 # bumpedfix in obsolete.py
156 bumpedfix = obsutil.bumpedfix 164 bumpedfix = obsutil.bumpedfix
157 usingsha256 = obsutil.usingsha256 165 usingsha256 = obsutil.usingsha256
158 166
175 # - M bytes: metadata as a sequence of nul-terminated strings. Each 183 # - M bytes: metadata as a sequence of nul-terminated strings. Each
176 # string contains a key and a value, separated by a colon ':', without 184 # string contains a key and a value, separated by a colon ':', without
177 # additional encoding. Keys cannot contain '\0' or ':' and values 185 # additional encoding. Keys cannot contain '\0' or ':' and values
178 # cannot contain '\0'. 186 # cannot contain '\0'.
179 _fm0version = 0 187 _fm0version = 0
180 _fm0fixed = '>BIB20s' 188 _fm0fixed = '>BIB20s'
181 _fm0node = '20s' 189 _fm0node = '20s'
182 _fm0fsize = _calcsize(_fm0fixed) 190 _fm0fsize = _calcsize(_fm0fixed)
183 _fm0fnodesize = _calcsize(_fm0node) 191 _fm0fnodesize = _calcsize(_fm0node)
192
184 193
185 def _fm0readmarkers(data, off, stop): 194 def _fm0readmarkers(data, off, stop):
186 # Loop on markers 195 # Loop on markers
187 while off < stop: 196 while off < stop:
188 # read fixed part 197 # read fixed part
189 cur = data[off:off + _fm0fsize] 198 cur = data[off : off + _fm0fsize]
190 off += _fm0fsize 199 off += _fm0fsize
191 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur) 200 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
192 # read replacement 201 # read replacement
193 sucs = () 202 sucs = ()
194 if numsuc: 203 if numsuc:
195 s = (_fm0fnodesize * numsuc) 204 s = _fm0fnodesize * numsuc
196 cur = data[off:off + s] 205 cur = data[off : off + s]
197 sucs = _unpack(_fm0node * numsuc, cur) 206 sucs = _unpack(_fm0node * numsuc, cur)
198 off += s 207 off += s
199 # read metadata 208 # read metadata
200 # (metadata will be decoded on demand) 209 # (metadata will be decoded on demand)
201 metadata = data[off:off + mdsize] 210 metadata = data[off : off + mdsize]
202 if len(metadata) != mdsize: 211 if len(metadata) != mdsize:
203 raise error.Abort(_('parsing obsolete marker: metadata is too ' 212 raise error.Abort(
204 'short, %d bytes expected, got %d') 213 _(
205 % (mdsize, len(metadata))) 214 'parsing obsolete marker: metadata is too '
215 'short, %d bytes expected, got %d'
216 )
217 % (mdsize, len(metadata))
218 )
206 off += mdsize 219 off += mdsize
207 metadata = _fm0decodemeta(metadata) 220 metadata = _fm0decodemeta(metadata)
208 try: 221 try:
209 when, offset = metadata.pop('date', '0 0').split(' ') 222 when, offset = metadata.pop('date', '0 0').split(' ')
210 date = float(when), int(offset) 223 date = float(when), int(offset)
211 except ValueError: 224 except ValueError:
212 date = (0., 0) 225 date = (0.0, 0)
213 parents = None 226 parents = None
214 if 'p2' in metadata: 227 if 'p2' in metadata:
215 parents = (metadata.pop('p1', None), metadata.pop('p2', None)) 228 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
216 elif 'p1' in metadata: 229 elif 'p1' in metadata:
217 parents = (metadata.pop('p1', None),) 230 parents = (metadata.pop('p1', None),)
230 parents = None 243 parents = None
231 244
232 metadata = tuple(sorted(metadata.iteritems())) 245 metadata = tuple(sorted(metadata.iteritems()))
233 246
234 yield (pre, sucs, flags, metadata, date, parents) 247 yield (pre, sucs, flags, metadata, date, parents)
248
235 249
236 def _fm0encodeonemarker(marker): 250 def _fm0encodeonemarker(marker):
237 pre, sucs, flags, metadata, date, parents = marker 251 pre, sucs, flags, metadata, date, parents = marker
238 if flags & usingsha256: 252 if flags & usingsha256:
239 raise error.Abort(_('cannot handle sha256 with old obsstore format')) 253 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
251 format = _fm0fixed + (_fm0node * numsuc) 265 format = _fm0fixed + (_fm0node * numsuc)
252 data = [numsuc, len(metadata), flags, pre] 266 data = [numsuc, len(metadata), flags, pre]
253 data.extend(sucs) 267 data.extend(sucs)
254 return _pack(format, *data) + metadata 268 return _pack(format, *data) + metadata
255 269
270
256 def _fm0encodemeta(meta): 271 def _fm0encodemeta(meta):
257 """Return encoded metadata string to string mapping. 272 """Return encoded metadata string to string mapping.
258 273
259 Assume no ':' in key and no '\0' in both key and value.""" 274 Assume no ':' in key and no '\0' in both key and value."""
260 for key, value in meta.iteritems(): 275 for key, value in meta.iteritems():
261 if ':' in key or '\0' in key: 276 if ':' in key or '\0' in key:
262 raise ValueError("':' and '\0' are forbidden in metadata key'") 277 raise ValueError("':' and '\0' are forbidden in metadata key'")
263 if '\0' in value: 278 if '\0' in value:
264 raise ValueError("':' is forbidden in metadata value'") 279 raise ValueError("':' is forbidden in metadata value'")
265 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)]) 280 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
281
266 282
267 def _fm0decodemeta(data): 283 def _fm0decodemeta(data):
268 """Return string to string dictionary from encoded version.""" 284 """Return string to string dictionary from encoded version."""
269 d = {} 285 d = {}
270 for l in data.split('\0'): 286 for l in data.split('\0'):
271 if l: 287 if l:
272 key, value = l.split(':', 1) 288 key, value = l.split(':', 1)
273 d[key] = value 289 d[key] = value
274 return d 290 return d
291
275 292
276 ## Parsing and writing of version "1" 293 ## Parsing and writing of version "1"
277 # 294 #
278 # The header is followed by the markers. Each marker is made of: 295 # The header is followed by the markers. Each marker is made of:
279 # 296 #
314 _fm1nodesha1size = _calcsize(_fm1nodesha1) 331 _fm1nodesha1size = _calcsize(_fm1nodesha1)
315 _fm1nodesha256size = _calcsize(_fm1nodesha256) 332 _fm1nodesha256size = _calcsize(_fm1nodesha256)
316 _fm1fsize = _calcsize(_fm1fixed) 333 _fm1fsize = _calcsize(_fm1fixed)
317 _fm1parentnone = 3 334 _fm1parentnone = 3
318 _fm1parentshift = 14 335 _fm1parentshift = 14
319 _fm1parentmask = (_fm1parentnone << _fm1parentshift) 336 _fm1parentmask = _fm1parentnone << _fm1parentshift
320 _fm1metapair = 'BB' 337 _fm1metapair = 'BB'
321 _fm1metapairsize = _calcsize(_fm1metapair) 338 _fm1metapairsize = _calcsize(_fm1metapair)
339
322 340
323 def _fm1purereadmarkers(data, off, stop): 341 def _fm1purereadmarkers(data, off, stop):
324 # make some global constants local for performance 342 # make some global constants local for performance
325 noneflag = _fm1parentnone 343 noneflag = _fm1parentnone
326 sha2flag = usingsha256 344 sha2flag = usingsha256
392 metadata.append((data[off:o1], data[o1:o2])) 410 metadata.append((data[off:o1], data[o1:o2]))
393 off = o2 411 off = o2
394 412
395 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents) 413 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
396 414
415
397 def _fm1encodeonemarker(marker): 416 def _fm1encodeonemarker(marker):
398 pre, sucs, flags, metadata, date, parents = marker 417 pre, sucs, flags, metadata, date, parents = marker
399 # determine node size 418 # determine node size
400 _fm1node = _fm1nodesha1 419 _fm1node = _fm1nodesha1
401 if flags & usingsha256: 420 if flags & usingsha256:
409 numextranodes += numpar 428 numextranodes += numpar
410 formatnodes = _fm1node * numextranodes 429 formatnodes = _fm1node * numextranodes
411 formatmeta = _fm1metapair * len(metadata) 430 formatmeta = _fm1metapair * len(metadata)
412 format = _fm1fixed + formatnodes + formatmeta 431 format = _fm1fixed + formatnodes + formatmeta
413 # tz is stored in minutes so we divide by 60 432 # tz is stored in minutes so we divide by 60
414 tz = date[1]//60 433 tz = date[1] // 60
415 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre] 434 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
416 data.extend(sucs) 435 data.extend(sucs)
417 if parents is not None: 436 if parents is not None:
418 data.extend(parents) 437 data.extend(parents)
419 totalsize = _calcsize(format) 438 totalsize = _calcsize(format)
420 for key, value in metadata: 439 for key, value in metadata:
421 lk = len(key) 440 lk = len(key)
422 lv = len(value) 441 lv = len(value)
423 if lk > 255: 442 if lk > 255:
424 msg = ('obsstore metadata key cannot be longer than 255 bytes' 443 msg = (
425 ' (key "%s" is %u bytes)') % (key, lk) 444 'obsstore metadata key cannot be longer than 255 bytes'
445 ' (key "%s" is %u bytes)'
446 ) % (key, lk)
426 raise error.ProgrammingError(msg) 447 raise error.ProgrammingError(msg)
427 if lv > 255: 448 if lv > 255:
428 msg = ('obsstore metadata value cannot be longer than 255 bytes' 449 msg = (
429 ' (value "%s" for key "%s" is %u bytes)') % (value, key, lv) 450 'obsstore metadata value cannot be longer than 255 bytes'
451 ' (value "%s" for key "%s" is %u bytes)'
452 ) % (value, key, lv)
430 raise error.ProgrammingError(msg) 453 raise error.ProgrammingError(msg)
431 data.append(lk) 454 data.append(lk)
432 data.append(lv) 455 data.append(lv)
433 totalsize += lk + lv 456 totalsize += lk + lv
434 data[0] = totalsize 457 data[0] = totalsize
436 for key, value in metadata: 459 for key, value in metadata:
437 data.append(key) 460 data.append(key)
438 data.append(value) 461 data.append(value)
439 return ''.join(data) 462 return ''.join(data)
440 463
464
def _fm1readmarkers(data, off, stop):
    # Prefer the C implementation when the parsers module provides one;
    # fall back to the pure Python reader otherwise.
    reader = getattr(parsers, 'fm1readmarkers', None)
    if reader:
        return reader(data, off, stop)
    return _fm1purereadmarkers(data, off, stop)
446 470
471
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
# Decoders take (data, off, stop) and yield marker tuples; encoders take
# a single marker tuple and return its binary representation.
formats = {
    _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
    _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
}
478
451 479
452 def _readmarkerversion(data): 480 def _readmarkerversion(data):
453 return _unpack('>B', data[0:1])[0] 481 return _unpack('>B', data[0:1])[0]
482
454 483
455 @util.nogc 484 @util.nogc
456 def _readmarkers(data, off=None, stop=None): 485 def _readmarkers(data, off=None, stop=None):
457 """Read and enumerate markers from raw data""" 486 """Read and enumerate markers from raw data"""
458 diskversion = _readmarkerversion(data) 487 diskversion = _readmarkerversion(data)
463 if diskversion not in formats: 492 if diskversion not in formats:
464 msg = _('parsing obsolete marker: unknown version %r') % diskversion 493 msg = _('parsing obsolete marker: unknown version %r') % diskversion
465 raise error.UnknownVersion(msg, version=diskversion) 494 raise error.UnknownVersion(msg, version=diskversion)
466 return diskversion, formats[diskversion][0](data, off, stop) 495 return diskversion, formats[diskversion][0](data, off, stop)
467 496
497
def encodeheader(version=_fm0version):
    """Return the obsstore header for *version*: a single packed byte."""
    return _pack('>B', version)
500
470 501
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Yield the binary encoding of *markers*, optionally header first.

    Kept separate from flushmarkers(), it will be reused for
    markers exchange.
    """
    encoder = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for m in markers:
        yield encoder(m)
479 510
511
@util.nogc
def _addsuccessors(successors, markers):
    # Index each marker under its predecessor node (field 0 of the
    # marker tuple).
    for marker in markers:
        bucket = successors.setdefault(marker[0], set())
        bucket.add(marker)
516
484 517
@util.nogc
def _addpredecessors(predecessors, markers):
    # A marker is indexed once per successor node (field 1 of the marker
    # tuple is its successors).
    for marker in markers:
        for successor in marker[1]:
            bucket = predecessors.setdefault(successor, set())
            bucket.add(marker)
523
490 524
@util.nogc
def _addchildren(children, markers):
    # Index markers by the parents recorded in field 5 of the marker
    # tuple (None when no parent information was stored).
    for marker in markers:
        parents = marker[5]
        if parents is None:
            continue
        for parent in parents:
            bucket = children.setdefault(parent, set())
            bucket.add(marker)
498 532
533
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    # A marker must never list nullid among its successors.
    for marker in markers:
        if node.nullid not in marker[1]:
            continue
        raise error.Abort(
            _(
                'bad obsolescence marker detected: '
                'invalid successors nullid'
            )
        )
548
509 549
510 class obsstore(object): 550 class obsstore(object):
511 """Store obsolete markers 551 """Store obsolete markers
512 552
513 Markers can be accessed with two mappings: 553 Markers can be accessed with two mappings:
556 """True if marker creation is disabled 596 """True if marker creation is disabled
557 597
558 Remove me in the future when obsolete marker is always on.""" 598 Remove me in the future when obsolete marker is always on."""
559 return self._readonly 599 return self._readonly
560 600
561 def create(self, transaction, prec, succs=(), flag=0, parents=None, 601 def create(
562 date=None, metadata=None, ui=None): 602 self,
603 transaction,
604 prec,
605 succs=(),
606 flag=0,
607 parents=None,
608 date=None,
609 metadata=None,
610 ui=None,
611 ):
563 """obsolete: add a new obsolete marker 612 """obsolete: add a new obsolete marker
564 613
565 * ensuring it is hashable 614 * ensuring it is hashable
566 * check mandatory metadata 615 * check mandatory metadata
567 * encode metadata 616 * encode metadata
589 for succ in succs: 638 for succ in succs:
590 if len(succ) != 20: 639 if len(succ) != 20:
591 raise ValueError(succ) 640 raise ValueError(succ)
592 if prec in succs: 641 if prec in succs:
593 raise ValueError( 642 raise ValueError(
594 r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))) 643 r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
644 )
595 645
596 metadata = tuple(sorted(metadata.iteritems())) 646 metadata = tuple(sorted(metadata.iteritems()))
597 for k, v in metadata: 647 for k, v in metadata:
598 try: 648 try:
599 # might be better to reject non-ASCII keys 649 # might be better to reject non-ASCII keys
601 v.decode('utf-8') 651 v.decode('utf-8')
602 except UnicodeDecodeError: 652 except UnicodeDecodeError:
603 raise error.ProgrammingError( 653 raise error.ProgrammingError(
604 'obsstore metadata must be valid UTF-8 sequence ' 654 'obsstore metadata must be valid UTF-8 sequence '
605 '(key = %r, value = %r)' 655 '(key = %r, value = %r)'
606 % (pycompat.bytestr(k), pycompat.bytestr(v))) 656 % (pycompat.bytestr(k), pycompat.bytestr(v))
657 )
607 658
608 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents) 659 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
609 return bool(self.add(transaction, [marker])) 660 return bool(self.add(transaction, [marker]))
610 661
611 def add(self, transaction, markers): 662 def add(self, transaction, markers):
612 """Add new markers to the store 663 """Add new markers to the store
613 664
614 Take care of filtering duplicate. 665 Take care of filtering duplicate.
615 Return the number of new marker.""" 666 Return the number of new marker."""
616 if self._readonly: 667 if self._readonly:
617 raise error.Abort(_('creating obsolete markers is not enabled on ' 668 raise error.Abort(
618 'this repo')) 669 _('creating obsolete markers is not enabled on ' 'this repo')
670 )
619 known = set() 671 known = set()
620 getsuccessors = self.successors.get 672 getsuccessors = self.successors.get
621 new = [] 673 new = []
622 for m in markers: 674 for m in markers:
623 if m not in getsuccessors(m[0], ()) and m not in known: 675 if m not in getsuccessors(m[0], ()) and m not in known:
694 746
695 def _cached(self, attr): 747 def _cached(self, attr):
696 return attr in self.__dict__ 748 return attr in self.__dict__
697 749
698 def _addmarkers(self, markers, rawdata): 750 def _addmarkers(self, markers, rawdata):
699 markers = list(markers) # to allow repeated iteration 751 markers = list(markers) # to allow repeated iteration
700 self._data = self._data + rawdata 752 self._data = self._data + rawdata
701 self._all.extend(markers) 753 self._all.extend(markers)
702 if self._cached(r'successors'): 754 if self._cached(r'successors'):
703 _addsuccessors(self.successors, markers) 755 _addsuccessors(self.successors, markers)
704 if self._cached(r'predecessors'): 756 if self._cached(r'predecessors'):
738 seenmarkers |= direct 790 seenmarkers |= direct
739 pendingnodes -= seennodes 791 pendingnodes -= seennodes
740 seennodes |= pendingnodes 792 seennodes |= pendingnodes
741 return seenmarkers 793 return seenmarkers
742 794
795
743 def makestore(ui, repo): 796 def makestore(ui, repo):
744 """Create an obsstore instance from a repo.""" 797 """Create an obsstore instance from a repo."""
745 # read default format for new obsstore. 798 # read default format for new obsstore.
746 # developer config: format.obsstore-version 799 # developer config: format.obsstore-version
747 defaultformat = ui.configint('format', 'obsstore-version') 800 defaultformat = ui.configint('format', 'obsstore-version')
750 if defaultformat is not None: 803 if defaultformat is not None:
751 kwargs[r'defaultformat'] = defaultformat 804 kwargs[r'defaultformat'] = defaultformat
752 readonly = not isenabled(repo, createmarkersopt) 805 readonly = not isenabled(repo, createmarkersopt)
753 store = obsstore(repo.svfs, readonly=readonly, **kwargs) 806 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
754 if store and readonly: 807 if store and readonly:
755 ui.warn(_('obsolete feature not enabled but %i markers found!\n') 808 ui.warn(
756 % len(list(store))) 809 _('obsolete feature not enabled but %i markers found!\n')
810 % len(list(store))
811 )
757 return store 812 return store
813
758 814
759 def commonversion(versions): 815 def commonversion(versions):
760 """Return the newest version listed in both versions and our local formats. 816 """Return the newest version listed in both versions and our local formats.
761 817
762 Returns None if no common version exists. 818 Returns None if no common version exists.
766 for v in versions: 822 for v in versions:
767 if v in formats: 823 if v in formats:
768 return v 824 return v
769 return None 825 return None
770 826
827
771 # arbitrary picked to fit into 8K limit from HTTP server 828 # arbitrary picked to fit into 8K limit from HTTP server
772 # you have to take in account: 829 # you have to take in account:
773 # - the version header 830 # - the version header
774 # - the base85 encoding 831 # - the base85 encoding
775 _maxpayload = 5300 832 _maxpayload = 5300
833
776 834
777 def _pushkeyescape(markers): 835 def _pushkeyescape(markers):
778 """encode markers into a dict suitable for pushkey exchange 836 """encode markers into a dict suitable for pushkey exchange
779 837
780 - binary data is base85 encoded 838 - binary data is base85 encoded
782 keys = {} 840 keys = {}
783 parts = [] 841 parts = []
784 currentlen = _maxpayload * 2 # ensure we create a new part 842 currentlen = _maxpayload * 2 # ensure we create a new part
785 for marker in markers: 843 for marker in markers:
786 nextdata = _fm0encodeonemarker(marker) 844 nextdata = _fm0encodeonemarker(marker)
787 if (len(nextdata) + currentlen > _maxpayload): 845 if len(nextdata) + currentlen > _maxpayload:
788 currentpart = [] 846 currentpart = []
789 currentlen = 0 847 currentlen = 0
790 parts.append(currentpart) 848 parts.append(currentpart)
791 currentpart.append(nextdata) 849 currentpart.append(nextdata)
792 currentlen += len(nextdata) 850 currentlen += len(nextdata)
793 for idx, part in enumerate(reversed(parts)): 851 for idx, part in enumerate(reversed(parts)):
794 data = ''.join([_pack('>B', _fm0version)] + part) 852 data = ''.join([_pack('>B', _fm0version)] + part)
795 keys['dump%i' % idx] = util.b85encode(data) 853 keys['dump%i' % idx] = util.b85encode(data)
796 return keys 854 return keys
797 855
856
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
862
803 863
804 def pushmarker(repo, key, old, new): 864 def pushmarker(repo, key, old, new):
805 """Push markers over pushkey""" 865 """Push markers over pushkey"""
806 if not key.startswith('dump'): 866 if not key.startswith('dump'):
807 repo.ui.warn(_('unknown key: %r') % key) 867 repo.ui.warn(_('unknown key: %r') % key)
813 with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr: 873 with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
814 repo.obsstore.mergemarkers(tr, data) 874 repo.obsstore.mergemarkers(tr, data)
815 repo.invalidatevolatilesets() 875 repo.invalidatevolatilesets()
816 return True 876 return True
817 877
878
818 # mapping of 'set-name' -> <function to compute this set> 879 # mapping of 'set-name' -> <function to compute this set>
819 cachefuncs = {} 880 cachefuncs = {}
881
882
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""

    def register(func):
        # Registering the same set name twice is a programming error.
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func

    return register
894
829 895
830 def getrevs(repo, name): 896 def getrevs(repo, name):
831 """Return the set of revision that belong to the <name> set 897 """Return the set of revision that belong to the <name> set
832 898
833 Such access may compute the set and cache it for future use""" 899 Such access may compute the set and cache it for future use"""
836 return frozenset() 902 return frozenset()
837 if name not in repo.obsstore.caches: 903 if name not in repo.obsstore.caches:
838 repo.obsstore.caches[name] = cachefuncs[name](repo) 904 repo.obsstore.caches[name] = cachefuncs[name](repo)
839 return repo.obsstore.caches[name] 905 return repo.obsstore.caches[name]
840 906
907
841 # To be simple we need to invalidate obsolescence cache when: 908 # To be simple we need to invalidate obsolescence cache when:
842 # 909 #
843 # - new changeset is added: 910 # - new changeset is added:
844 # - public phase is changed 911 # - public phase is changed
845 # - obsolescence marker are added 912 # - obsolescence marker are added
854 clearing)""" 921 clearing)"""
855 # only clear cache is there is obsstore data in this repo 922 # only clear cache is there is obsstore data in this repo
856 if 'obsstore' in repo._filecache: 923 if 'obsstore' in repo._filecache:
857 repo.obsstore.caches.clear() 924 repo.obsstore.caches.clear()
858 925
926
def _mutablerevs(repo):
    """the set of mutable revision in the repository"""
    phasecache = repo._phasecache
    return phasecache.getrevset(repo, phases.mutablephases)
930
862 931
@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    # A mutable revision is obsolete when a marker names its node as a
    # predecessor (i.e. the node appears in the successors index).
    tonode = repo.changelog.node
    hasmarker = repo.obsstore.successors.__contains__
    return {r for r in _mutablerevs(repo) if hasmarker(tonode(r))}
940
871 941
872 @cachefor('orphan') 942 @cachefor('orphan')
873 def _computeorphanset(repo): 943 def _computeorphanset(repo):
874 """the set of non obsolete revisions with obsolete parents""" 944 """the set of non obsolete revisions with obsolete parents"""
875 pfunc = repo.changelog.parentrevs 945 pfunc = repo.changelog.parentrevs
884 if p in obsolete or p in unstable: 954 if p in obsolete or p in unstable:
885 unstable.add(r) 955 unstable.add(r)
886 break 956 break
887 return unstable 957 return unstable
888 958
959
@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    # Obsolete revisions that orphans still descend from stay suspended.
    ancestors = repo.changelog.ancestors(getrevs(repo, 'orphan'))
    return {r for r in getrevs(repo, 'obsolete') if r in ancestors}
894 965
966
@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    obsolete = getrevs(repo, 'obsolete')
    suspended = getrevs(repo, 'suspended')
    return obsolete - suspended
971
899 972
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # Bind frequently used attributes to locals to avoid repeated
    # lookups inside the loop.
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    tonode = cl.node
    obsstore = repo.obsstore
    # Only mutable, non-obsolete revisions can be phase-divergent.
    for rev in repo.revs('(not public()) and (not obsolete())'):
        curnode = tonode(rev)
        # (future) A cache of predecessors may worth if split is very common
        preds = obsutil.allpredecessors(
            obsstore, [curnode], ignoreflags=bumpedfix
        )
        for pnode in preds:
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if prev is None:
                continue
            if phase(repo, prev) <= public:
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return bumped
997
923 998
924 @cachefor('contentdivergent') 999 @cachefor('contentdivergent')
925 def _computecontentdivergentset(repo): 1000 def _computecontentdivergentset(repo):
926 """the set of rev that compete to be the final successors of some revision. 1001 """the set of rev that compete to be the final successors of some revision.
927 """ 1002 """
935 toprocess = set(mark) 1010 toprocess = set(mark)
936 seen = set() 1011 seen = set()
937 while toprocess: 1012 while toprocess:
938 prec = toprocess.pop()[0] 1013 prec = toprocess.pop()[0]
939 if prec in seen: 1014 if prec in seen:
940 continue # emergency cycle hanging prevention 1015 continue # emergency cycle hanging prevention
941 seen.add(prec) 1016 seen.add(prec)
942 if prec not in newermap: 1017 if prec not in newermap:
943 obsutil.successorssets(repo, prec, cache=newermap) 1018 obsutil.successorssets(repo, prec, cache=newermap)
944 newer = [n for n in newermap[prec] if n] 1019 newer = [n for n in newermap[prec] if n]
945 if len(newer) > 1: 1020 if len(newer) > 1:
946 divergent.add(rev) 1021 divergent.add(rev)
947 break 1022 break
948 toprocess.update(obsstore.predecessors.get(prec, ())) 1023 toprocess.update(obsstore.predecessors.get(prec, ()))
949 return divergent 1024 return divergent
950 1025
1026
def makefoldid(relation, user):
    """Return a short hex identifier for the fold described by *relation*.

    The id is derived from the acting user plus the revision number and
    node of every changeset involved (predecessors and successors).
    """
    digest = hashlib.sha1(user)
    members = relation[0] + relation[1]
    for ctx in members:
        digest.update('%d' % ctx.rev())
        digest.update(ctx.node())
    # A fold id only has to be unique among folds competing for the same
    # successors, so a truncated digest is enough; smaller ids save space.
    return node.hex(digest.digest())[:8]
960 1036
961 def createmarkers(repo, relations, flag=0, date=None, metadata=None, 1037
962 operation=None): 1038 def createmarkers(
1039 repo, relations, flag=0, date=None, metadata=None, operation=None
1040 ):
963 """Add obsolete markers between changesets in a repo 1041 """Add obsolete markers between changesets in a repo
964 1042
965 <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}]) 1043 <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
966 tuple. `old` and `news` are changectx. metadata is an optional dictionary 1044 tuple. `old` and `news` are changectx. metadata is an optional dictionary
967 containing metadata for this marker only. It is merged with the global 1045 containing metadata for this marker only. It is merged with the global
982 if 'user' not in metadata: 1060 if 'user' not in metadata:
983 luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username() 1061 luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
984 metadata['user'] = encoding.fromlocal(luser) 1062 metadata['user'] = encoding.fromlocal(luser)
985 1063
986 # Operation metadata handling 1064 # Operation metadata handling
987 useoperation = repo.ui.configbool('experimental', 1065 useoperation = repo.ui.configbool(
988 'evolution.track-operation') 1066 'experimental', 'evolution.track-operation'
1067 )
989 if useoperation and operation: 1068 if useoperation and operation:
990 metadata['operation'] = operation 1069 metadata['operation'] = operation
991 1070
992 # Effect flag metadata handling 1071 # Effect flag metadata handling
993 saveeffectflag = repo.ui.configbool('experimental', 1072 saveeffectflag = repo.ui.configbool(
994 'evolution.effect-flags') 1073 'experimental', 'evolution.effect-flags'
1074 )
995 1075
996 with repo.transaction('add-obsolescence-marker') as tr: 1076 with repo.transaction('add-obsolescence-marker') as tr:
997 markerargs = [] 1077 markerargs = []
998 for rel in relations: 1078 for rel in relations:
999 predecessors = rel[0] 1079 predecessors = rel[0]
1016 localmetadata['fold-id'] = foldid 1096 localmetadata['fold-id'] = foldid
1017 localmetadata['fold-idx'] = '%d' % foldidx 1097 localmetadata['fold-idx'] = '%d' % foldidx
1018 localmetadata['fold-size'] = '%d' % foldsize 1098 localmetadata['fold-size'] = '%d' % foldsize
1019 1099
1020 if not prec.mutable(): 1100 if not prec.mutable():
1021 raise error.Abort(_("cannot obsolete public changeset: %s") 1101 raise error.Abort(
1022 % prec, 1102 _("cannot obsolete public changeset: %s") % prec,
1023 hint="see 'hg help phases' for details") 1103 hint="see 'hg help phases' for details",
1104 )
1024 nprec = prec.node() 1105 nprec = prec.node()
1025 nsucs = tuple(s.node() for s in sucs) 1106 nsucs = tuple(s.node() for s in sucs)
1026 npare = None 1107 npare = None
1027 if not nsucs: 1108 if not nsucs:
1028 npare = tuple(p.node() for p in prec.parents()) 1109 npare = tuple(p.node() for p in prec.parents())
1029 if nprec in nsucs: 1110 if nprec in nsucs:
1030 raise error.Abort(_("changeset %s cannot obsolete itself") 1111 raise error.Abort(
1031 % prec) 1112 _("changeset %s cannot obsolete itself") % prec
1113 )
1032 1114
1033 # Effect flag can be different by relation 1115 # Effect flag can be different by relation
1034 if saveeffectflag: 1116 if saveeffectflag:
1035 # The effect flag is saved in a versioned field name for 1117 # The effect flag is saved in a versioned field name for
1036 # future evolution 1118 # future evolution
1043 # prepare all of the args first, then create the markers. 1125 # prepare all of the args first, then create the markers.
1044 markerargs.append((nprec, nsucs, npare, localmetadata)) 1126 markerargs.append((nprec, nsucs, npare, localmetadata))
1045 1127
1046 for args in markerargs: 1128 for args in markerargs:
1047 nprec, nsucs, npare, localmetadata = args 1129 nprec, nsucs, npare, localmetadata = args
1048 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare, 1130 repo.obsstore.create(
1049 date=date, metadata=localmetadata, 1131 tr,
1050 ui=repo.ui) 1132 nprec,
1133 nsucs,
1134 flag,
1135 parents=npare,
1136 date=date,
1137 metadata=localmetadata,
1138 ui=repo.ui,
1139 )
1051 repo.filteredrevcache.clear() 1140 repo.filteredrevcache.clear()