comparison tests/simplestorerepo.py @ 43076:2372284d9457

formatting: blacken the codebase

This is using my patch to black (https://github.com/psf/black/pull/826) so we
don't un-wrap collection literals.

Done with:

  hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S

# skip-blame mass-reformatting only

# no-check-commit reformats foo_bar functions

Differential Revision: https://phab.mercurial-scm.org/D6971
author Augie Fackler <augie@google.com>
date Sun, 06 Oct 2019 09:45:02 -0400
parents 73288e7abe9b
children 8ca92bcb3083
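
The change below is purely mechanical. As a minimal illustrative sketch of the kind of rewrite `black -S` produces (modelled on the storageinfo() hunk further down; the class names here are hypothetical and not part of the change):

# Illustrative sketch only -- not part of the changeset. `_Before` and `_After`
# are hypothetical names; the method body is trimmed down.


class _Before(object):
    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        # pre-black layout: continuation lines aligned with the opening paren
        return {'exclusivefiles': [], 'revisionscount': 0}


class _After(object):
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        # post-black layout: one parameter per line with a trailing comma;
        # single quotes are preserved because of -S (--skip-string-normalization)
        return {'exclusivefiles': [], 'revisionscount': 0}

The -S flag matters here presumably because Mercurial's codebase uses single-quoted literals throughout and the reformat was meant to avoid a quote-normalization churn on top of the layout changes.
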
comparing 43075:57875cf423c9 (parent) with 43076:2372284d9457
--- a/tests/simplestorerepo.py
+++ b/tests/simplestorerepo.py
@@ -19,13 +19,11 @@
     bin,
     hex,
     nullid,
     nullrev,
 )
-from mercurial.thirdparty import (
-    attr,
-)
+from mercurial.thirdparty import attr
 from mercurial import (
     ancestor,
     bundlerepo,
     error,
     extensions,
@@ -42,35 +40,37 @@
 )
 from mercurial.utils import (
     cborutil,
     storageutil,
 )
-from mercurial.revlogutils import (
-    flagutil,
-)
+from mercurial.revlogutils import flagutil
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
 REQUIREMENT = 'testonly-simplestore'
 
+
 def validatenode(node):
     if isinstance(node, int):
         raise ValueError('expected node; got int')
 
     if len(node) != 20:
         raise ValueError('expected 20 byte node')
 
+
 def validaterev(rev):
     if not isinstance(rev, int):
         raise ValueError('expected int')
 
+
 class simplestoreerror(error.StorageError):
     pass
+
 
 @interfaceutil.implementer(repository.irevisiondelta)
 @attr.s(slots=True)
 class simplestorerevisiondelta(object):
     node = attr.ib()
@@ -81,16 +81,18 @@
     baserevisionsize = attr.ib()
     revision = attr.ib()
     delta = attr.ib()
     linknode = attr.ib(default=None)
 
+
 @interfaceutil.implementer(repository.iverifyproblem)
 @attr.s(frozen=True)
 class simplefilestoreproblem(object):
     warning = attr.ib(default=None)
     error = attr.ib(default=None)
     node = attr.ib(default=None)
+
 
 @interfaceutil.implementer(repository.ifilestorage)
 class filestorage(object):
     """Implements storage for a tracked path.
 
@@ -150,12 +152,13 @@
 
         for i, entry in enumerate(self._indexdata):
             p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
 
             # start, length, rawsize, chainbase, linkrev, p1, p2, node
-            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
-                                entry[b'node']))
+            self._index.append(
+                (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
+            )
 
         self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
 
     def __len__(self):
         return len(self._indexdata)
@@ -259,22 +262,24 @@
 
     def _candelta(self, baserev, rev):
         validaterev(baserev)
         validaterev(rev)
 
-        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
-            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
+        if (self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS) or (
+            self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS
+        ):
             return False
 
         return True
 
     def checkhash(self, text, node, p1=None, p2=None, rev=None):
         if p1 is None and p2 is None:
             p1, p2 = self.parents(node)
         if node != storageutil.hashrevisionsha1(text, p1, p2):
-            raise simplestoreerror(_("integrity check failed on %s") %
-                                   self._path)
+            raise simplestoreerror(
+                _("integrity check failed on %s") % self._path
+            )
 
     def revision(self, nodeorrev, raw=False):
         if isinstance(nodeorrev, int):
             node = self.node(nodeorrev)
         else:
@@ -311,11 +316,11 @@
 
         if not revision.startswith(b'\1\n'):
             return revision
 
         start = revision.index(b'\1\n', 2)
-        return revision[start + 2:]
+        return revision[start + 2 :]
 
     def renamed(self, node):
         validatenode(node)
 
         if self.parents(node)[0] != nullid:
@@ -403,13 +408,18 @@
         # recording.
         entries = [f for f in entries if not f.startswith('undo.backup.')]
 
         return [b'/'.join((self._storepath, f)) for f in entries]
 
-    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
-                    revisionscount=False, trackedsize=False,
-                    storedsize=False):
+    def storageinfo(
+        self,
+        exclusivefiles=False,
+        sharedfiles=False,
+        revisionscount=False,
+        trackedsize=False,
+        storedsize=False,
+    ):
         # TODO do a real implementation of this
         return {
             'exclusivefiles': [],
             'sharedfiles': [],
             'revisionscount': len(self),
@@ -423,36 +433,54 @@
             node = self.node(rev)
             try:
                 self.revision(node)
             except Exception as e:
                 yield simplefilestoreproblem(
-                    error='unpacking %s: %s' % (node, e),
-                    node=node)
+                    error='unpacking %s: %s' % (node, e), node=node
+                )
                 state['skipread'].add(node)
 
-    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
-                      assumehaveparentrevisions=False,
-                      deltamode=repository.CG_DELTAMODE_STD):
+    def emitrevisions(
+        self,
+        nodes,
+        nodesorder=None,
+        revisiondata=False,
+        assumehaveparentrevisions=False,
+        deltamode=repository.CG_DELTAMODE_STD,
+    ):
         # TODO this will probably break on some ordering options.
         nodes = [n for n in nodes if n != nullid]
         if not nodes:
             return
         for delta in storageutil.emitrevisions(
-                self, nodes, nodesorder, simplestorerevisiondelta,
-                revisiondata=revisiondata,
-                assumehaveparentrevisions=assumehaveparentrevisions,
-                deltamode=deltamode):
+            self,
+            nodes,
+            nodesorder,
+            simplestorerevisiondelta,
+            revisiondata=revisiondata,
+            assumehaveparentrevisions=assumehaveparentrevisions,
+            deltamode=deltamode,
+        ):
             yield delta
 
     def add(self, text, meta, transaction, linkrev, p1, p2):
         if meta or text.startswith(b'\1\n'):
             text = storageutil.packmeta(meta, text)
 
         return self.addrevision(text, transaction, linkrev, p1, p2)
 
-    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
-                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
+    def addrevision(
+        self,
+        text,
+        transaction,
+        linkrev,
+        p1,
+        p2,
+        node=None,
+        flags=revlog.REVIDX_DEFAULT_FLAGS,
+        cachedelta=None,
+    ):
         validatenode(p1)
         validatenode(p2)
 
         if flags:
             node = node or storageutil.hashrevisionsha1(text, p1, p2)
@@ -465,42 +493,53 @@
             return node
 
         if validatehash:
             self.checkhash(rawtext, node, p1=p1, p2=p2)
 
-        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
-                                     flags)
+        return self._addrawrevision(
+            node, rawtext, transaction, linkrev, p1, p2, flags
+        )
 
     def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
         transaction.addbackup(self._indexpath)
 
         path = b'/'.join([self._storepath, hex(node)])
 
         self._svfs.write(path, rawtext)
 
-        self._indexdata.append({
-            b'node': node,
-            b'p1': p1,
-            b'p2': p2,
-            b'linkrev': link,
-            b'flags': flags,
-        })
+        self._indexdata.append(
+            {
+                b'node': node,
+                b'p1': p1,
+                b'p2': p2,
+                b'linkrev': link,
+                b'flags': flags,
+            }
+        )
 
         self._reflectindexupdate()
 
         return node
 
     def _reflectindexupdate(self):
         self._refreshindex()
-        self._svfs.write(self._indexpath,
-                         ''.join(cborutil.streamencode(self._indexdata)))
+        self._svfs.write(
+            self._indexpath, ''.join(cborutil.streamencode(self._indexdata))
+        )
 
-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
-                 maybemissingparents=False):
+    def addgroup(
+        self,
+        deltas,
+        linkmapper,
+        transaction,
+        addrevisioncb=None,
+        maybemissingparents=False,
+    ):
         if maybemissingparents:
-            raise error.Abort(_('simple store does not support missing parents '
-                                'write mode'))
+            raise error.Abort(
+                _('simple store does not support missing parents ' 'write mode')
+            )
 
         nodes = []
 
         transaction.addbackup(self._indexpath)
 
@@ -517,12 +556,13 @@
             if deltabase == nullid:
                 text = mdiff.patch(b'', delta)
             else:
                 text = mdiff.patch(self.revision(deltabase), delta)
 
-            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
-                                 flags)
+            self._addrawrevision(
+                node, text, transaction, linkrev, p1, p2, flags
+            )
 
             if addrevisioncb:
                 addrevisioncb(self, node)
         return nodes
 
@@ -533,12 +573,11 @@
         for rev, entry in self._indexbyrev.items():
             # Unset head flag for all seen parents.
             revishead[self.rev(entry[b'p1'])] = False
             revishead[self.rev(entry[b'p2'])] = False
 
-        return [rev for rev, ishead in sorted(revishead.items())
-                if ishead]
+        return [rev for rev, ishead in sorted(revishead.items()) if ishead]
 
     def heads(self, start=None, stop=None):
         # This is copied from revlog.py.
         if start is None and stop is None:
             if not len(self):
@@ -582,12 +621,16 @@
                 c.append(self.node(r))
         return c
 
     def getstrippoint(self, minlink):
         return storageutil.resolvestripinfo(
-            minlink, len(self) - 1, self._headrevs(), self.linkrev,
-            self.parentrevs)
+            minlink,
+            len(self) - 1,
+            self._headrevs(),
+            self.linkrev,
+            self.parentrevs,
+        )
 
     def strip(self, minlink, transaction):
         if not len(self):
             return
 
@@ -597,10 +640,11 @@
 
         # Purge index data starting at the requested revision.
         self._indexdata[rev:] = []
        self._reflectindexupdate()
 
+
 def issimplestorefile(f, kind, st):
     if kind != stat.S_IFREG:
         return False
 
     if store.isrevlog(f, kind, st):
@@ -610,10 +654,11 @@
     if f.startswith('undo.'):
         return False
 
     # Otherwise assume it belongs to the simple store.
     return True
+
 
 class simplestore(store.encodedstore):
     def datafiles(self):
         for x in super(simplestore, self).datafiles():
             yield x
@@ -627,10 +672,11 @@
         except KeyError:
             unencoded = None
 
         yield unencoded, encoded, size
 
+
 def reposetup(ui, repo):
     if not repo.local():
         return
 
     if isinstance(repo, bundlerepo.bundlerepository):
@@ -640,44 +686,50 @@
         def file(self, f):
             return filestorage(self.svfs, f)
 
     repo.__class__ = simplestorerepo
 
+
 def featuresetup(ui, supported):
     supported.add(REQUIREMENT)
+
 
 def newreporequirements(orig, ui, createopts):
     """Modifies default requirements for new repos to use the simple store."""
     requirements = orig(ui, createopts)
 
     # These requirements are only used to affect creation of the store
     # object. We have our own store. So we can remove them.
     # TODO do this once we feel like taking the test hit.
-    #if 'fncache' in requirements:
+    # if 'fncache' in requirements:
     #    requirements.remove('fncache')
-    #if 'dotencode' in requirements:
+    # if 'dotencode' in requirements:
     #    requirements.remove('dotencode')
 
     requirements.add(REQUIREMENT)
 
     return requirements
+
 
 def makestore(orig, requirements, path, vfstype):
     if REQUIREMENT not in requirements:
         return orig(requirements, path, vfstype)
 
     return simplestore(path, vfstype)
 
+
 def verifierinit(orig, self, *args, **kwargs):
     orig(self, *args, **kwargs)
 
     # We don't care that files in the store don't align with what is
     # advertised. So suppress these warnings.
     self.warnorphanstorefiles = False
 
+
 def extsetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)
 
-    extensions.wrapfunction(localrepo, 'newreporequirements',
-                            newreporequirements)
+    extensions.wrapfunction(
+        localrepo, 'newreporequirements', newreporequirements
+    )
     extensions.wrapfunction(localrepo, 'makestore', makestore)
     extensions.wrapfunction(verify.verifier, '__init__', verifierinit)