comparison hgext/sqlitestore.py @ 43076:2372284d9457

formatting: blacken the codebase

This is using my patch to black (https://github.com/psf/black/pull/826) so
we don't un-wrap collection literals.

Done with:

  hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S

# skip-blame mass-reformatting only
# no-check-commit reformats foo_bar functions

Differential Revision: https://phab.mercurial-scm.org/D6971
author Augie Fackler <augie@google.com>
date Sun, 06 Oct 2019 09:45:02 -0400
parents 2c4f656c8e9f
children 687b865b95ad
comparison: 43075:57875cf423c9 vs. 43076:2372284d9457

In each row below, the left-hand number is the line in 43075:57875cf423c9 and
the right-hand number is the line in 43076:2372284d9457. Lines marked '-' were
removed, lines marked '+' were inserted, and '...' marks omitted unchanged
context.
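As a rough illustration of what this mass-reformatting does (a sketch adapted
from the hunks below, not an authoritative description of black's internals):
black -S rewraps code to fit its line-length rules while leaving quote style
alone, and the patched build from https://github.com/psf/black/pull/826
additionally avoids collapsing collection literals that were already written
one element per line.

    # Before: hand-wrapped call, continuation lines aligned to the paren.
    configitem('storage', 'sqlite.compression',
               default='zstd' if zstd else 'zlib',
               experimental=True)

    # After "black -S": one argument per line, trailing comma, 4-space indent.
    # Single quotes survive because -S skips string normalization.
    configitem(
        'storage',
        'sqlite.compression',
        default='zstd' if zstd else 'zlib',
        experimental=True,
    )

    # Already-exploded literals such as CREATE_SCHEMA = [...] keep their
    # one-entry-per-line layout instead of being joined onto a single line.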
  55   55    from mercurial.node import (
  56   56        nullid,
  57   57        nullrev,
  58   58        short,
  59   59    )
  60       - from mercurial.thirdparty import (
  61       -     attr,
  62       - )
       60  + from mercurial.thirdparty import attr
  63   61    from mercurial import (
  64   62        ancestor,
  65   63        dagop,
  66   64        encoding,
  67   65        error,
  ...
  75   73    )
  76   74    from mercurial.interfaces import (
  77   75        repository,
  78   76        util as interfaceutil,
  79   77    )
  80       - from mercurial.utils import (
  81       -     storageutil,
  82       - )
       78  + from mercurial.utils import storageutil
  83   79
  84   80    try:
  85   81        from mercurial import zstd
       82  +
  86   83        zstd.__version__
  87   84    except ImportError:
  88   85        zstd = None
  89   86
  90   87    configtable = {}
  91   88    configitem = registrar.configitem(configtable)
  92   89
  93   90    # experimental config: storage.sqlite.compression
  94       - configitem('storage', 'sqlite.compression',
  95       -            default='zstd' if zstd else 'zlib',
  96       -            experimental=True)
       91  + configitem(
       92  +     'storage',
       93  +     'sqlite.compression',
       94  +     default='zstd' if zstd else 'zlib',
       95  +     experimental=True,
       96  + )
  97   97
  98   98    # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
  99   99    # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 100  100    # be specifying the version(s) of Mercurial they are tested with, or
 101  101    # leave the attribute unspecified.
  ...
 119  119
 120  120    CREATE_SCHEMA = [
 121  121        # Deltas are stored as content-indexed blobs.
 122  122        # compression column holds COMPRESSION_* constant for how the
 123  123        # delta is encoded.
 124       -
 125  124        r'CREATE TABLE delta ('
 126  125        r' id INTEGER PRIMARY KEY, '
 127  126        r' compression INTEGER NOT NULL, '
 128  127        r' hash BLOB UNIQUE ON CONFLICT ABORT, '
 129  128        r' delta BLOB NOT NULL '
 130  129        r')',
 131       -
 132  130        # Tracked paths are denormalized to integers to avoid redundant
 133  131        # storage of the path name.
 134  132        r'CREATE TABLE filepath ('
 135  133        r' id INTEGER PRIMARY KEY, '
 136  134        r' path BLOB NOT NULL '
 137  135        r')',
 138       -
 139       -     r'CREATE UNIQUE INDEX filepath_path '
 140       -     r' ON filepath (path)',
 141       -
      136  +     r'CREATE UNIQUE INDEX filepath_path ' r' ON filepath (path)',
 142  137        # We have a single table for all file revision data.
 143  138        # Each file revision is uniquely described by a (path, rev) and
 144  139        # (path, node).
 145  140        #
 146  141        # Revision data is stored as a pointer to the delta producing this
  ...
 160  155        r' flags INTEGER NOT NULL, '
 161  156        r' deltaid INTEGER REFERENCES delta(id), '
 162  157        r' deltabaseid INTEGER REFERENCES fileindex(id), '
 163  158        r' node BLOB NOT NULL '
 164  159        r')',
 165       -
 166  160        r'CREATE UNIQUE INDEX fileindex_pathrevnum '
 167  161        r' ON fileindex (pathid, revnum)',
 168       -
 169  162        r'CREATE UNIQUE INDEX fileindex_pathnode '
 170  163        r' ON fileindex (pathid, node)',
 171       -
 172  164        # Provide a view over all file data for convenience.
 173  165        r'CREATE VIEW filedata AS '
 174  166        r'SELECT '
 175  167        r' fileindex.id AS id, '
 176  168        r' filepath.id AS pathid, '
  ...
 183  175        r' fileindex.flags AS flags, '
 184  176        r' fileindex.deltaid AS deltaid, '
 185  177        r' fileindex.deltabaseid AS deltabaseid '
 186  178        r'FROM filepath, fileindex '
 187  179        r'WHERE fileindex.pathid=filepath.id',
 188       -
 189  180        r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
 190  181    ]
 191  182
 192       - def resolvedeltachain(db, pathid, node, revisioncache,
 193       -                       stoprids, zstddctx=None):
      183  +
      184  + def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
 194  185        """Resolve a delta chain for a file node."""
 195  186
 196  187        # TODO the "not in ({stops})" here is possibly slowing down the query
 197  188        # because it needs to perform the lookup on every recursive invocation.
 198  189        # This could possibly be faster if we created a temporary query with
  ...
 212  203            r' AND fileindex.id NOT IN ({stops}) '
 213  204            r' ) '
 214  205            r'SELECT deltachain.baseid, compression, delta '
 215  206            r'FROM deltachain, delta '
 216  207            r'WHERE delta.id=deltachain.deltaid'.format(
 217       -             stops=r','.join([r'?'] * len(stoprids))),
 218       -         tuple([pathid, node] + list(stoprids.keys())))
      208  +             stops=r','.join([r'?'] * len(stoprids))
      209  +         ),
      210  +         tuple([pathid, node] + list(stoprids.keys())),
      211  +     )
 219  212
 220  213        deltas = []
 221  214        lastdeltabaseid = None
 222  215
 223  216        for deltabaseid, compression, delta in res:
  ...
 228  221            elif compression == COMPRESSION_NONE:
 229  222                delta = delta
 230  223            elif compression == COMPRESSION_ZLIB:
 231  224                delta = zlib.decompress(delta)
 232  225            else:
 233       -             raise SQLiteStoreError('unhandled compression type: %d' %
 234       -                                    compression)
      226  +             raise SQLiteStoreError(
      227  +                 'unhandled compression type: %d' % compression
      228  +             )
 235  229
 236  230            deltas.append(delta)
 237  231
 238  232            if lastdeltabaseid in stoprids:
 239  233                basetext = revisioncache[stoprids[lastdeltabaseid]]
  ...
 249  243        if not isinstance(fulltext, bytes):
 250  244            fulltext = bytes(delta)
 251  245
 252  246        return fulltext
 253  247
      248  +
 254  249    def insertdelta(db, compression, hash, delta):
 255  250        try:
 256  251            return db.execute(
 257  252                r'INSERT INTO delta (compression, hash, delta) '
 258  253                r'VALUES (?, ?, ?)',
 259       -             (compression, hash, delta)).lastrowid
      254  +             (compression, hash, delta),
      255  +         ).lastrowid
 260  256        except sqlite3.IntegrityError:
 261  257            return db.execute(
 262       -             r'SELECT id FROM delta WHERE hash=?',
 263       -             (hash,)).fetchone()[0]
      258  +             r'SELECT id FROM delta WHERE hash=?', (hash,)
      259  +         ).fetchone()[0]
      260  +
 264  261
 265  262    class SQLiteStoreError(error.StorageError):
 266  263        pass
      264  +
 267  265
 268  266    @attr.s
 269  267    class revisionentry(object):
 270  268        rid = attr.ib()
 271  269        rev = attr.ib()
  ...
 274  272        p2rev = attr.ib()
 275  273        p1node = attr.ib()
 276  274        p2node = attr.ib()
 277  275        linkrev = attr.ib()
 278  276        flags = attr.ib()
      277  +
 279  278
 280  279    @interfaceutil.implementer(repository.irevisiondelta)
 281  280    @attr.s(slots=True)
 282  281    class sqliterevisiondelta(object):
 283  282        node = attr.ib()
  ...
 288  287        baserevisionsize = attr.ib()
 289  288        revision = attr.ib()
 290  289        delta = attr.ib()
 291  290        linknode = attr.ib(default=None)
 292  291
      292  +
 293  293    @interfaceutil.implementer(repository.iverifyproblem)
 294  294    @attr.s(frozen=True)
 295  295    class sqliteproblem(object):
 296  296        warning = attr.ib(default=None)
 297  297        error = attr.ib(default=None)
 298  298        node = attr.ib(default=None)
 299  299
      300  +
 300  301    @interfaceutil.implementer(repository.ifilestorage)
 301  302    class sqlitefilestore(object):
 302  303        """Implements storage for an individual tracked path."""
 303  304
 304  305        def __init__(self, db, path, compression):
  ...
 330  331        def _refreshindex(self):
 331  332            self._revtonode = {}
 332  333            self._nodetorev = {}
 333  334            self._revisions = {}
 334  335
 335       -         res = list(self._db.execute(
 336       -             r'SELECT id FROM filepath WHERE path=?', (self._path,)))
      336  +         res = list(
      337  +             self._db.execute(
      338  +                 r'SELECT id FROM filepath WHERE path=?', (self._path,)
      339  +             )
      340  +         )
 337  341
 338  342            if not res:
 339  343                self._pathid = None
 340  344                return
 341  345
  ...
 344  348            res = self._db.execute(
 345  349                r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
 346  350                r'FROM fileindex '
 347  351                r'WHERE pathid=? '
 348  352                r'ORDER BY revnum ASC',
 349       -             (self._pathid,))
      353  +             (self._pathid,),
      354  +         )
 350  355
 351  356            for i, row in enumerate(res):
 352  357                rid, rev, node, p1rev, p2rev, linkrev, flags = row
 353  358
 354  359                if i != rev:
 355       -                 raise SQLiteStoreError(_('sqlite database has inconsistent '
 356       -                                          'revision numbers'))
      360  +                 raise SQLiteStoreError(
      361  +                     _('sqlite database has inconsistent ' 'revision numbers')
      362  +                 )
 357  363
 358  364                if p1rev == nullrev:
 359  365                    p1node = nullid
 360  366                else:
 361  367                    p1node = self._revtonode[p1rev]
  ...
 372  378                    p1rev=p1rev,
 373  379                    p2rev=p2rev,
 374  380                    p1node=p1node,
 375  381                    p2node=p2node,
 376  382                    linkrev=linkrev,
 377       -                 flags=flags)
      383  +                 flags=flags,
      384  +             )
 378  385
 379  386                self._revtonode[rev] = node
 380  387                self._nodetorev[node] = rev
 381  388                self._revisions[node] = entry
 382  389
  ...
 393  400                return False
 394  401
 395  402            return node in self._nodetorev
 396  403
 397  404        def revs(self, start=0, stop=None):
 398       -         return storageutil.iterrevs(len(self._revisions), start=start,
 399       -                                     stop=stop)
      405  +         return storageutil.iterrevs(
      406  +             len(self._revisions), start=start, stop=stop
      407  +         )
 400  408
 401  409        def parents(self, node):
 402  410            if node == nullid:
 403  411                return nullid, nullid
 404  412
  ...
 476  484                return [nullid]
 477  485
 478  486            startrev = self.rev(start) if start is not None else nullrev
 479  487            stoprevs = {self.rev(n) for n in stop or []}
 480  488
 481       -         revs = dagop.headrevssubset(self.revs, self.parentrevs,
 482       -                                     startrev=startrev, stoprevs=stoprevs)
      489  +         revs = dagop.headrevssubset(
      490  +             self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
      491  +         )
 483  492
 484  493            return [self.node(rev) for rev in revs]
 485  494
 486  495        def children(self, node):
 487  496            rev = self.rev(node)
  ...
 490  499                r'SELECT'
 491  500                r' node '
 492  501                r' FROM filedata '
 493  502                r' WHERE path=? AND (p1rev=? OR p2rev=?) '
 494  503                r' ORDER BY revnum ASC',
 495       -             (self._path, rev, rev))
      504  +             (self._path, rev, rev),
      505  +         )
 496  506
 497  507            return [row[0] for row in res]
 498  508
 499  509        # End of ifileindex interface.
 500  510
  ...
 529  539
 530  540            # Because we have a fulltext revision cache, we are able to
 531  541            # short-circuit delta chain traversal and decompression as soon as
 532  542            # we encounter a revision in the cache.
 533  543
 534       -         stoprids = {self._revisions[n].rid: n
 535       -                     for n in self._revisioncache}
      544  +         stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
 536  545
 537  546            if not stoprids:
 538  547                stoprids[-1] = None
 539  548
 540       -         fulltext = resolvedeltachain(self._db, self._pathid, node,
 541       -                                      self._revisioncache, stoprids,
 542       -                                      zstddctx=self._dctx)
      549  +         fulltext = resolvedeltachain(
      550  +             self._db,
      551  +             self._pathid,
      552  +             node,
      553  +             self._revisioncache,
      554  +             stoprids,
      555  +             zstddctx=self._dctx,
      556  +         )
 543  557
 544  558            # Don't verify hashes if parent nodes were rewritten, as the hash
 545  559            # wouldn't verify.
 546  560            if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
 547  561                _verifyhash = False
  ...
 562  576            return storageutil.filerevisioncopied(self, node)
 563  577
 564  578        def cmp(self, node, fulltext):
 565  579            return not storageutil.filedataequivalent(self, node, fulltext)
 566  580
 567       -     def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
 568       -                       assumehaveparentrevisions=False,
 569       -                       deltamode=repository.CG_DELTAMODE_STD):
      581  +     def emitrevisions(
      582  +         self,
      583  +         nodes,
      584  +         nodesorder=None,
      585  +         revisiondata=False,
      586  +         assumehaveparentrevisions=False,
      587  +         deltamode=repository.CG_DELTAMODE_STD,
      588  +     ):
 570  589            if nodesorder not in ('nodes', 'storage', 'linear', None):
 571       -             raise error.ProgrammingError('unhandled value for nodesorder: %s' %
 572       -                                          nodesorder)
      590  +             raise error.ProgrammingError(
      591  +                 'unhandled value for nodesorder: %s' % nodesorder
      592  +             )
 573  593
 574  594            nodes = [n for n in nodes if n != nullid]
 575  595
 576  596            if not nodes:
 577  597                return
  ...
 579  599            # TODO perform in a single query.
 580  600            res = self._db.execute(
 581  601                r'SELECT revnum, deltaid FROM fileindex '
 582  602                r'WHERE pathid=? '
 583  603                r' AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
 584       -             tuple([self._pathid] + nodes))
      604  +             tuple([self._pathid] + nodes),
      605  +         )
 585  606
 586  607            deltabases = {}
 587  608
 588  609            for rev, deltaid in res:
 589  610                res = self._db.execute(
 590  611                    r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
 591       -                 (self._pathid, deltaid))
      612  +                 (self._pathid, deltaid),
      613  +             )
 592  614                deltabases[rev] = res.fetchone()[0]
 593  615
 594  616            # TODO define revdifffn so we can use delta from storage.
 595  617            for delta in storageutil.emitrevisions(
 596       -             self, nodes, nodesorder, sqliterevisiondelta,
      618  +             self,
      619  +             nodes,
      620  +             nodesorder,
      621  +             sqliterevisiondelta,
 597  622                deltaparentfn=deltabases.__getitem__,
 598  623                revisiondata=revisiondata,
 599  624                assumehaveparentrevisions=assumehaveparentrevisions,
 600       -             deltamode=deltamode):
      625  +             deltamode=deltamode,
      626  +         ):
 601  627
 602  628                yield delta
 603  629
 604  630        # End of ifiledata interface.
 605  631
  ...
 609  635            if meta or filedata.startswith(b'\x01\n'):
 610  636                filedata = storageutil.packmeta(meta, filedata)
 611  637
 612  638            return self.addrevision(filedata, transaction, linkrev, p1, p2)
 613  639
 614       -     def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
 615       -                     flags=0, cachedelta=None):
      640  +     def addrevision(
      641  +         self,
      642  +         revisiondata,
      643  +         transaction,
      644  +         linkrev,
      645  +         p1,
      646  +         p2,
      647  +         node=None,
      648  +         flags=0,
      649  +         cachedelta=None,
      650  +     ):
 616  651            if flags:
 617  652                raise SQLiteStoreError(_('flags not supported on revisions'))
 618  653
 619  654            validatehash = node is not None
 620  655            node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
  ...
 623  658            self._checkhash(revisiondata, node, p1, p2)
 624  659
 625  660            if node in self._nodetorev:
 626  661                return node
 627  662
 628       -         node = self._addrawrevision(node, revisiondata, transaction, linkrev,
 629       -                                     p1, p2)
      663  +         node = self._addrawrevision(
      664  +             node, revisiondata, transaction, linkrev, p1, p2
      665  +         )
 630  666
 631  667            self._revisioncache[node] = revisiondata
 632  668            return node
 633  669
 634       -     def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
 635       -                  maybemissingparents=False):
      670  +     def addgroup(
      671  +         self,
      672  +         deltas,
      673  +         linkmapper,
      674  +         transaction,
      675  +         addrevisioncb=None,
      676  +         maybemissingparents=False,
      677  +     ):
 636  678            nodes = []
 637  679
 638  680            for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
 639  681                storeflags = 0
 640  682
  ...
 661  703                    hlen = struct.calcsize('>lll')
 662  704                    oldlen = len(self.rawdata(deltabase, _verifyhash=False))
 663  705                    newlen = len(delta) - hlen
 664  706
 665  707                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
 666       -                     raise error.CensoredBaseError(self._path,
 667       -                                                   deltabase)
 668       -
 669       -             if (not (storeflags & FLAG_CENSORED)
 670       -                 and storageutil.deltaiscensored(
 671       -                     delta, baserev, lambda x: len(self.rawdata(x)))):
      708  +                     raise error.CensoredBaseError(self._path, deltabase)
      709  +
      710  +             if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
      711  +                 delta, baserev, lambda x: len(self.rawdata(x))
      712  +             ):
 672  713                    storeflags |= FLAG_CENSORED
 673  714
 674  715                linkrev = linkmapper(linknode)
 675  716
 676  717                nodes.append(node)
  ...
 683  724                    entry.p1node = p1
 684  725                    entry.p1rev = self._nodetorev[p1]
 685  726                    entry.flags &= ~FLAG_MISSING_P1
 686  727
 687  728                    self._db.execute(
 688       -                     r'UPDATE fileindex SET p1rev=?, flags=? '
 689       -                     r'WHERE id=?',
 690       -                     (self._nodetorev[p1], entry.flags, entry.rid))
      729  +                     r'UPDATE fileindex SET p1rev=?, flags=? ' r'WHERE id=?',
      730  +                     (self._nodetorev[p1], entry.flags, entry.rid),
      731  +                 )
 691  732
 692  733                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
 693  734                    entry.p2node = p2
 694  735                    entry.p2rev = self._nodetorev[p2]
 695  736                    entry.flags &= ~FLAG_MISSING_P2
 696  737
 697  738                    self._db.execute(
 698       -                     r'UPDATE fileindex SET p2rev=?, flags=? '
 699       -                     r'WHERE id=?',
 700       -                     (self._nodetorev[p1], entry.flags, entry.rid))
      739  +                     r'UPDATE fileindex SET p2rev=?, flags=? ' r'WHERE id=?',
      740  +                     (self._nodetorev[p1], entry.flags, entry.rid),
      741  +                 )
 701  742
 702  743                continue
 703  744
 704  745                if deltabase == nullid:
 705  746                    text = mdiff.patch(b'', delta)
 706  747                    storedelta = None
 707  748                else:
 708  749                    text = None
 709  750                    storedelta = (deltabase, delta)
 710  751
 711       -             self._addrawrevision(node, text, transaction, linkrev, p1, p2,
 712       -                                  storedelta=storedelta, flags=storeflags)
      752  +             self._addrawrevision(
      753  +                 node,
      754  +                 text,
      755  +                 transaction,
      756  +                 linkrev,
      757  +                 p1,
      758  +                 p2,
      759  +                 storedelta=storedelta,
      760  +                 flags=storeflags,
      761  +             )
 713  762
 714  763                if addrevisioncb:
 715  764                    addrevisioncb(self, node)
 716  765
 717  766            return nodes
  ...
 720  769            tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
 721  770
 722  771            # This restriction is cargo culted from revlogs and makes no sense for
 723  772            # SQLite, since columns can be resized at will.
 724  773            if len(tombstone) > len(self.rawdata(censornode)):
 725       -             raise error.Abort(_('censor tombstone must be no longer than '
 726       -                                 'censored data'))
      774  +             raise error.Abort(
      775  +                 _('censor tombstone must be no longer than ' 'censored data')
      776  +             )
 727  777
 728  778            # We need to replace the censored revision's data with the tombstone.
 729  779            # But replacing that data will have implications for delta chains that
 730  780            # reference it.
 731  781            #
  ...
 736  786            # revision and insert a replacement.
 737  787
 738  788            # Find the delta to be censored.
 739  789            censoreddeltaid = self._db.execute(
 740  790                r'SELECT deltaid FROM fileindex WHERE id=?',
 741       -             (self._revisions[censornode].rid,)).fetchone()[0]
      791  +             (self._revisions[censornode].rid,),
      792  +         ).fetchone()[0]
 742  793
 743  794            # Find all its delta chain children.
 744  795            # TODO once we support storing deltas for !files, we'll need to look
 745  796            # for those delta chains too.
 746       -         rows = list(self._db.execute(
 747       -             r'SELECT id, pathid, node FROM fileindex '
 748       -             r'WHERE deltabaseid=? OR deltaid=?',
 749       -             (censoreddeltaid, censoreddeltaid)))
      797  +         rows = list(
      798  +             self._db.execute(
      799  +                 r'SELECT id, pathid, node FROM fileindex '
      800  +                 r'WHERE deltabaseid=? OR deltaid=?',
      801  +                 (censoreddeltaid, censoreddeltaid),
      802  +             )
      803  +         )
 750  804
 751  805            for row in rows:
 752  806                rid, pathid, node = row
 753  807
 754       -             fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
 755       -                                          zstddctx=self._dctx)
      808  +             fulltext = resolvedeltachain(
      809  +                 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
      810  +             )
 756  811
 757  812                deltahash = hashlib.sha1(fulltext).digest()
 758  813
 759  814                if self._compengine == 'zstd':
 760  815                    deltablob = self._cctx.compress(fulltext)
  ...
 764  819                    compression = COMPRESSION_ZLIB
 765  820                elif self._compengine == 'none':
 766  821                    deltablob = fulltext
 767  822                    compression = COMPRESSION_NONE
 768  823                else:
 769       -                 raise error.ProgrammingError('unhandled compression engine: %s'
 770       -                                              % self._compengine)
      824  +                 raise error.ProgrammingError(
      825  +                     'unhandled compression engine: %s' % self._compengine
      826  +                 )
 771  827
 772  828                if len(deltablob) >= len(fulltext):
 773  829                    deltablob = fulltext
 774  830                    compression = COMPRESSION_NONE
 775  831
 776  832                deltaid = insertdelta(self._db, compression, deltahash, deltablob)
 777  833
 778  834                self._db.execute(
 779       -                 r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
 780       -                 r'WHERE id=?', (deltaid, rid))
      835  +                 r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
      836  +                 r'WHERE id=?',
      837  +                 (deltaid, rid),
      838  +             )
 781  839
 782  840            # Now create the tombstone delta and replace the delta on the censored
 783  841            # node.
 784  842            deltahash = hashlib.sha1(tombstone).digest()
 785       -         tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
 786       -                                        deltahash, tombstone)
      843  +         tombstonedeltaid = insertdelta(
      844  +             self._db, COMPRESSION_NONE, deltahash, tombstone
      845  +         )
 787  846
 788  847            flags = self._revisions[censornode].flags
 789  848            flags |= FLAG_CENSORED
 790  849
 791  850            self._db.execute(
 792  851                r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
 793  852                r'WHERE pathid=? AND node=?',
 794       -             (flags, tombstonedeltaid, self._pathid, censornode))
 795       -
 796       -         self._db.execute(
 797       -             r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
      853  +             (flags, tombstonedeltaid, self._pathid, censornode),
      854  +         )
      855  +
      856  +         self._db.execute(r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
 798  857
 799  858            self._refreshindex()
 800  859            self._revisioncache.clear()
 801  860
 802  861        def getstrippoint(self, minlink):
 803       -         return storageutil.resolvestripinfo(minlink, len(self) - 1,
 804       -                                             [self.rev(n) for n in self.heads()],
 805       -                                             self.linkrev,
 806       -                                             self.parentrevs)
      862  +         return storageutil.resolvestripinfo(
      863  +             minlink,
      864  +             len(self) - 1,
      865  +             [self.rev(n) for n in self.heads()],
      866  +             self.linkrev,
      867  +             self.parentrevs,
      868  +         )
 807  869
 808  870        def strip(self, minlink, transaction):
 809  871            if not len(self):
 810  872                return
 811  873
  ...
 815  877                return
 816  878
 817  879            for rev in self.revs(rev):
 818  880                self._db.execute(
 819  881                    r'DELETE FROM fileindex WHERE pathid=? AND node=?',
 820       -                 (self._pathid, self.node(rev)))
      882  +                 (self._pathid, self.node(rev)),
      883  +             )
 821  884
 822  885            # TODO how should we garbage collect data in delta table?
 823  886
 824  887            self._refreshindex()
 825  888
  ...
 828  891        # Start of ifilestorage interface.
 829  892
 830  893        def files(self):
 831  894            return []
 832  895
 833       -     def storageinfo(self, exclusivefiles=False, sharedfiles=False,
 834       -                     revisionscount=False, trackedsize=False,
 835       -                     storedsize=False):
      896  +     def storageinfo(
      897  +         self,
      898  +         exclusivefiles=False,
      899  +         sharedfiles=False,
      900  +         revisionscount=False,
      901  +         trackedsize=False,
      902  +         storedsize=False,
      903  +     ):
 836  904            d = {}
 837  905
 838  906            if exclusivefiles:
 839  907                d['exclusivefiles'] = []
 840  908
  ...
 844  912
 845  913            if revisionscount:
 846  914                d['revisionscount'] = len(self)
 847  915
 848  916            if trackedsize:
 849       -             d['trackedsize'] = sum(len(self.revision(node))
 850       -                                    for node in self._nodetorev)
      917  +             d['trackedsize'] = sum(
      918  +                 len(self.revision(node)) for node in self._nodetorev
      919  +             )
 851  920
 852  921            if storedsize:
 853  922                # TODO implement this?
 854  923                d['storedsize'] = None
 855  924
  ...
 863  932
 864  933                try:
 865  934                    self.revision(node)
 866  935                except Exception as e:
 867  936                    yield sqliteproblem(
 868       -                     error=_('unpacking %s: %s') % (short(node), e),
 869       -                     node=node)
      937  +                     error=_('unpacking %s: %s') % (short(node), e), node=node
      938  +                 )
 870  939
 871  940                    state['skipread'].add(node)
 872  941
 873  942        # End of ifilestorage interface.
 874  943
  ...
 885  954                pass
 886  955
 887  956            if storageutil.iscensoredtext(fulltext):
 888  957                raise error.CensoredNodeError(self._path, node, fulltext)
 889  958
 890       -         raise SQLiteStoreError(_('integrity check failed on %s') %
 891       -                                self._path)
 892       -
 893       -     def _addrawrevision(self, node, revisiondata, transaction, linkrev,
 894       -                         p1, p2, storedelta=None, flags=0):
      959  +         raise SQLiteStoreError(_('integrity check failed on %s') % self._path)
      960  +
      961  +     def _addrawrevision(
      962  +         self,
      963  +         node,
      964  +         revisiondata,
      965  +         transaction,
      966  +         linkrev,
      967  +         p1,
      968  +         p2,
      969  +         storedelta=None,
      970  +         flags=0,
      971  +     ):
 895  972            if self._pathid is None:
 896  973                res = self._db.execute(
 897       -                 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
      974  +                 r'INSERT INTO filepath (path) VALUES (?)', (self._path,)
      975  +             )
 898  976                self._pathid = res.lastrowid
 899  977
 900  978            # For simplicity, always store a delta against p1.
 901  979            # TODO we need a lot more logic here to make behavior reasonable.
 902  980
  ...
 911  989            deltabase = p1
 912  990
 913  991            if deltabase == nullid:
 914  992                delta = revisiondata
 915  993            else:
 916       -             delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
 917       -                                    revisiondata)
      994  +             delta = mdiff.textdiff(
      995  +                 self.revision(self.rev(deltabase)), revisiondata
      996  +             )
 918  997
 919  998            # File index stores a pointer to its delta and the parent delta.
 920  999            # The parent delta is stored via a pointer to the fileindex PK.
 921 1000            if deltabase == nullid:
 922 1001                baseid = None
  ...
 937 1016                compression = COMPRESSION_ZLIB
 938 1017            elif self._compengine == 'none':
 939 1018                deltablob = delta
 940 1019                compression = COMPRESSION_NONE
 941 1020            else:
 942       -             raise error.ProgrammingError('unhandled compression engine: %s' %
 943       -                                          self._compengine)
     1021  +             raise error.ProgrammingError(
     1022  +                 'unhandled compression engine: %s' % self._compengine
     1023  +             )
 944 1024
 945 1025            # Don't store compressed data if it isn't practical.
 946 1026            if len(deltablob) >= len(delta):
 947 1027                deltablob = delta
 948 1028                compression = COMPRESSION_NONE
  ...
 964 1044            rid = self._db.execute(
 965 1045                r'INSERT INTO fileindex ('
 966 1046                r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
 967 1047                r' deltaid, deltabaseid) '
 968 1048                r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
 969       -             (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
 970       -              deltaid, baseid)
     1049  +             (
     1050  +                 self._pathid,
     1051  +                 rev,
     1052  +                 node,
     1053  +                 p1rev,
     1054  +                 p2rev,
     1055  +                 linkrev,
     1056  +                 flags,
     1057  +                 deltaid,
     1058  +                 baseid,
     1059  +             ),
 971 1060            ).lastrowid
 972 1061
 973 1062            entry = revisionentry(
 974 1063                rid=rid,
 975 1064                rev=rev,
  ...
 977 1066                p1rev=p1rev,
 978 1067                p2rev=p2rev,
 979 1068                p1node=p1,
 980 1069                p2node=p2,
 981 1070                linkrev=linkrev,
 982       -             flags=flags)
     1071  +             flags=flags,
     1072  +         )
 983 1073
 984 1074            self._nodetorev[node] = rev
 985 1075            self._revtonode[rev] = node
 986 1076            self._revisions[node] = entry
 987 1077
 988 1078            return node
     1079  +
 989 1080
 990 1081    class sqliterepository(localrepo.localrepository):
 991 1082        def cancopy(self):
 992 1083            return False
 993 1084
  ...
1022 1113            db = makedb(self.svfs.join('db.sqlite'))
1023 1114            self._db = (tid, db)
1024 1115
1025 1116            return db
1026 1117
     1118  +
1027 1119    def makedb(path):
1028 1120        """Construct a database handle for a database at path."""
1029 1121
1030 1122        db = sqlite3.connect(encoding.strfromlocal(path))
1031 1123        db.text_factory = bytes
  ...
1047 1139
1048 1140        db.execute(r'PRAGMA journal_mode=WAL')
1049 1141
1050 1142        return db
1051 1143
     1144  +
1052 1145    def featuresetup(ui, supported):
1053 1146        supported.add(REQUIREMENT)
1054 1147
1055 1148        if zstd:
1056 1149            supported.add(REQUIREMENT_ZSTD)
  ...
1058 1151        supported.add(REQUIREMENT_ZLIB)
1059 1152        supported.add(REQUIREMENT_NONE)
1060 1153        supported.add(REQUIREMENT_SHALLOW_FILES)
1061 1154        supported.add(repository.NARROW_REQUIREMENT)
1062 1155
     1156  +
1063 1157    def newreporequirements(orig, ui, createopts):
1064 1158        if createopts['backend'] != 'sqlite':
1065 1159            return orig(ui, createopts)
1066 1160
1067 1161        # This restriction can be lifted once we have more confidence.
1068 1162        if 'sharedrepo' in createopts:
1069       -         raise error.Abort(_('shared repositories not supported with SQLite '
1070       -                             'store'))
     1163  +         raise error.Abort(
     1164  +             _('shared repositories not supported with SQLite ' 'store')
     1165  +         )
1071 1166
1072 1167        # This filtering is out of an abundance of caution: we want to ensure
1073 1168        # we honor creation options and we do that by annotating exactly the
1074 1169        # creation options we recognize.
1075 1170        known = {
  ...
1078 1173            'shallowfilestore',
1079 1174        }
1080 1175
1081 1176        unsupported = set(createopts) - known
1082 1177        if unsupported:
1083       -         raise error.Abort(_('SQLite store does not support repo creation '
1084       -                             'option: %s') % ', '.join(sorted(unsupported)))
     1178  +         raise error.Abort(
     1179  +             _('SQLite store does not support repo creation ' 'option: %s')
     1180  +             % ', '.join(sorted(unsupported))
     1181  +         )
1085 1182
1086 1183        # Since we're a hybrid store that still relies on revlogs, we fall back
1087 1184        # to using the revlogv1 backend's storage requirements then adding our
1088 1185        # own requirement.
1089 1186        createopts['backend'] = 'revlogv1'
  ...
1091 1188        requirements.add(REQUIREMENT)
1092 1189
1093 1190        compression = ui.config('storage', 'sqlite.compression')
1094 1191
1095 1192        if compression == 'zstd' and not zstd:
1096       -         raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
1097       -                             'zstandard compression not available to this '
1098       -                             'Mercurial install'))
     1193  +         raise error.Abort(
     1194  +             _(
     1195  +                 'storage.sqlite.compression set to "zstd" but '
     1196  +                 'zstandard compression not available to this '
     1197  +                 'Mercurial install'
     1198  +             )
     1199  +         )
1099 1200
1100 1201        if compression == 'zstd':
1101 1202            requirements.add(REQUIREMENT_ZSTD)
1102 1203        elif compression == 'zlib':
1103 1204            requirements.add(REQUIREMENT_ZLIB)
1104 1205        elif compression == 'none':
1105 1206            requirements.add(REQUIREMENT_NONE)
1106 1207        else:
1107       -         raise error.Abort(_('unknown compression engine defined in '
1108       -                             'storage.sqlite.compression: %s') % compression)
     1208  +         raise error.Abort(
     1209  +             _(
     1210  +                 'unknown compression engine defined in '
     1211  +                 'storage.sqlite.compression: %s'
     1212  +             )
     1213  +             % compression
     1214  +         )
1109 1215
1110 1216        if createopts.get('shallowfilestore'):
1111 1217            requirements.add(REQUIREMENT_SHALLOW_FILES)
1112 1218
1113 1219        return requirements
     1220  +
1114 1221
1115 1222    @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1116 1223    class sqlitefilestorage(object):
1117 1224        """Repository file storage backed by SQLite."""
     1225  +
1118 1226        def file(self, path):
1119 1227            if path[0] == b'/':
1120 1228                path = path[1:]
1121 1229
1122 1230            if REQUIREMENT_ZSTD in self.requirements:
  ...
1124 1232            elif REQUIREMENT_ZLIB in self.requirements:
1125 1233                compression = 'zlib'
1126 1234            elif REQUIREMENT_NONE in self.requirements:
1127 1235                compression = 'none'
1128 1236            else:
1129       -             raise error.Abort(_('unable to determine what compression engine '
1130       -                                 'to use for SQLite storage'))
     1237  +             raise error.Abort(
     1238  +                 _(
     1239  +                     'unable to determine what compression engine '
     1240  +                     'to use for SQLite storage'
     1241  +                 )
     1242  +             )
1131 1243
1132 1244            return sqlitefilestore(self._dbconn, path, compression)
     1245  +
1133 1246
1134 1247    def makefilestorage(orig, requirements, features, **kwargs):
1135 1248        """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1136 1249        if REQUIREMENT in requirements:
1137 1250            if REQUIREMENT_SHALLOW_FILES in requirements:
  ...
1139 1252
1140 1253            return sqlitefilestorage
1141 1254        else:
1142 1255            return orig(requirements=requirements, features=features, **kwargs)
1143 1256
     1257  +
1144 1258    def makemain(orig, ui, requirements, **kwargs):
1145 1259        if REQUIREMENT in requirements:
1146 1260            if REQUIREMENT_ZSTD in requirements and not zstd:
1147       -             raise error.Abort(_('repository uses zstandard compression, which '
1148       -                                 'is not available to this Mercurial install'))
     1261  +             raise error.Abort(
     1262  +                 _(
     1263  +                     'repository uses zstandard compression, which '
     1264  +                     'is not available to this Mercurial install'
     1265  +                 )
     1266  +             )
1149 1267
1150 1268            return sqliterepository
1151 1269
1152 1270        return orig(requirements=requirements, **kwargs)
     1271  +
1153 1272
1154 1273    def verifierinit(orig, self, *args, **kwargs):
1155 1274        orig(self, *args, **kwargs)
1156 1275
1157 1276        # We don't care that files in the store don't align with what is
1158 1277        # advertised. So suppress these warnings.
1159 1278        self.warnorphanstorefiles = False
1160 1279
     1280  +
1161 1281    def extsetup(ui):
1162 1282        localrepo.featuresetupfuncs.add(featuresetup)
1163       -     extensions.wrapfunction(localrepo, 'newreporequirements',
1164       -                             newreporequirements)
1165       -     extensions.wrapfunction(localrepo, 'makefilestorage',
1166       -                             makefilestorage)
1167       -     extensions.wrapfunction(localrepo, 'makemain',
1168       -                             makemain)
1169       -     extensions.wrapfunction(verify.verifier, '__init__',
1170       -                             verifierinit)
     1283  +     extensions.wrapfunction(
     1284  +         localrepo, 'newreporequirements', newreporequirements
     1285  +     )
     1286  +     extensions.wrapfunction(localrepo, 'makefilestorage', makefilestorage)
     1287  +     extensions.wrapfunction(localrepo, 'makemain', makemain)
     1288  +     extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
     1289  +
1171 1290
1172 1291    def reposetup(ui, repo):
1173 1292        if isinstance(repo, sqliterepository):
1174 1293            repo._db = None
1175 1294