comparison mercurial/interfaces/repository.py @ 42813:268662aac075

interfaces: create a new folder for interfaces and move repository.py in it

I was trying to understand current interfaces and write new ones, and I realized we need to improve how current interfaces are organised. This creates a dedicated folder for defining interfaces and moves `repository.py`, which defines all the current interfaces, inside it.

Differential Revision: https://phab.mercurial-scm.org/D6741
author Pulkit Goyal <pulkit@yandex-team.ru>
date Sun, 18 Aug 2019 00:45:33 +0300
parents mercurial/repository.py@83d090ebec0c
children 2c4f656c8e9f
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 from ..i18n import _
11 from .. import (
12 error,
13 )
14 from ..utils import (
15 interfaceutil,
16 )
17
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
22 # Local repository feature string.
23
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
30 # Repository supports being stream cloned.
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
32 # Files storage may lack data for all ancestors.
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
34
35 REVISION_FLAG_CENSORED = 1 << 15
36 REVISION_FLAG_ELLIPSIS = 1 << 14
37 REVISION_FLAG_EXTSTORED = 1 << 13
38
39 REVISION_FLAGS_KNOWN = (
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
41
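# Illustrative sketch (hypothetical helper, not part of the original module):
# revision flags are a bitwise composition of the REVISION_FLAG_* constants
# above, so they are tested with bitwise AND and validated against
# REVISION_FLAGS_KNOWN.
def _example_checkflags(flags):
    unknown = flags & ~REVISION_FLAGS_KNOWN
    if unknown:
        # Reject flag bits this module does not know about.
        raise ValueError('unknown revision flags: 0x%x' % unknown)
    # Test an individual flag bit.
    return bool(flags & REVISION_FLAG_CENSORED)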
42 CG_DELTAMODE_STD = b'default'
43 CG_DELTAMODE_PREV = b'previous'
44 CG_DELTAMODE_FULL = b'fulltext'
45 CG_DELTAMODE_P1 = b'p1'
46
47 class ipeerconnection(interfaceutil.Interface):
48 """Represents a "connection" to a repository.
49
50 This is the base interface for representing a connection to a repository.
51 It holds basic properties and methods applicable to all peer types.
52
53 This is not a complete interface definition and should not be used
54 outside of this module.
55 """
56 ui = interfaceutil.Attribute("""ui.ui instance""")
57
58 def url():
59 """Returns a URL string representing this peer.
60
61 Currently, implementations expose the raw URL used to construct the
62 instance. It may contain credentials as part of the URL. The
63 expectations of the value aren't well-defined and this could lead to
64 data leakage.
65
66 TODO audit/clean consumers and more clearly define the contents of this
67 value.
68 """
69
70 def local():
71 """Returns a local repository instance.
72
73 If the peer represents a local repository, returns an object that
74 can be used to interface with it. Otherwise returns ``None``.
75 """
76
77 def peer():
78 """Returns an object conforming to this interface.
79
80 Most implementations will ``return self``.
81 """
82
83 def canpush():
84 """Returns a boolean indicating if this peer can be pushed to."""
85
86 def close():
87 """Close the connection to this peer.
88
89 This is called when the peer will no longer be used. Resources
90 associated with the peer should be cleaned up.
91 """
92
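# Illustrative sketch (hypothetical helper): consumers of ``ipeerconnection``
# typically call ``local()`` to detect an in-process repository and fall back
# to the peer interface otherwise.
def _example_uselocal(peerobj):
    repo = peerobj.local()
    if repo is not None:
        # A local repository object was returned; operate on it directly.
        return repo
    # Otherwise keep using the peer interface; most implementations of
    # ``peer()`` simply return the instance itself.
    return peerobj.peer()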
93 class ipeercapabilities(interfaceutil.Interface):
94 """Peer sub-interface related to capabilities."""
95
96 def capable(name):
97 """Determine support for a named capability.
98
99 Returns ``False`` if capability not supported.
100
101 Returns ``True`` if boolean capability is supported. Returns a string
102 if capability support is non-boolean.
103
104 Capability strings may or may not map to wire protocol capabilities.
105 """
106
107 def requirecap(name, purpose):
108 """Require a capability to be present.
109
110 Raises a ``CapabilityError`` if the capability isn't present.
111 """
112
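# Illustrative sketch (hypothetical helper; the capability name is made up):
# ``capable()`` returns ``False``, ``True``, or a string for non-boolean
# capabilities, so callers need to handle all three shapes.
def _example_capability(peerobj):
    value = peerobj.capable(b'somecapability')
    if value is False:
        return None
    if value is True:
        # Boolean capability with no associated payload.
        return b''
    # Non-boolean capability: ``value`` carries the capability payload.
    return value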
113 class ipeercommands(interfaceutil.Interface):
114 """Client-side interface for communicating over the wire protocol.
115
116 This interface is used as a gateway to the Mercurial wire protocol.
117 Methods commonly call wire protocol commands of the same name.
118 """
119
120 def branchmap():
121 """Obtain heads in named branches.
122
123 Returns a dict mapping branch name to an iterable of nodes that are
124 heads on that branch.
125 """
126
127 def capabilities():
128 """Obtain capabilities of the peer.
129
130 Returns a set of string capabilities.
131 """
132
133 def clonebundles():
134 """Obtains the clone bundles manifest for the repo.
135
136 Returns the manifest as unparsed bytes.
137 """
138
139 def debugwireargs(one, two, three=None, four=None, five=None):
140 """Used to facilitate debugging of arguments passed over the wire."""
141
142 def getbundle(source, **kwargs):
143 """Obtain remote repository data as a bundle.
144
145 This command is how the bulk of repository data is transferred from
146 the peer to the local repository
147
148 Returns a generator of bundle data.
149 """
150
151 def heads():
152 """Determine all known head revisions in the peer.
153
154 Returns an iterable of binary nodes.
155 """
156
157 def known(nodes):
158 """Determine whether multiple nodes are known.
159
160 Accepts an iterable of nodes whose presence to check for.
161
162 Returns an iterable of booleans indicating whether the corresponding node
163 at that index is known to the peer.
164 """
165
166 def listkeys(namespace):
167 """Obtain all keys in a pushkey namespace.
168
169 Returns an iterable of key names.
170 """
171
172 def lookup(key):
173 """Resolve a value to a known revision.
174
175 Returns a binary node of the resolved revision on success.
176 """
177
178 def pushkey(namespace, key, old, new):
179 """Set a value using the ``pushkey`` protocol.
180
181 Arguments correspond to the pushkey namespace and key to operate on and
182 the old and new values for that key.
183
184 Returns a string with the peer result. The value inside varies by the
185 namespace.
186 """
187
188 def stream_out():
189 """Obtain streaming clone data.
190
191 Successful result should be a generator of data chunks.
192 """
193
194 def unbundle(bundle, heads, url):
195 """Transfer repository data to the peer.
196
197 This is how the bulk of data during a push is transferred.
198
199 Returns the integer number of heads added to the peer.
200 """
201
202 class ipeerlegacycommands(interfaceutil.Interface):
203 """Interface for implementing support for legacy wire protocol commands.
204
205 Wire protocol commands transition to legacy status when they are no longer
206 used by modern clients. To facilitate identifying which commands are
207 legacy, the interfaces are split.
208 """
209
210 def between(pairs):
211 """Obtain nodes between pairs of nodes.
212
213 ``pairs`` is an iterable of node pairs.
214
215 Returns an iterable of iterables of nodes corresponding to each
216 requested pair.
217 """
218
219 def branches(nodes):
220 """Obtain ancestor changesets of specific nodes back to a branch point.
221
222 For each requested node, the peer finds the first ancestor node that is
223 a DAG root or is a merge.
224
225 Returns an iterable of iterables with the resolved values for each node.
226 """
227
228 def changegroup(nodes, source):
229 """Obtain a changegroup with data for descendants of specified nodes."""
230
231 def changegroupsubset(bases, heads, source):
232 """Obtain a changegroup with data for changesets between bases and heads."""
233
234 class ipeercommandexecutor(interfaceutil.Interface):
235 """Represents a mechanism to execute remote commands.
236
237 This is the primary interface for requesting that wire protocol commands
238 be executed. Instances of this interface are active in a context manager
239 and have a well-defined lifetime. When the context manager exits, all
240 outstanding requests are waited on.
241 """
242
243 def callcommand(name, args):
244 """Request that a named command be executed.
245
246 Receives the command name and a dictionary of command arguments.
247
248 Returns a ``concurrent.futures.Future`` that will resolve to the
249 result of that command request. That exact value is left up to
250 the implementation and possibly varies by command.
251
252 Not all commands can coexist with other commands in an executor
253 instance: it depends on the underlying wire protocol transport being
254 used and the command itself.
255
256 Implementations MAY call ``sendcommands()`` automatically if the
257 requested command can not coexist with other commands in this executor.
258
259 Implementations MAY call ``sendcommands()`` automatically when the
260 future's ``result()`` is called. So, consumers using multiple
261 commands with an executor MUST ensure that ``result()`` is not called
262 until all command requests have been issued.
263 """
264
265 def sendcommands():
266 """Trigger submission of queued command requests.
267
268 Not all transports submit commands as soon as they are requested to
269 run. When called, this method forces queued command requests to be
270 issued. It will no-op if all commands have already been sent.
271
272 When called, no more new commands may be issued with this executor.
273 """
274
275 def close():
276 """Signal that this command request is finished.
277
278 When called, no more new commands may be issued. All outstanding
279 commands that have previously been issued are waited on before
280 returning. This not only includes waiting for the futures to resolve,
281 but also waiting for all response data to arrive. In other words,
282 calling this waits for all on-wire state for issued command requests
283 to finish.
284
285 When used as a context manager, this method is called when exiting the
286 context manager.
287
288 This method may call ``sendcommands()`` if there are buffered commands.
289 """
290
291 class ipeerrequests(interfaceutil.Interface):
292 """Interface for executing commands on a peer."""
293
294 limitedarguments = interfaceutil.Attribute(
295 """True if the peer cannot receive large argument values for commands."""
296 )
297
298 def commandexecutor():
299 """A context manager that resolves to an ipeercommandexecutor.
300
301 The object this resolves to can be used to issue command requests
302 to the peer.
303
304 Callers should call its ``callcommand`` method to issue command
305 requests.
306
307 A new executor should be obtained for each distinct set of commands
308 (possibly just a single command) that the consumer wants to execute
309 as part of a single operation or round trip. This is because some
310 peers are half-duplex and/or don't support persistent connections.
311 e.g. in the case of HTTP peers, commands sent to an executor represent
312 a single HTTP request. While some peers may support multiple command
313 sends over the wire per executor, consumers need to code to the least
314 capable peer. So it should be assumed that command executors buffer
315 called commands until they are told to send them and that each
316 command executor could result in a new connection or wire-level request
317 being issued.
318 """
319
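# Illustrative sketch (hypothetical usage; the command names are examples of
# existing wire protocol commands): the executor returned by
# ``commandexecutor()`` is used as a context manager, commands are queued via
# ``callcommand()``, and ``result()`` is only called once all commands for
# this round trip have been issued.
def _example_roundtrip(peerobj):
    with peerobj.commandexecutor() as executor:
        fheads = executor.callcommand(b'heads', {})
        fknown = executor.callcommand(b'known', {b'nodes': []})
        # Calling result() may force buffered commands to be sent.
        heads = fheads.result()
        known = fknown.result()
    return heads, known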
320 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
321 """Unified interface for peer repositories.
322
323 All peer instances must conform to this interface.
324 """
325
326 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
327 """Unified peer interface for wire protocol version 2 peers."""
328
329 apidescriptor = interfaceutil.Attribute(
330 """Data structure holding description of server API.""")
331
332 @interfaceutil.implementer(ipeerbase)
333 class peer(object):
334 """Base class for peer repositories."""
335
336 limitedarguments = False
337
338 def capable(self, name):
339 caps = self.capabilities()
340 if name in caps:
341 return True
342
343 name = '%s=' % name
344 for cap in caps:
345 if cap.startswith(name):
346 return cap[len(name):]
347
348 return False
349
350 def requirecap(self, name, purpose):
351 if self.capable(name):
352 return
353
354 raise error.CapabilityError(
355 _('cannot %s; remote repository does not support the '
356 '\'%s\' capability') % (purpose, name))
357
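# Illustrative sketch (hypothetical subclass): concrete peers derive from the
# ``peer`` base class above and provide ``capabilities()``; ``capable()`` and
# ``requirecap()`` then behave as documented in ``ipeercapabilities``.
class _exampledummypeer(peer):
    def capabilities(self):
        # A ``name=value`` entry is reported by ``capable()`` as the value.
        return {b'branchmap', b'somecap=somevalue'}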
358 class iverifyproblem(interfaceutil.Interface):
359 """Represents a problem with the integrity of the repository.
360
361 Instances of this interface are emitted to describe an integrity issue
362 with a repository (e.g. corrupt storage, missing data, etc).
363
364 Instances are essentially messages associated with severity.
365 """
366 warning = interfaceutil.Attribute(
367 """Message indicating a non-fatal problem.""")
368
369 error = interfaceutil.Attribute(
370 """Message indicating a fatal problem.""")
371
372 node = interfaceutil.Attribute(
373 """Revision encountering the problem.
374
375 ``None`` means the problem doesn't apply to a single revision.
376 """)
377
378 class irevisiondelta(interfaceutil.Interface):
379 """Represents a delta between one revision and another.
380
381 Instances convey enough information to allow a revision to be exchanged
382 with another repository.
383
384 Instances represent the fulltext revision data or a delta against
385 another revision. Therefore the ``revision`` and ``delta`` attributes
386 are mutually exclusive.
387
388 Typically used for changegroup generation.
389 """
390
391 node = interfaceutil.Attribute(
392 """20 byte node of this revision.""")
393
394 p1node = interfaceutil.Attribute(
395 """20 byte node of 1st parent of this revision.""")
396
397 p2node = interfaceutil.Attribute(
398 """20 byte node of 2nd parent of this revision.""")
399
400 linknode = interfaceutil.Attribute(
401 """20 byte node of the changelog revision this node is linked to.""")
402
403 flags = interfaceutil.Attribute(
404 """2 bytes of integer flags that apply to this revision.
405
406 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
407 """)
408
409 basenode = interfaceutil.Attribute(
410 """20 byte node of the revision this data is a delta against.
411
412 ``nullid`` indicates that the revision is a full revision and not
413 a delta.
414 """)
415
416 baserevisionsize = interfaceutil.Attribute(
417 """Size of base revision this delta is against.
418
419 May be ``None`` if ``basenode`` is ``nullid``.
420 """)
421
422 revision = interfaceutil.Attribute(
423 """Raw fulltext of revision data for this node.""")
424
425 delta = interfaceutil.Attribute(
426 """Delta between ``basenode`` and ``node``.
427
428 Stored in the bdiff delta format.
429 """)
430
431 class ifilerevisionssequence(interfaceutil.Interface):
432 """Contains index data for all revisions of a file.
433
434 Types implementing this behave like lists of tuples. The index
435 in the list corresponds to the revision number. The values contain
436 index metadata.
437
438 The *null* revision (revision number -1) is always the last item
439 in the index.
440 """
441
442 def __len__():
443 """The total number of revisions."""
444
445 def __getitem__(rev):
446 """Returns the object having a specific revision number.
447
448 Returns an 8-tuple with the following fields:
449
450 offset+flags
451 Contains the offset and flags for the revision. 64-bit unsigned
452 integer where first 6 bytes are the offset and the next 2 bytes
453 are flags. The offset can be 0 if it is not used by the store.
454 compressed size
455 Size of the revision data in the store. It can be 0 if it isn't
456 needed by the store.
457 uncompressed size
458 Fulltext size. It can be 0 if it isn't needed by the store.
459 base revision
460 Revision number of revision the delta for storage is encoded
461 against. -1 indicates not encoded against a base revision.
462 link revision
463 Revision number of changelog revision this entry is related to.
464 p1 revision
465 Revision number of 1st parent. -1 if no 1st parent.
466 p2 revision
467 Revision number of 2nd parent. -1 if no 2nd parent.
468 node
469 Binary node value for this revision number.
470
471 Negative values should index off the end of the sequence. ``-1``
472 should return the null revision. ``-2`` should return the most
473 recent revision.
474 """
475
476 def __contains__(rev):
477 """Whether a revision number exists."""
478
479 def insert(i, entry):
480 """Add an item to the index at specific revision."""
481
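# Illustrative sketch (hypothetical helper): index entries are the 8-tuples
# documented in ``__getitem__`` above; unpacking by position makes the field
# layout explicit.
def _example_indexentry(index, rev):
    (offsetflags, compressedsize, uncompressedsize, baserev,
     linkrev, p1rev, p2rev, node) = index[rev]
    # The offset occupies the upper 6 bytes, the flags the lower 2 bytes.
    offset = offsetflags >> 16
    flags = offsetflags & 0xffff
    return offset, flags, baserev, linkrev, node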
482 class ifileindex(interfaceutil.Interface):
483 """Storage interface for index data of a single file.
484
485 File storage data is divided into index metadata and data storage.
486 This interface defines the index portion of the interface.
487
488 The index logically consists of:
489
490 * A mapping between revision numbers and nodes.
491 * DAG data (storing and querying the relationship between nodes).
492 * Metadata to facilitate storage.
493 """
494 def __len__():
495 """Obtain the number of revisions stored for this file."""
496
497 def __iter__():
498 """Iterate over revision numbers for this file."""
499
500 def hasnode(node):
501 """Returns a bool indicating if a node is known to this store.
502
503 Implementations must only return True for full, binary node values:
504 hex nodes, revision numbers, and partial node matches must be
505 rejected.
506
507 The null node is never present.
508 """
509
510 def revs(start=0, stop=None):
511 """Iterate over revision numbers for this file, with control."""
512
513 def parents(node):
514 """Returns a 2-tuple of parent nodes for a revision.
515
516 Values will be ``nullid`` if the parent is empty.
517 """
518
519 def parentrevs(rev):
520 """Like parents() but operates on revision numbers."""
521
522 def rev(node):
523 """Obtain the revision number given a node.
524
525 Raises ``error.LookupError`` if the node is not known.
526 """
527
528 def node(rev):
529 """Obtain the node value given a revision number.
530
531 Raises ``IndexError`` if the node is not known.
532 """
533
534 def lookup(node):
535 """Attempt to resolve a value to a node.
536
537 Value can be a binary node, hex node, revision number, or a string
538 that can be converted to an integer.
539
540 Raises ``error.LookupError`` if a node could not be resolved.
541 """
542
543 def linkrev(rev):
544 """Obtain the changeset revision number a revision is linked to."""
545
546 def iscensored(rev):
547 """Return whether a revision's content has been censored."""
548
549 def commonancestorsheads(node1, node2):
550 """Obtain an iterable of nodes containing heads of common ancestors.
551
552 See ``ancestor.commonancestorsheads()``.
553 """
554
555 def descendants(revs):
556 """Obtain descendant revision numbers for a set of revision numbers.
557
558 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
559 """
560
561 def heads(start=None, stop=None):
562 """Obtain a list of nodes that are DAG heads, with control.
563
564 The set of revisions examined can be limited by specifying
565 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
566 iterable of nodes. DAG traversal starts at earlier revision
567 ``start`` and iterates forward until any node in ``stop`` is
568 encountered.
569 """
570
571 def children(node):
572 """Obtain nodes that are children of a node.
573
574 Returns a list of nodes.
575 """
576
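# Illustrative sketch (hypothetical helper): ``ifileindex`` exposes both the
# node<->rev mapping and the DAG, e.g. collecting the ancestors of a node by
# walking ``parentrevs()``.
def _example_ancestorrevs(store, node):
    seen = set()
    pending = [store.rev(node)]
    while pending:
        rev = pending.pop()
        if rev < 0 or rev in seen:
            # ``-1`` (the null revision) marks a missing parent.
            continue
        seen.add(rev)
        pending.extend(store.parentrevs(rev))
    return seen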
577 class ifiledata(interfaceutil.Interface):
578 """Storage interface for data storage of a specific file.
579
580 This complements ``ifileindex`` and provides an interface for accessing
581 data for a tracked file.
582 """
583 def size(rev):
584 """Obtain the fulltext size of file data.
585
586 Any metadata is excluded from size measurements.
587 """
588
589 def revision(node, raw=False):
590 """Obtain fulltext data for a node.
591
592 By default, any storage transformations are applied before the data
593 is returned. If ``raw`` is True, non-raw storage transformations
594 are not applied.
595
596 The fulltext data may contain a header containing metadata. Most
597 consumers should use ``read()`` to obtain the actual file data.
598 """
599
600 def rawdata(node):
601 """Obtain raw data for a node.
602 """
603
604 def read(node):
605 """Resolve file fulltext data.
606
607 This is similar to ``revision()`` except any metadata in the data
608 headers is stripped.
609 """
610
611 def renamed(node):
612 """Obtain copy metadata for a node.
613
614 Returns ``False`` if no copy metadata is stored or a 2-tuple of
615 (path, node) from which this revision was copied.
616 """
617
618 def cmp(node, fulltext):
619 """Compare fulltext to another revision.
620
621 Returns True if the fulltext is different from what is stored.
622
623 This takes copy metadata into account.
624
625 TODO better document the copy metadata and censoring logic.
626 """
627
628 def emitrevisions(nodes,
629 nodesorder=None,
630 revisiondata=False,
631 assumehaveparentrevisions=False,
632 deltamode=CG_DELTAMODE_STD):
633 """Produce ``irevisiondelta`` for revisions.
634
635 Given an iterable of nodes, emits objects conforming to the
636 ``irevisiondelta`` interface that describe revisions in storage.
637
638 This method is a generator.
639
640 The input nodes may be unordered. Implementations must ensure that a
641 node's parents are emitted before the node itself. Transitively, this
642 means that a node may only be emitted once all its ancestors in
643 ``nodes`` have also been emitted.
644
645 By default, emits "index" data (the ``node``, ``p1node``, and
646 ``p2node`` attributes). If ``revisiondata`` is set, revision data
647 will also be present on the emitted objects.
648
649 With default argument values, implementations can choose to emit
650 either fulltext revision data or a delta. When emitting deltas,
651 implementations must consider whether the delta's base revision
652 fulltext is available to the receiver.
653
654 The base revision fulltext is guaranteed to be available if any of
655 the following are met:
656
657 * Its fulltext revision was emitted by this method call.
658 * A delta for that revision was emitted by this method call.
659 * ``assumehaveparentrevisions`` is True and the base revision is a
660 parent of the node.
661
662 ``nodesorder`` can be used to control the order that revisions are
663 emitted. By default, revisions can be reordered as long as they are
664 in DAG topological order (see above). If the value is ``nodes``,
665 the iteration order from ``nodes`` should be used. If the value is
666 ``storage``, then the native order from the backing storage layer
667 is used. (Not all storage layers will have strong ordering and behavior
668 of this mode is storage-dependent.) ``nodes`` ordering can force
669 revisions to be emitted before their ancestors, so consumers should
670 use it with care.
671
672 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
673 be set and it is the caller's responsibility to resolve it, if needed.
674
675 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
676 all revision data should be emitted as deltas against the revision
677 emitted just prior. The initial revision should be a delta against its
678 1st parent.
679 """
680
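# Illustrative sketch (hypothetical consumer): ``emitrevisions()`` yields
# ``irevisiondelta`` objects carrying either a fulltext (``revision``) or a
# delta against ``basenode``; consumers must handle both forms.
def _example_collectrevisions(store, nodes):
    out = []
    for rev in store.emitrevisions(nodes, revisiondata=True):
        if rev.delta is not None:
            # bdiff delta to apply against the fulltext of ``rev.basenode``.
            out.append((rev.node, rev.basenode, rev.delta))
        else:
            # Full revision data; no delta base is required.
            out.append((rev.node, None, rev.revision))
    return out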
681 class ifilemutation(interfaceutil.Interface):
682 """Storage interface for mutation events of a tracked file."""
683
684 def add(filedata, meta, transaction, linkrev, p1, p2):
685 """Add a new revision to the store.
686
687 Takes file data, dictionary of metadata, a transaction, linkrev,
688 and parent nodes.
689
690 Returns the node that was added.
691
692 May no-op if a revision matching the supplied data is already stored.
693 """
694
695 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
696 flags=0, cachedelta=None):
697 """Add a new revision to the store.
698
699 This is similar to ``add()`` except it operates at a lower level.
700
701 The data passed in already contains a metadata header, if any.
702
703 ``node`` and ``flags`` can be used to define the expected node and
704 the flags to use with storage. ``flags`` is a bitwise value composed
705 of the various ``REVISION_FLAG_*`` constants.
706
707 ``add()`` is usually called when adding files from e.g. the working
708 directory. ``addrevision()`` is often called by ``add()`` and for
709 scenarios where revision data has already been computed, such as when
710 applying raw data from a peer repo.
711 """
712
713 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
714 maybemissingparents=False):
715 """Process a series of deltas for storage.
716
717 ``deltas`` is an iterable of 7-tuples of
718 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
719 to add.
720
721 The ``delta`` field contains ``mpatch`` data to apply to a base
722 revision, identified by ``deltabase``. The base node can be
723 ``nullid``, in which case the header from the delta can be ignored
724 and the delta used as the fulltext.
725
726 ``addrevisioncb`` should be called for each node as it is committed.
727
728 ``maybemissingparents`` is a bool indicating whether the incoming
729 data may reference parents/ancestor revisions that aren't present.
730 This flag is set when receiving data into a "shallow" store that
731 doesn't hold all history.
732
733 Returns a list of nodes that were processed. A node will be in the list
734 even if it existed in the store previously.
735 """
736
737 def censorrevision(tr, node, tombstone=b''):
738 """Remove the content of a single revision.
739
740 The specified ``node`` will have its content purged from storage.
741 Future attempts to access the revision data for this node will
742 result in failure.
743
744 A ``tombstone`` message can optionally be stored. This message may be
745 displayed to users when they attempt to access the missing revision
746 data.
747
748 Storage backends may have stored deltas against the previous content
749 in this revision. As part of censoring a revision, these storage
750 backends are expected to rewrite any internally stored deltas such
751 that they no longer reference the deleted content.
752 """
753
754 def getstrippoint(minlink):
755 """Find the minimum revision that must be stripped to strip a linkrev.
756
757 Returns a 2-tuple containing the minimum revision number and a set
758 of all revisions numbers that would be broken by this strip.
759
760 TODO this is highly revlog centric and should be abstracted into
761 a higher-level deletion API. ``repair.strip()`` relies on this.
762 """
763
764 def strip(minlink, transaction):
765 """Remove storage of items starting at a linkrev.
766
767 This uses ``getstrippoint()`` to determine the first node to remove.
768 Then it effectively truncates storage for all revisions after that.
769
770 TODO this is highly revlog centric and should be abstracted into a
771 higher-level deletion API.
772 """
773
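# Illustrative sketch (hypothetical adapter): ``addgroup()`` consumes
# 7-tuples of (node, p1, p2, linknode, deltabase, delta, flags), so incoming
# revision objects are usually adapted into that shape before storage.
def _example_storedeltas(store, incoming, linkmapper, transaction):
    deltas = (
        (d.node, d.p1node, d.p2node, d.linknode, d.basenode, d.delta, d.flags)
        for d in incoming)
    return store.addgroup(deltas, linkmapper, transaction)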
774 class ifilestorage(ifileindex, ifiledata, ifilemutation):
775 """Complete storage interface for a single tracked file."""
776
777 def files():
778 """Obtain paths that are backing storage for this file.
779
780 TODO this is used heavily by verify code and there should probably
781 be a better API for that.
782 """
783
784 def storageinfo(exclusivefiles=False, sharedfiles=False,
785 revisionscount=False, trackedsize=False,
786 storedsize=False):
787 """Obtain information about storage for this file's data.
788
789 Returns a dict describing storage for this tracked path. The keys
790 in the dict map to arguments of the same. The arguments are bools
791 indicating whether to calculate and obtain that data.
792
793 exclusivefiles
794 Iterable of (vfs, path) describing files that are exclusively
795 used to back storage for this tracked path.
796
797 sharedfiles
798 Iterable of (vfs, path) describing files that are used to back
799 storage for this tracked path. Those files may also provide storage
800 for other stored entities.
801
802 revisionscount
803 Number of revisions available for retrieval.
804
805 trackedsize
806 Total size in bytes of all tracked revisions. This is a sum of the
807 length of the fulltext of all revisions.
808
809 storedsize
810 Total size in bytes used to store data for all tracked revisions.
811 This is commonly less than ``trackedsize`` due to internal usage
812 of deltas rather than fulltext revisions.
813
814 Not all storage backends may support all queries or have a reasonable
815 value to use. In that case, the value should be set to ``None`` and
816 callers are expected to handle this special value.
817 """
818
819 def verifyintegrity(state):
820 """Verifies the integrity of file storage.
821
822 ``state`` is a dict holding state of the verifier process. It can be
823 used to communicate data between invocations of multiple storage
824 primitives.
825
826 If individual revisions cannot have their revision content resolved,
827 the method is expected to set the ``skipread`` key to a set of nodes
828 that encountered problems.
829
830 The method yields objects conforming to the ``iverifyproblem``
831 interface.
832 """
833
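# Illustrative sketch (hypothetical helper; key names are assumed to match
# the keyword arguments): callers of ``storageinfo()`` request only the
# fields they need and must tolerate ``None`` for unsupported queries.
def _example_storageratio(store):
    info = store.storageinfo(trackedsize=True, storedsize=True)
    tracked = info.get(b'trackedsize')
    stored = info.get(b'storedsize')
    if not tracked or stored is None:
        return None
    # Rough ratio of on-disk size to fulltext size.
    return float(stored) / float(tracked)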
834 class idirs(interfaceutil.Interface):
835 """Interface representing a collection of directories from paths.
836
837 This interface is essentially a derived data structure representing
838 directories from a collection of paths.
839 """
840
841 def addpath(path):
842 """Add a path to the collection.
843
844 All directories in the path will be added to the collection.
845 """
846
847 def delpath(path):
848 """Remove a path from the collection.
849
850 If the removal was the last path in a particular directory, the
851 directory is removed from the collection.
852 """
853
854 def __iter__():
855 """Iterate over the directories in this collection of paths."""
856
857 def __contains__(path):
858 """Whether a specific directory is in this collection."""
859
860 class imanifestdict(interfaceutil.Interface):
861 """Interface representing a manifest data structure.
862
863 A manifest is effectively a dict mapping paths to entries. Each entry
864 consists of a binary node and extra flags affecting that entry.
865 """
866
867 def __getitem__(path):
868 """Returns the binary node value for a path in the manifest.
869
870 Raises ``KeyError`` if the path does not exist in the manifest.
871
872 Equivalent to ``self.find(path)[0]``.
873 """
874
875 def find(path):
876 """Returns the entry for a path in the manifest.
877
878 Returns a 2-tuple of (node, flags).
879
880 Raises ``KeyError`` if the path does not exist in the manifest.
881 """
882
883 def __len__():
884 """Return the number of entries in the manifest."""
885
886 def __nonzero__():
887 """Returns True if the manifest has entries, False otherwise."""
888
889 __bool__ = __nonzero__
890
891 def __setitem__(path, node):
892 """Define the node value for a path in the manifest.
893
894 If the path is already in the manifest, its flags will be copied to
895 the new entry.
896 """
897
898 def __contains__(path):
899 """Whether a path exists in the manifest."""
900
901 def __delitem__(path):
902 """Remove a path from the manifest.
903
904 Raises ``KeyError`` if the path is not in the manifest.
905 """
906
907 def __iter__():
908 """Iterate over paths in the manifest."""
909
910 def iterkeys():
911 """Iterate over paths in the manifest."""
912
913 def keys():
914 """Obtain a list of paths in the manifest."""
915
916 def filesnotin(other, match=None):
917 """Obtain the set of paths in this manifest but not in another.
918
919 ``match`` is an optional matcher function to be applied to both
920 manifests.
921
922 Returns a set of paths.
923 """
924
925 def dirs():
926 """Returns an object implementing the ``idirs`` interface."""
927
928 def hasdir(dir):
929 """Returns a bool indicating if a directory is in this manifest."""
930
931 def matches(match):
932 """Generate a new manifest filtered through a matcher.
933
934 Returns an object conforming to the ``imanifestdict`` interface.
935 """
936
937 def walk(match):
938 """Generator of paths in manifest satisfying a matcher.
939
940 This is equivalent to ``self.matches(match).iterkeys()`` except a new
941 manifest object is not created.
942
943 If the matcher has explicit files listed and they don't exist in
944 the manifest, ``match.bad()`` is called for each missing file.
945 """
946
947 def diff(other, match=None, clean=False):
948 """Find differences between this manifest and another.
949
950 This manifest is compared to ``other``.
951
952 If ``match`` is provided, the two manifests are filtered against this
953 matcher and only entries satisfying the matcher are compared.
954
955 If ``clean`` is True, unchanged files are included in the returned
956 object.
957
958 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
959 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
960 represents the node and flags for this manifest and ``(node2, flag2)``
961 are the same for the other manifest.
962 """
963
964 def setflag(path, flag):
965 """Set the flag value for a given path.
966
967 Raises ``KeyError`` if the path is not already in the manifest.
968 """
969
970 def get(path, default=None):
971 """Obtain the node value for a path or a default value if missing."""
972
973 def flags(path, default=''):
974 """Return the flags value for a path or a default value if missing."""
975
976 def copy():
977 """Return a copy of this manifest."""
978
979 def items():
980 """Returns an iterable of (path, node) for items in this manifest."""
981
982 def iteritems():
983 """Identical to items()."""
984
985 def iterentries():
986 """Returns an iterable of (path, node, flags) for this manifest.
987
988 Similar to ``iteritems()`` except items are a 3-tuple and include
989 flags.
990 """
991
992 def text():
993 """Obtain the raw data representation for this manifest.
994
995 Result is used to create a manifest revision.
996 """
997
998 def fastdelta(base, changes):
999 """Obtain a delta between this manifest and another given changes.
1000
1001 ``base`` is the raw data representation for another manifest.
1002
1003 ``changes`` is an iterable of ``(path, to_delete)``.
1004
1005 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1006 delta between ``base`` and this manifest.
1007 """
1008
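# Illustrative sketch (hypothetical helper; assumes the convention that a
# missing entry in ``diff()`` output is reported with a ``None`` node):
# classifying paths from the ((node1, flag1), (node2, flag2)) structure
# documented above.
def _example_classifydiff(m1, m2):
    onlyother, onlythis, modified = [], [], []
    for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
        if node1 is None:
            onlyother.append(path)
        elif node2 is None:
            onlythis.append(path)
        else:
            modified.append(path)
    return onlyother, onlythis, modified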
1009 class imanifestrevisionbase(interfaceutil.Interface):
1010 """Base interface representing a single revision of a manifest.
1011
1012 Should not be used as a primary interface: should always be inherited
1013 as part of a larger interface.
1014 """
1015
1016 def new():
1017 """Obtain a new manifest instance.
1018
1019 Returns an object conforming to the ``imanifestrevisionwritable``
1020 interface. The instance will be associated with the same
1021 ``imanifestlog`` collection as this instance.
1022 """
1023
1024 def copy():
1025 """Obtain a copy of this manifest instance.
1026
1027 Returns an object conforming to the ``imanifestrevisionwritable``
1028 interface. The instance will be associated with the same
1029 ``imanifestlog`` collection as this instance.
1030 """
1031
1032 def read():
1033 """Obtain the parsed manifest data structure.
1034
1035 The returned object conforms to the ``imanifestdict`` interface.
1036 """
1037
1038 class imanifestrevisionstored(imanifestrevisionbase):
1039 """Interface representing a manifest revision committed to storage."""
1040
1041 def node():
1042 """The binary node for this manifest."""
1043
1044 parents = interfaceutil.Attribute(
1045 """List of binary nodes that are parents for this manifest revision."""
1046 )
1047
1048 def readdelta(shallow=False):
1049 """Obtain the manifest data structure representing changes from parent.
1050
1051 This manifest is compared to its 1st parent. A new manifest representing
1052 those differences is constructed.
1053
1054 The returned object conforms to the ``imanifestdict`` interface.
1055 """
1056
1057 def readfast(shallow=False):
1058 """Calls either ``read()`` or ``readdelta()``.
1059
1060 The faster of the two options is called.
1061 """
1062
1063 def find(key):
1064 """Calls ``self.read().find(key)``.
1065
1066 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1067 """
1068
1069 class imanifestrevisionwritable(imanifestrevisionbase):
1070 """Interface representing a manifest revision that can be committed."""
1071
1072 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1073 """Add this revision to storage.
1074
1075 Takes a transaction object, the changeset revision number it will
1076 be associated with, its parent nodes, and lists of added and
1077 removed paths.
1078
1079 If match is provided, storage can choose not to inspect or write out
1080 items that do not match. Storage is still required to be able to provide
1081 the full manifest in the future for any directories written (these
1082 manifests should not be "narrowed on disk").
1083
1084 Returns the binary node of the created revision.
1085 """
1086
1087 class imanifeststorage(interfaceutil.Interface):
1088 """Storage interface for manifest data."""
1089
1090 tree = interfaceutil.Attribute(
1091 """The path to the directory this manifest tracks.
1092
1093 The empty bytestring represents the root manifest.
1094 """)
1095
1096 index = interfaceutil.Attribute(
1097 """An ``ifilerevisionssequence`` instance.""")
1098
1099 indexfile = interfaceutil.Attribute(
1100 """Path of revlog index file.
1101
1102 TODO this is revlog specific and should not be exposed.
1103 """)
1104
1105 opener = interfaceutil.Attribute(
1106 """VFS opener to use to access underlying files used for storage.
1107
1108 TODO this is revlog specific and should not be exposed.
1109 """)
1110
1111 version = interfaceutil.Attribute(
1112 """Revlog version number.
1113
1114 TODO this is revlog specific and should not be exposed.
1115 """)
1116
1117 _generaldelta = interfaceutil.Attribute(
1118 """Whether generaldelta storage is being used.
1119
1120 TODO this is revlog specific and should not be exposed.
1121 """)
1122
1123 fulltextcache = interfaceutil.Attribute(
1124 """Dict with cache of fulltexts.
1125
1126 TODO this doesn't feel appropriate for the storage interface.
1127 """)
1128
1129 def __len__():
1130 """Obtain the number of revisions stored for this manifest."""
1131
1132 def __iter__():
1133 """Iterate over revision numbers for this manifest."""
1134
1135 def rev(node):
1136 """Obtain the revision number given a binary node.
1137
1138 Raises ``error.LookupError`` if the node is not known.
1139 """
1140
1141 def node(rev):
1142 """Obtain the node value given a revision number.
1143
1144 Raises ``error.LookupError`` if the revision is not known.
1145 """
1146
1147 def lookup(value):
1148 """Attempt to resolve a value to a node.
1149
1150 Value can be a binary node, hex node, revision number, or a bytes
1151 that can be converted to an integer.
1152
1153 Raises ``error.LookupError`` if a node could not be resolved.
1154 """
1155
1156 def parents(node):
1157 """Returns a 2-tuple of parent nodes for a node.
1158
1159 Values will be ``nullid`` if the parent is empty.
1160 """
1161
1162 def parentrevs(rev):
1163 """Like parents() but operates on revision numbers."""
1164
1165 def linkrev(rev):
1166 """Obtain the changeset revision number a revision is linked to."""
1167
1168 def revision(node, _df=None, raw=False):
1169 """Obtain fulltext data for a node."""
1170
1171 def rawdata(node, _df=None):
1172 """Obtain raw data for a node."""
1173
1174 def revdiff(rev1, rev2):
1175 """Obtain a delta between two revision numbers.
1176
1177 The returned data is the result of ``bdiff.bdiff()`` on the raw
1178 revision data.
1179 """
1180
1181 def cmp(node, fulltext):
1182 """Compare fulltext to another revision.
1183
1184 Returns True if the fulltext is different from what is stored.
1185 """
1186
1187 def emitrevisions(nodes,
1188 nodesorder=None,
1189 revisiondata=False,
1190 assumehaveparentrevisions=False):
1191 """Produce ``irevisiondelta`` describing revisions.
1192
1193 See the documentation for ``ifiledata`` for more.
1194 """
1195
1196 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1197 """Process a series of deltas for storage.
1198
1199 See the documentation in ``ifilemutation`` for more.
1200 """
1201
1202 def rawsize(rev):
1203 """Obtain the size of tracked data.
1204
1205 Is equivalent to ``len(m.rawdata(node))``.
1206
1207 TODO this method is only used by upgrade code and may be removed.
1208 """
1209
1210 def getstrippoint(minlink):
1211 """Find minimum revision that must be stripped to strip a linkrev.
1212
1213 See the documentation in ``ifilemutation`` for more.
1214 """
1215
1216 def strip(minlink, transaction):
1217 """Remove storage of items starting at a linkrev.
1218
1219 See the documentation in ``ifilemutation`` for more.
1220 """
1221
1222 def checksize():
1223 """Obtain the expected sizes of backing files.
1224
1225 TODO this is used by verify and it should not be part of the interface.
1226 """
1227
1228 def files():
1229 """Obtain paths that are backing storage for this manifest.
1230
1231 TODO this is used by verify and there should probably be a better API
1232 for this functionality.
1233 """
1234
1235 def deltaparent(rev):
1236 """Obtain the revision that a revision is delta'd against.
1237
1238 TODO delta encoding is an implementation detail of storage and should
1239 not be exposed to the storage interface.
1240 """
1241
1242 def clone(tr, dest, **kwargs):
1243 """Clone this instance to another."""
1244
1245 def clearcaches(clear_persisted_data=False):
1246 """Clear any caches associated with this instance."""
1247
1248 def dirlog(d):
1249 """Obtain a manifest storage instance for a tree."""
1250
1251 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1252 match=None):
1253 """Add a revision to storage.
1254
1255 ``m`` is an object conforming to ``imanifestdict``.
1256
1257 ``link`` is the linkrev revision number.
1258
1259 ``p1`` and ``p2`` are the parent revision numbers.
1260
1261 ``added`` and ``removed`` are iterables of added and removed paths,
1262 respectively.
1263
1264 ``readtree`` is a function that can be used to read the child tree(s)
1265 when recursively writing the full tree structure when using
1266 tree manifests.
1267
1268 ``match`` is a matcher that can be used to hint to storage that not all
1269 paths must be inspected; this is an optimization and can be safely
1270 ignored. Note that the storage must still be able to reproduce a full
1271 manifest including files that did not match.
1272 """
1273
1274 def storageinfo(exclusivefiles=False, sharedfiles=False,
1275 revisionscount=False, trackedsize=False,
1276 storedsize=False):
1277 """Obtain information about storage for this manifest's data.
1278
1279 See ``ifilestorage.storageinfo()`` for a description of this method.
1280 This one behaves the same way, except for manifest data.
1281 """
1282
1283 class imanifestlog(interfaceutil.Interface):
1284 """Interface representing a collection of manifest snapshots.
1285
1286 Represents the root manifest in a repository.
1287
1288 Also serves as a means to access nested tree manifests and to cache
1289 tree manifests.
1290 """
1291
1292 def __getitem__(node):
1293 """Obtain a manifest instance for a given binary node.
1294
1295 Equivalent to calling ``self.get('', node)``.
1296
1297 The returned object conforms to the ``imanifestrevisionstored``
1298 interface.
1299 """
1300
1301 def get(tree, node, verify=True):
1302 """Retrieve the manifest instance for a given directory and binary node.
1303
1304 ``node`` always refers to the node of the root manifest (which will be
1305 the only manifest if flat manifests are being used).
1306
1307 If ``tree`` is the empty string, the root manifest is returned.
1308 Otherwise the manifest for the specified directory will be returned
1309 (requires tree manifests).
1310
1311 If ``verify`` is True, ``LookupError`` is raised if the node is not
1312 known.
1313
1314 The returned object conforms to the ``imanifestrevisionstored``
1315 interface.
1316 """
1317
1318 def getstorage(tree):
1319 """Retrieve an interface to storage for a particular tree.
1320
1321 If ``tree`` is the empty bytestring, storage for the root manifest will
1322 be returned. Otherwise storage for a tree manifest is returned.
1323
1324 TODO formalize interface for returned object.
1325 """
1326
1327 def clearcaches():
1328 """Clear caches associated with this collection."""
1329
1330 def rev(node):
1331 """Obtain the revision number for a binary node.
1332
1333 Raises ``error.LookupError`` if the node is not known.
1334 """
1335
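# Illustrative sketch (hypothetical helper): manifests are addressed through
# ``imanifestlog`` by the node of the *root* manifest; subdirectory manifests
# require tree manifests and are reached via ``get()``.
def _example_readmanifest(manifestlog, node, tree=b''):
    if not tree:
        # Equivalent to manifestlog.get(b'', node).
        mctx = manifestlog[node]
    else:
        mctx = manifestlog.get(tree, node)
    # read() returns an object conforming to ``imanifestdict``.
    return mctx.read()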
1336 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1337 """Local repository sub-interface providing access to tracked file storage.
1338
1339 This interface defines how a repository accesses storage for a single
1340 tracked file path.
1341 """
1342
1343 def file(f):
1344 """Obtain a filelog for a tracked path.
1345
1346 The returned type conforms to the ``ifilestorage`` interface.
1347 """
1348
1349 class ilocalrepositorymain(interfaceutil.Interface):
1350 """Main interface for local repositories.
1351
1352 This currently captures the reality of things - not how things should be.
1353 """
1354
1355 supportedformats = interfaceutil.Attribute(
1356 """Set of requirements that apply to stream clone.
1357
1358 This is actually a class attribute and is shared among all instances.
1359 """)
1360
1361 supported = interfaceutil.Attribute(
1362 """Set of requirements that this repo is capable of opening.""")
1363
1364 requirements = interfaceutil.Attribute(
1365 """Set of requirements this repo uses.""")
1366
1367 features = interfaceutil.Attribute(
1368 """Set of "features" this repository supports.
1369
1370 A "feature" is a loosely-defined term. It can refer to a feature
1371 in the classical sense or can describe an implementation detail
1372 of the repository. For example, a ``readonly`` feature may denote
1373 the repository as read-only. Or a ``revlogfilestore`` feature may
1374 denote that the repository is using revlogs for file storage.
1375
1376 The intent of features is to provide a machine-queryable mechanism
1377 for repo consumers to test for various repository characteristics.
1378
1379 Features are similar to ``requirements``. The main difference is that
1380 requirements are stored on-disk and represent requirements to open the
1381 repository. Features are more run-time capabilities of the repository
1382 and more granular capabilities (which may be derived from requirements).
1383 """)
1384
1385 filtername = interfaceutil.Attribute(
1386 """Name of the repoview that is active on this repo.""")
1387
1388 wvfs = interfaceutil.Attribute(
1389 """VFS used to access the working directory.""")
1390
1391 vfs = interfaceutil.Attribute(
1392 """VFS rooted at the .hg directory.
1393
1394 Used to access repository data not in the store.
1395 """)
1396
1397 svfs = interfaceutil.Attribute(
1398 """VFS rooted at the store.
1399
1400 Used to access repository data in the store. Typically .hg/store.
1401 But can point elsewhere if the store is shared.
1402 """)
1403
1404 root = interfaceutil.Attribute(
1405 """Path to the root of the working directory.""")
1406
1407 path = interfaceutil.Attribute(
1408 """Path to the .hg directory.""")
1409
1410 origroot = interfaceutil.Attribute(
1411 """The filesystem path that was used to construct the repo.""")
1412
1413 auditor = interfaceutil.Attribute(
1414 """A pathauditor for the working directory.
1415
1416 This checks if a path refers to a nested repository.
1417
1418 Operates on the filesystem.
1419 """)
1420
1421 nofsauditor = interfaceutil.Attribute(
1422 """A pathauditor for the working directory.
1423
1424 This is like ``auditor`` except it doesn't do filesystem checks.
1425 """)
1426
1427 baseui = interfaceutil.Attribute(
1428 """Original ui instance passed into constructor.""")
1429
1430 ui = interfaceutil.Attribute(
1431 """Main ui instance for this instance.""")
1432
1433 sharedpath = interfaceutil.Attribute(
1434 """Path to the .hg directory of the repo this repo was shared from.""")
1435
1436 store = interfaceutil.Attribute(
1437 """A store instance.""")
1438
1439 spath = interfaceutil.Attribute(
1440 """Path to the store.""")
1441
1442 sjoin = interfaceutil.Attribute(
1443 """Alias to self.store.join.""")
1444
1445 cachevfs = interfaceutil.Attribute(
1446 """A VFS used to access the cache directory.
1447
1448 Typically .hg/cache.
1449 """)
1450
1451 wcachevfs = interfaceutil.Attribute(
1452 """A VFS used to access the cache directory dedicated to the working copy.
1453
1454 Typically .hg/wcache.
1455 """)
1456
1457 filteredrevcache = interfaceutil.Attribute(
1458 """Holds sets of revisions to be filtered.""")
1459
1460 names = interfaceutil.Attribute(
1461 """A ``namespaces`` instance.""")
1462
1463 def close():
1464 """Close the handle on this repository."""
1465
1466 def peer():
1467 """Obtain an object conforming to the ``peer`` interface."""
1468
1469 def unfiltered():
1470 """Obtain an unfiltered/raw view of this repo."""
1471
1472 def filtered(name, visibilityexceptions=None):
1473 """Obtain a named view of this repository."""
1474
1475 obsstore = interfaceutil.Attribute(
1476 """A store of obsolescence data.""")
1477
1478 changelog = interfaceutil.Attribute(
1479 """A handle on the changelog revlog.""")
1480
1481 manifestlog = interfaceutil.Attribute(
1482 """An instance conforming to the ``imanifestlog`` interface.
1483
1484 Provides access to manifests for the repository.
1485 """)
1486
1487 dirstate = interfaceutil.Attribute(
1488 """Working directory state.""")
1489
1490 narrowpats = interfaceutil.Attribute(
1491 """Matcher patterns for this repository's narrowspec.""")
1492
1493 def narrowmatch(match=None, includeexact=False):
1494 """Obtain a matcher for the narrowspec."""
1495
1496 def setnarrowpats(newincludes, newexcludes):
1497 """Define the narrowspec for this repository."""
1498
1499 def __getitem__(changeid):
1500 """Try to resolve a changectx."""
1501
1502 def __contains__(changeid):
1503 """Whether a changeset exists."""
1504
1505 def __nonzero__():
1506 """Always returns True."""
1507 return True
1508
1509 __bool__ = __nonzero__
1510
1511 def __len__():
1512 """Returns the number of changesets in the repo."""
1513
1514 def __iter__():
1515 """Iterate over revisions in the changelog."""
1516
1517 def revs(expr, *args):
1518 """Evaluate a revset.
1519
1520 Emits revisions.
1521 """
1522
1523 def set(expr, *args):
1524 """Evaluate a revset.
1525
1526 Emits changectx instances.
1527 """
1528
1529 def anyrevs(specs, user=False, localalias=None):
1530 """Find revisions matching one of the given revsets."""
1531
1532 def url():
1533 """Returns a string representing the location of this repo."""
1534
1535 def hook(name, throw=False, **args):
1536 """Call a hook."""
1537
1538 def tags():
1539 """Return a mapping of tag to node."""
1540
1541 def tagtype(tagname):
1542 """Return the type of a given tag."""
1543
1544 def tagslist():
1545 """Return a list of tags ordered by revision."""
1546
1547 def nodetags(node):
1548 """Return the tags associated with a node."""
1549
1550 def nodebookmarks(node):
1551 """Return the list of bookmarks pointing to the specified node."""
1552
1553 def branchmap():
1554 """Return a mapping of branch to heads in that branch."""
1555
1556 def revbranchcache():
1557 """Obtain the revision-branch cache."""
1558
1559 def branchtip(branch, ignoremissing=False):
1560 """Return the tip node for a given branch."""
1561
1562 def lookup(key):
1563 """Resolve the node for a revision."""
1564
1565 def lookupbranch(key):
1566 """Look up the branch name of the given revision or branch name."""
1567
1568 def known(nodes):
1569 """Determine whether a series of nodes is known.
1570
1571 Returns a list of bools.
1572 """
1573
1574 def local():
1575 """Whether the repository is local."""
1576 return True
1577
1578 def publishing():
1579 """Whether the repository is a publishing repository."""
1580
1581 def cancopy():
1582 pass
1583
1584 def shared():
1585 """The type of shared repository or None."""
1586
1587 def wjoin(f, *insidef):
1588 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1589
1590 def setparents(p1, p2):
1591 """Set the parent nodes of the working directory."""
1592
1593 def filectx(path, changeid=None, fileid=None):
1594 """Obtain a filectx for the given file revision."""
1595
1596 def getcwd():
1597 """Obtain the current working directory from the dirstate."""
1598
1599 def pathto(f, cwd=None):
1600 """Obtain the relative path to a file."""
1601
1602 def adddatafilter(name, fltr):
1603 pass
1604
1605 def wread(filename):
1606 """Read a file from wvfs, using data filters."""
1607
1608 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1609 """Write data to a file in the wvfs, using data filters."""
1610
1611 def wwritedata(filename, data):
1612 """Resolve data for writing to the wvfs, using data filters."""
1613
1614 def currenttransaction():
1615 """Obtain the current transaction instance or None."""
1616
1617 def transaction(desc, report=None):
1618 """Open a new transaction to write to the repository."""
1619
1620 def undofiles():
1621 """Returns a list of (vfs, path) for files to undo transactions."""
1622
1623 def recover():
1624 """Roll back an interrupted transaction."""
1625
1626 def rollback(dryrun=False, force=False):
1627 """Undo the last transaction.
1628
1629 DANGEROUS.
1630 """
1631
1632 def updatecaches(tr=None, full=False):
1633 """Warm repo caches."""
1634
1635 def invalidatecaches():
1636 """Invalidate cached data due to the repository mutating."""
1637
1638 def invalidatevolatilesets():
1639 pass
1640
1641 def invalidatedirstate():
1642 """Invalidate the dirstate."""
1643
1644 def invalidate(clearfilecache=False):
1645 pass
1646
1647 def invalidateall():
1648 pass
1649
1650 def lock(wait=True):
1651 """Lock the repository store and return a lock instance."""
1652
1653 def wlock(wait=True):
1654 """Lock the non-store parts of the repository."""
1655
1656 def currentwlock():
1657 """Return the wlock if it's held or None."""
1658
1659 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1660 pass
1661
1662 def commit(text='', user=None, date=None, match=None, force=False,
1663 editor=False, extra=None):
1664 """Add a new revision to the repository."""
1665
1666 def commitctx(ctx, error=False, origctx=None):
1667 """Commit a commitctx instance to the repository."""
1668
1669 def destroying():
1670 """Inform the repository that nodes are about to be destroyed."""
1671
1672 def destroyed():
1673 """Inform the repository that nodes have been destroyed."""
1674
1675 def status(node1='.', node2=None, match=None, ignored=False,
1676 clean=False, unknown=False, listsubrepos=False):
1677 """Convenience method to call repo[x].status()."""
1678
1679 def addpostdsstatus(ps):
1680 pass
1681
1682 def postdsstatus():
1683 pass
1684
1685 def clearpostdsstatus():
1686 pass
1687
1688 def heads(start=None):
1689 """Obtain list of nodes that are DAG heads."""
1690
1691 def branchheads(branch=None, start=None, closed=False):
1692 pass
1693
1694 def branches(nodes):
1695 """Obtain ancestor changesets of specific nodes back to a branch point."""
1696
1697 def between(pairs):
1698 """Obtain nodes between pairs of nodes."""
1699
1700 def checkpush(pushop):
1701 pass
1702
1703 prepushoutgoinghooks = interfaceutil.Attribute(
1704 """util.hooks instance.""")
1705
1706 def pushkey(namespace, key, old, new):
1707 """Set a value using the ``pushkey`` protocol."""
1708
1709 def listkeys(namespace):
1710 """Obtain all keys in a pushkey namespace."""
1711
1712 def debugwireargs(one, two, three=None, four=None, five=None):
1713 """Used to facilitate debugging of arguments passed over the wire."""
1714
1715 def savecommitmessage(text):
1716 """Save a commit message for later reuse."""
1717
1718 class completelocalrepository(ilocalrepositorymain,
1719 ilocalrepositoryfilestorage):
1720 """Complete interface for a local repository."""
1721
1722 class iwireprotocolcommandcacher(interfaceutil.Interface):
1723 """Represents a caching backend for wire protocol commands.
1724
1725 Wire protocol version 2 supports transparent caching of many commands.
1726 To leverage this caching, servers can activate objects that cache
1727 command responses. Objects handle both cache writing and reading.
1728 This interface defines how that response caching mechanism works.
1729
1730 Wire protocol version 2 commands emit a series of objects that are
1731 serialized and sent to the client. The caching layer exists between
1732 the invocation of the command function and the sending of its output
1733 objects to an output layer.
1734
1735 Instances of this interface represent a binding to a cache that
1736 can serve a response (in place of calling a command function) and/or
1737 write responses to a cache for subsequent use.
1738
1739 When a command request arrives, the following happens with regards
1740 to this interface:
1741
1742 1. The server determines whether the command request is cacheable.
1743 2. If it is, an instance of this interface is spawned.
1744 3. The cacher is activated in a context manager (``__enter__`` is called).
1745 4. A cache *key* for that request is derived. This will call the
1746 instance's ``adjustcachekeystate()`` method so the derivation
1747 can be influenced.
1748 5. The cacher is informed of the derived cache key via a call to
1749 ``setcachekey()``.
1750 6. The cacher's ``lookup()`` method is called to test for presence of
1751 the derived key in the cache.
1752 7. If ``lookup()`` returns a hit, that cached result is used in place
1753 of invoking the command function. ``__exit__`` is called and the instance
1754 is discarded.
1755 8. The command function is invoked.
1756 9. ``onobject()`` is called for each object emitted by the command
1757 function.
1758 10. After the final object is seen, ``onfinished()`` is called.
1759 11. ``__exit__`` is called to signal the end of use of the instance.
1760
1761 Cache *key* derivation can be influenced by the instance.
1762
1763 Cache keys are initially derived by a deterministic representation of
1764 the command request. This includes the command name, arguments, protocol
1765 version, etc. This initial key derivation is performed by CBOR-encoding a
1766 data structure and feeding that output into a hasher.
1767
1768 Instances of this interface can influence this initial key derivation
1769 via ``adjustcachekeystate()``.
1770
1771 The instance is informed of the derived cache key via a call to
1772 ``setcachekey()``. The instance must store the key locally so it can
1773 be consulted on subsequent operations that may require it.
1774
1775 When constructed, the instance has access to a callable that can be used
1776 for encoding response objects. This callable receives as its single
1777 argument an object emitted by a command function. It returns an iterable
1778 of bytes chunks representing the encoded object. Unless the cacher is
1779 caching native Python objects in memory or has a way of reconstructing
1780 the original Python objects, implementations typically call this function
1781 to produce bytes from the output objects and then store those bytes in
1782 the cache. When it comes time to re-emit those bytes, they are wrapped
1783 in a ``wireprototypes.encodedresponse`` instance to tell the output
1784 layer that they are pre-encoded.
1785
1786 When receiving the objects emitted by the command function, instances
1787 can choose what to do with those objects. The simplest thing to do is
1788 re-emit the original objects. They will be forwarded to the output
1789 layer and will be processed as if the cacher did not exist.
1790
1791 Implementations could also choose to not emit objects - instead locally
1792 buffering objects or their encoded representation. They could then emit
1793 a single "coalesced" object when ``onfinished()`` is called. In
1794 this way, the implementation would function as a filtering layer of
1795 sorts.
1796
1797 When caching objects, typically the encoded form of the object will
1798 be stored. Keep in mind that if the original object is forwarded to
1799 the output layer, it will need to be encoded there as well. For large
1800 output, this redundant encoding could add overhead. Implementations
1801 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1802 instances to avoid this overhead.
1803 """
1804 def __enter__():
1805 """Marks the instance as active.
1806
1807 Should return self.
1808 """
1809
1810 def __exit__(exctype, excvalue, exctb):
1811 """Called when cacher is no longer used.
1812
1813 This can be used by implementations to perform cleanup actions (e.g.
1814 disconnecting network sockets, aborting a partially cached response).
1815 """
1816
1817 def adjustcachekeystate(state):
1818 """Influences cache key derivation by adjusting state to derive key.
1819
1820 A dict defining the state used to derive the cache key is passed.
1821
1822 Implementations can modify this dict to record additional state that
1823 is wanted to influence key derivation.
1824
1825 Implementations are *highly* encouraged to not modify or delete
1826 existing keys.
1827 """
1828
1829 def setcachekey(key):
1830 """Record the derived cache key for this request.
1831
1832 Instances may mutate the key for internal usage, as desired. e.g.
1833 instances may wish to prepend the repo name, introduce path
1834 components for filesystem or URL addressing, etc. Behavior is up to
1835 the cache.
1836
1837 Returns a bool indicating if the request is cacheable by this
1838 instance.
1839 """
1840
1841 def lookup():
1842 """Attempt to resolve an entry in the cache.
1843
1844 The instance is instructed to look for the cache key that it was
1845 informed about via the call to ``setcachekey()``.
1846
1847 If there's no cache hit or the cacher doesn't wish to use the cached
1848 entry, ``None`` should be returned.
1849
1850 Else, a dict defining the cached result should be returned. The
1851 dict may have the following keys:
1852
1853 objs
1854 An iterable of objects that should be sent to the client. That
1855 iterable of objects is expected to be what the command function
1856 would return if invoked or an equivalent representation thereof.
1857 """
1858
1859 def onobject(obj):
1860 """Called when a new object is emitted from the command function.
1861
1862 Receives as its argument the object that was emitted from the
1863 command function.
1864
1865 This method returns an iterator of objects to forward to the output
1866 layer. The easiest implementation is a generator that just
1867 ``yield obj``.
1868 """
1869
1870 def onfinished():
1871 """Called after all objects have been emitted from the command function.
1872
1873 Implementations should return an iterator of objects to forward to
1874 the output layer.
1875
1876 This method can be a generator.
1877 """
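# Illustrative sketch (hypothetical, minimal implementation): a cacher that
# never caches anything but still satisfies the lifecycle described above by
# forwarding every emitted object straight to the output layer.
@interfaceutil.implementer(iwireprotocolcommandcacher)
class _examplenullcacher(object):
    def __init__(self):
        self.key = None

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        pass

    def adjustcachekeystate(self, state):
        # No additional state influences cache key derivation.
        pass

    def setcachekey(self, key):
        self.key = key
        # Nothing is cacheable by this implementation.
        return False

    def lookup(self):
        # Never a cache hit.
        return None

    def onobject(self, obj):
        # Forward objects unchanged to the output layer.
        yield obj

    def onfinished(self):
        # Nothing was buffered, so there is nothing extra to emit.
        return []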