view hgext/remotefilelog/metadatastore.py @ 42050:03f6480bfdda
unshelve: disable unshelve during merge (issue5123)
As stated in issue5123, unshelve can destroy the second parent of
the context when run with an uncommitted merge. This patch makes
unshelve abort when called with an uncommitted merge.
See how shelve.mergefiles works. The commit structure looks like this:
```
... -> pctx -> tmpwctx -> shelvectx
              /
             /
   second merge parent

pctx      = parent before merging the working context (first merge parent)
tmpwctx   = committed working directory after the merge (with two parents)
shelvectx = shelved context
```
shelve.mergefiles first updates to pctx, then reverts shelvectx to pctx with:
```
cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
               *pathtofiles(repo, files),
               **{'no_backup': True})
```
Reverting the tmpwctx files that were merged in from the second parent back to
pctx makes them show up as added, because they are not present in pctx.
Changing this revert operation is crucial to restoring the parents after
unshelve. This is a complicated issue and it is not a regression fix, so for
the time being unshelve simply aborts during an uncommitted merge.
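For reference, the guard has roughly the following shape; this is a hedged
sketch rather than a quote of the patch, and the helper name and abort
message are assumptions:
```
# Hypothetical sketch of the guard added to hgext/shelve.py (helper name and
# abort message are assumed, not copied from the patch).
from mercurial import error
from mercurial.i18n import _

def _forbiduncommittedmerge(repo):
    # repo[None] is the working context; two parents mean an uncommitted merge.
    if len(repo[None].parents()) > 1:
        raise error.Abort(_('cannot unshelve while merging'))
```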
(Details taken from http://mercurial.808500.n3.nabble.com/PATCH-V3-shelve-restore-parents-after-unshelve-issue5123-tt4036858.html#a4037408)
Differential Revision: https://phab.mercurial-scm.org/D6169
| author | Navaneeth Suresh <navaneeths1998@gmail.com> |
|---|---|
| date | Mon, 25 Mar 2019 12:33:41 +0530 |
| parents | 13d4ad8d7801 |
| children | 2372284d9457 |
```
from __future__ import absolute_import

from mercurial.node import hex, nullid

from . import (
    basestore,
    shallowutil,
)

class unionmetadatastore(basestore.baseunionstore):
    def __init__(self, *args, **kwargs):
        super(unionmetadatastore, self).__init__(*args, **kwargs)

        self.stores = args
        self.writestore = kwargs.get(r'writestore')

        # If allowincomplete==True then the union store can return partial
        # ancestor lists, otherwise it will throw a KeyError if a full
        # history can't be found.
        self.allowincomplete = kwargs.get(r'allowincomplete', False)

    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
           node: (p1, p2, linknode, copyfrom),
           ...
        }
        """
        if known is None:
            known = set()
        if node in known:
            return []

        ancestors = {}

        def traverse(curname, curnode):
            # TODO: this algorithm has the potential to traverse parts of
            # history twice. Ex: with A->B->C->F and A->B->D->F, both D and C
            # may be queued as missing, then B and A are traversed for both.
            queue = [(curname, curnode)]
            missing = []
            seen = set()
            while queue:
                name, node = queue.pop()
                if (name, node) in seen:
                    continue
                seen.add((name, node))
                value = ancestors.get(node)
                if not value:
                    missing.append((name, node))
                    continue
                p1, p2, linknode, copyfrom = value
                if p1 != nullid and p1 not in known:
                    queue.append((copyfrom or curname, p1))
                if p2 != nullid and p2 not in known:
                    queue.append((curname, p2))
            return missing

        missing = [(name, node)]
        while missing:
            curname, curnode = missing.pop()
            try:
                ancestors.update(self._getpartialancestors(curname, curnode,
                                                           known=known))
                newmissing = traverse(curname, curnode)
                missing.extend(newmissing)
            except KeyError:
                # If we allow incomplete histories, don't throw.
                if not self.allowincomplete:
                    raise
                # If the requested name+node doesn't exist, always throw.
                if (curname, curnode) == (name, node):
                    raise

        # TODO: ancestors should probably be (name, node) -> (value)
        return ancestors

    @basestore.baseunionstore.retriable
    def _getpartialancestors(self, name, node, known=None):
        for store in self.stores:
            try:
                return store.getancestors(name, node, known=known)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    @basestore.baseunionstore.retriable
    def getnodeinfo(self, name, node):
        for store in self.stores:
            try:
                return store.getnodeinfo(name, node)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    def add(self, name, node, data):
        raise RuntimeError("cannot add content only to remotefilelog "
                           "contentstore")

    def getmissing(self, keys):
        missing = keys
        for store in self.stores:
            if missing:
                missing = store.getmissing(missing)
        return missing

    def markledger(self, ledger, options=None):
        for store in self.stores:
            store.markledger(ledger, options)

    def getmetrics(self):
        metrics = [s.getmetrics() for s in self.stores]
        return shallowutil.sumdicts(*metrics)

class remotefilelogmetadatastore(basestore.basestore):
    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
           node: (p1, p2, linknode, copyfrom),
           ...
        }
        """
        data = self._getdata(name, node)
        ancestors = shallowutil.ancestormap(data)
        return ancestors

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, parents, linknode):
        raise RuntimeError("cannot add metadata only to remotefilelog "
                           "metadatastore")

class remotemetadatastore(object):
    def __init__(self, ui, fileservice, shared):
        self._fileservice = fileservice
        self._shared = shared

    def getancestors(self, name, node, known=None):
        self._fileservice.prefetch([(name, hex(node))], force=True,
                                   fetchdata=False, fetchhistory=True)
        return self._shared.getancestors(name, node, known=known)

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, data):
        raise RuntimeError("cannot add to a remote store")

    def getmissing(self, keys):
        return keys

    def markledger(self, ledger, options=None):
        pass
```