evolve: changeset 343:6b92f8d5ae58
adapt for new mercurial
author | Pierre-Yves.David@ens-lyon.org
--- | ---
date | Tue, 03 Jul 2012 11:35:31 +0200
parents | aab826129142
children | f4302a2079f4
files | docs/tutorials/tutorial.t hgext/obsolete.py tests/test-obsolete.t tests/test-qsync.t
diffstat | 4 files changed, 61 insertions(+), 297 deletions(-)
line diff
--- a/docs/tutorials/tutorial.t Tue Jul 03 14:18:33 2012 +0200
+++ b/docs/tutorials/tutorial.t Tue Jul 03 11:35:31 2012 +0200
@@ -223,7 +223,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  (run 'hg update' to get a working copy)
+  (run 'hg heads .' to see heads, 'hg merge' to merge)
 
 I now have a new heads. Note that this remote head is immutable
--- a/hgext/obsolete.py Tue Jul 03 14:18:33 2012 +0200
+++ b/hgext/obsolete.py Tue Jul 03 11:35:31 2012 +0200
@@ -120,11 +120,10 @@
 ### Patch changectx
 #############################
 
-def obsolete(ctx):
-    """is the changeset obsolete by other"""
-    if ctx.node()is None:
-        return False
-    return bool(ctx._repo.obsoletedby(ctx.node())) and ctx.phase()
+# core one is buggy
+def obsolete(self):
+    """True if the changeset is obsolete"""
+    return self.node() in self._repo.obsstore.precursors and self.phase()
 
 context.changectx.obsolete = obsolete
 
@@ -410,34 +409,6 @@
     except KeyError:
         pass # rebase not found
 
-# Pushkey mechanism for mutable
-#########################################
-
-def listmarkers(repo):
-    """List markers over pushkey"""
-    if not repo.obsstore:
-        return {}
-    data = repo.obsstore._writemarkers()
-    return {'dump': base85.b85encode(data)}
-
-def pushmarker(repo, key, old, new):
-    """Push markers over pushkey"""
-    if key != 'dump':
-        repo.ui.warn(_('unknown key: %r') % key)
-        return 0
-    if old:
-        repo.ui.warn(_('unexpected old value') % key)
-        return 0
-    data = base85.b85decode(new)
-    lock = repo.lock()
-    try:
-        repo.obsstore.mergemarkers(data)
-        return 1
-    finally:
-        lock.release()
-
-pushkey.register('obsolete', pushmarker, listmarkers)
-
 ### Discovery wrapping
 #############################
 
@@ -550,20 +521,6 @@
 cmdtable = {}
 command = cmdutil.command(cmdtable)
 
-@command('debugobsolete', [], _('SUBJECT OBJECT'))
-def cmddebugobsolete(ui, repo, subject, object):
-    """add an obsolete relation between two nodes
-
-    The subject is expected to be a newer version of the object.
-    """
-    lock = repo.lock()
-    try:
-        sub = repo[subject]
-        obj = repo[object]
-        repo.addobsolete(sub.node(), obj.node())
-    finally:
-        lock.release()
-    return 0
 
 @command('debugconvertobsolete', [], '')
 def cmddebugconvertobsolete(ui, repo):
@@ -646,6 +603,15 @@
         ui.warn(_('Working directory parent is obsolete\n'))
     return res
 
+def wrapmaycreateobsmarker(origfn, ui, repo, *args, **opts):
+    lock = repo.lock()
+    try:
+        res = origfn(ui, repo, *args, **opts)
+        repo._turn_extinct_secret()
+    finally:
+        lock.release()
+    return res
+
 def noextinctsvisibleheads(orig, repo):
     repo._turn_extinct_secret()
     return orig(repo)
@@ -657,11 +623,8 @@
         lock = repo.lock()
         try:
             meta = {
-                'subjects': [new],
-                'object': oldnode,
-                'date': util.makedate(),
+                'date': '%i %i' % util.makedate(),
                 'user': ui.username(),
-                'reason': 'commit --amend',
             }
             repo.obsstore.create(oldnode, [new], 0, meta)
             repo._clearobsoletecache()
@@ -673,6 +636,7 @@
 def uisetup(ui):
     extensions.wrapcommand(commands.table, "update", wrapmayobsoletewc)
     extensions.wrapcommand(commands.table, "pull", wrapmayobsoletewc)
+    extensions.wrapcommand(commands.table, "debugobsolete", wrapmaycreateobsmarker)
     if util.safehasattr(cmdutil, 'amend'):
         extensions.wrapfunction(cmdutil, 'amend', wrapcmdutilamend)
     extensions.wrapfunction(discovery, 'findcommonoutgoing', wrapfindcommonoutgoing)
@@ -763,158 +727,6 @@
     a.update('\0')
     return a.digest()
 
-# mercurial backport
-
-def encodemeta(meta):
-    """Return encoded metadata string to string mapping.
-
-    Assume no ':' in key and no '\0' in both key and value."""
-    for key, value in meta.iteritems():
-        if ':' in key or '\0' in key:
-            raise ValueError("':' and '\0' are forbidden in metadata key'")
-        if '\0' in value:
-            raise ValueError("':' are forbidden in metadata value'")
-    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
-
-def decodemeta(data):
-    """Return string to string dictionary from encoded version."""
-    d = {}
-    for l in data.split('\0'):
-        if l:
-            key, value = l.split(':')
-            d[key] = value
-    return d
-
-# data used for parsing and writing
-_fmversion = 0
-_fmfixed = '>BIB20s'
-_fmnode = '20s'
-_fmfsize = struct.calcsize(_fmfixed)
-_fnodesize = struct.calcsize(_fmnode)
-
-def _readmarkers(data):
-    """Read and enumerate markers from raw data"""
-    off = 0
-    diskversion = _unpack('>B', data[off:off + 1])[0]
-    off += 1
-    if diskversion != _fmversion:
-        raise util.Abort(_('parsing obsolete marker: unknown version %r')
-                         % diskversion)
-
-    # Loop on markers
-    l = len(data)
-    while off + _fmfsize <= l:
-        # read fixed part
-        cur = data[off:off + _fmfsize]
-        off += _fmfsize
-        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
-        # read replacement
-        sucs = ()
-        if nbsuc:
-            s = (_fnodesize * nbsuc)
-            cur = data[off:off + s]
-            sucs = _unpack(_fmnode * nbsuc, cur)
-            off += s
-        # read metadata
-        # (metadata will be decoded on demand)
-        metadata = data[off:off + mdsize]
-        if len(metadata) != mdsize:
-            raise util.Abort(_('parsing obsolete marker: metadata is too '
-                               'short, %d bytes expected, got %d')
-                             % (len(metadata), mdsize))
-        off += mdsize
-        yield (pre, sucs, flags, metadata)
-
-class obsstore(object):
-    """Store obsolete markers
-
-    Markers can be accessed with two mappings:
-    - precursors: old -> set(new)
-    - successors: new -> set(old)
-    """
-
-    def __init__(self):
-        self._all = []
-        # new markers to serialize
-        self._new = []
-        self.precursors = {}
-        self.successors = {}
-
-    def __iter__(self):
-        return iter(self._all)
-
-    def __nonzero__(self):
-        return bool(self._all)
-
-    def create(self, prec, succs=(), flag=0, metadata=None):
-        """obsolete: add a new obsolete marker
-
-        * ensuring it is hashable
-        * check mandatory metadata
-        * encode metadata
-        """
-        if metadata is None:
-            metadata = {}
-        if len(prec) != 20:
-            raise ValueError(prec)
-        for succ in succs:
-            if len(succ) != 20:
-                raise ValueError(prec)
-        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
-        self.add(marker)
-
-    def add(self, marker):
-        """Add a new marker to the store
-
-        This marker still needs to be written to disk"""
-        self._new.append(marker)
-        self._load(marker)
-
-    def loadmarkers(self, data):
-        """Load all markers in data, mark them as known."""
-        for marker in _readmarkers(data):
-            self._load(marker)
-
-    def mergemarkers(self, data):
-        other = set(_readmarkers(data))
-        local = set(self._all)
-        new = other - local
-        for marker in new:
-            self.add(marker)
-
-    def flushmarkers(self, stream):
-        """Write all markers to a stream
-
-        After this operation, "new" markers are considered "known"."""
-        self._writemarkers(stream)
-        self._new[:] = []
-
-    def _load(self, marker):
-        self._all.append(marker)
-        pre, sucs = marker[:2]
-        self.precursors.setdefault(pre, set()).add(marker)
-        for suc in sucs:
-            self.successors.setdefault(suc, set()).add(marker)
-
-    def _writemarkers(self, stream=None):
-        # Kept separate from flushmarkers(), it will be reused for
-        # markers exchange.
-        if stream is None:
-            final = []
-            w = final.append
-        else:
-            w = stream.write
-        w(_pack('>B', _fmversion))
-        for marker in self._all:
-            pre, sucs, flags, metadata = marker
-            nbsuc = len(sucs)
-            format = _fmfixed + (_fmnode * nbsuc)
-            data = [nbsuc, len(metadata), flags, pre]
-            data.extend(sucs)
-            w(_pack(format, *data))
-            w(metadata)
-        if stream is None:
-            return ''.join(final)
 
 
 ### repo subclassing
@@ -950,31 +762,19 @@
             """return the set of node that <node> make obsolete (sub)"""
             return set(marker[0] for marker in self.obsstore.successors.get(node, []))
 
-        @storecache('obsstore')
-        def obsstore(self):
-            if not getattr(self, '_importoldobsolete', False):
-                data = repo.opener.tryread('obsolete-relations')
-                if not data:
-                    data = repo.sopener.tryread('obsoletemarkers')
-                if data:
-                    raise util.Abort('old format of obsolete marker detected!\n'
-                                     'run `hg debugconvertobsolete` once.')
-            store = obsstore()
-            data = self.sopener.tryread('obsstore')
-            if data:
-                store.loadmarkers(data)
-            return store
-
         @util.propertycache
         def _obsoleteset(self):
             """the set of obsolete revision"""
+            data = repo.opener.tryread('obsolete-relations')
+            if not data:
+                data = repo.sopener.tryread('obsoletemarkers')
+            if data:
+                raise util.Abort('old format of obsolete marker detected!\n'
+                                 'run `hg debugconvertobsolete` once.')
             obs = set()
             nm = self.changelog.nodemap
-            for obj in self.obsstore.precursors:
-                try: # /!\api change in Hg 2.2 (e8d37b78acfb22ae2c1fb126c2)/!\
-                    rev = nm.get(obj)
-                except TypeError: #XXX to remove while breaking Hg 2.1 support
-                    rev = nm.get(obj, None)
+            for prec in self.obsstore.precursors:
+                rev = nm.get(prec)
                 if rev is not None:
                     obs.add(rev)
             return obs
@@ -1034,9 +834,8 @@
             lock = self.lock()
            try:
                 meta = {
-                    'date': util.makedate(),
+                    'date': '%i %i' % util.makedate(),
                     'user': ui.username(),
-                    'reason': 'unknown',
                 }
                 subs = (sub == nullid) and [] or [sub]
                 mid = self.obsstore.create(obj, subs, 0, meta)
@@ -1051,7 +850,7 @@
             # Assume oldnodes are all descendants of a single rev
             rootrevs = self.revs('roots(%ln)', oldnodes)
             assert len(rootrevs) == 1, rootrevs
-            rootnode = self[rootrevs[0]].node()
+            #rootnode = self[rootrevs[0]].node()
             for n in oldnodes:
                 self.addobsolete(newnode, n)
 
@@ -1064,27 +863,6 @@
             expobs = [c.node() for c in repo.set(query)]
             phases.retractboundary(repo, 2, expobs)
 
-        ### Disk IO
-
-        def lock(self, *args, **kwargs):
-            l = olock(*args, **kwargs)
-            if not getattr(l.releasefn, 'obspatched', False):
-                oreleasefn = l.releasefn
-                def releasefn(*args, **kwargs):
-                    if 'obsstore' in vars(self) and self.obsstore._new:
-                        f = self.sopener('obsstore', 'wb', atomictemp=True)
-                        try:
-                            self.obsstore.flushmarkers(f)
-                            f.close()
-                        except: # re-raises
-                            f.discard()
-                            raise
-                    oreleasefn(*args, **kwargs)
-                releasefn.obspatched = True
-                l.releasefn = releasefn
-            return l
-
-
         ### pull // push support
         def pull(self, remote, *args, **kwargs):
@@ -1092,13 +870,8 @@
             l = repo.lock()
             try:
                 result = opull(remote, *args, **kwargs)
-                remoteobs = remote.listkeys('obsolete')
-                if 'dump' in remoteobs:
-                    data = base85.b85decode(remoteobs['dump'])
-                    self.obsstore.mergemarkers(data)
-                    self._clearobsoletecache()
-                self._turn_extinct_secret()
-                return result
+                self._turn_extinct_secret()
+                return result
             finally:
                 l.release()
 
@@ -1106,46 +879,26 @@
             """wrapper around pull that pull obsolete relation"""
             self._turn_extinct_secret()
             result = opush(remote, *args, **opts)
-            if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
-                data = self.obsstore._writemarkers()
-                r = remote.pushkey('obsolete', 'dump', '',
-                                   base85.b85encode(data))
-                if not r:
-                    self.ui.warn(_('failed to push obsolete markers!\n'))
             self._turn_extinct_secret()
-
             return result
 
         ### rollback support
         # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\
-        if util.safehasattr(repo, '_journalfiles'): # Hg 2.2
-            def _journalfiles(self):
-                return o_journalfiles() + (self.sjoin('journal.obsstore'),)
+        def _journalfiles(self):
+            return o_journalfiles() + (self.sjoin('journal.obsstore'),)
 
-            def _writejournal(self, desc):
-                """wrapped version of _writejournal that save obsolete data"""
-                o_writejournal(desc)
-                filename = 'obsstore'
-                filepath = self.sjoin(filename)
-                if os.path.exists(filepath):
-                    journalname = 'journal.' + filename
-                    journalpath = self.sjoin(journalname)
-                    util.copyfile(filepath, journalpath)
+        def _writejournal(self, desc):
+            """wrapped version of _writejournal that save obsolete data"""
+            o_writejournal(desc)
+            filename = 'obsstore'
+            filepath = self.sjoin(filename)
+            if os.path.exists(filepath):
+                journalname = 'journal.' + filename
+                journalpath = self.sjoin(journalname)
+                util.copyfile(filepath, journalpath)
 
-        else: # XXX removing this bloc will break Hg 2.1 support
-            def _writejournal(self, desc):
-                """wrapped version of _writejournal that save obsolete data"""
-                entries = list(o_writejournal(desc))
-                filename = 'obsstore'
-                filepath = self.sjoin(filename)
-                if os.path.exists(filepath):
-                    journalname = 'journal.' + filename
-                    journalpath = self.sjoin(journalname)
-                    util.copyfile(filepath, journalpath)
-                entries.append(journalpath)
-                return tuple(entries)
 
         def _rollback(self, dryrun, force):
             """wrapped version of _rollback that restore obsolete data"""
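
The heart of the hgext/obsolete.py change is that the extension stops shipping its own `debugobsolete` command and obsstore backport and instead wraps what is now in core Mercurial. Below is a minimal, illustrative sketch of that `extensions.wrapcommand` pattern; the names come from the diff above (`_turn_extinct_secret` is a helper defined by the evolve extension, not by core Mercurial), and the snippet is a sketch rather than a verbatim copy of the extension.

```python
# Sketch of the command-wrapping pattern used by this changeset.
# Assumption: `_turn_extinct_secret` is the evolve helper seen in the diff;
# everything else is standard Mercurial extension API.
from mercurial import commands, extensions

def wrapmaycreateobsmarker(origfn, ui, repo, *args, **opts):
    """Run the wrapped command under the repo lock, then refresh extinct phases."""
    lock = repo.lock()
    try:
        res = origfn(ui, repo, *args, **opts)  # the in-core debugobsolete
        repo._turn_extinct_secret()            # evolve-specific bookkeeping
    finally:
        lock.release()
    return res

def uisetup(ui):
    # Replace the in-core command with the wrapper when the extension loads.
    extensions.wrapcommand(commands.table, "debugobsolete", wrapmaycreateobsmarker)
```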
--- a/tests/test-obsolete.t Tue Jul 03 14:18:33 2012 +0200
+++ b/tests/test-obsolete.t Tue Jul 03 11:35:31 2012 +0200
@@ -15,6 +15,9 @@
   > hg add "$1"
   > hg ci -m "add $1"
   > }
+  $ getid() {
+  > hg id --debug -ir "$1"
+  > }
   $ alias qlog="hg log --template='{rev}\n- {node|short}\n'"
 
   $ hg init local
@@ -27,7 +30,14 @@
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit obsol_c # 3
   created new head
-  $ hg debugobsolete 3 2
+  $ getid 2
+  4538525df7e2b9f09423636c61ef63a4cb872a2d
+  $ getid 3
+  0d3f46688ccc6e756c7e96cf64c391c411309597
+  $ hg debugobsolete 4538525df7e2b9f09423636c61ef63a4cb872a2d 0d3f46688ccc6e756c7e96cf64c391c411309597
+  $ hg debugobsolete
+  4538525df7e2b9f09423636c61ef63a4cb872a2d 0d3f46688ccc6e756c7e96cf64c391c411309597 0 {'date': '', 'user': 'test'}
+
 
 Test that obsolete changeset are hidden
 
@@ -82,7 +92,7 @@
   $ hg up 1 -q
   $ mkcommit "obsol_c'" # 4 (on 1)
   created new head
-  $ hg debugobsolete 4 3
+  $ hg debugobsolete `getid 3` `getid 4`
   $ qlog
   4
   - 725c380fe99b
@@ -180,7 +190,7 @@
   Working directory parent is obsolete
   $ mkcommit obsol_d # 6
   created new head
-  $ hg debugobsolete 6 5
+  $ hg debugobsolete `getid 5` `getid 6`
   $ qlog
   6
   - 95de7fc6918d
@@ -235,7 +245,7 @@
   Working directory parent is obsolete
   $ mkcommit "obsol_d'" # 7
   created new head
-  $ hg debugobsolete 7 6
+  $ hg debugobsolete `getid 6` `getid 7`
   $ hg pull -R ../other-new .
   pulling from .
   searching for changes
@@ -318,7 +328,7 @@
   Working directory parent is obsolete
   $ mkcommit "obsol_d''"
   created new head
-  $ hg debugobsolete 8 7
+  $ hg debugobsolete `getid 7` `getid 8`
   $ cd ../other-new
   $ hg up -q 3
   $ hg pull ../local/
@@ -375,8 +385,10 @@
   created new head
   $ hg id -n
   9
-  $ hg debugobsolete 9 0
-  83b5778897ad try to obsolete immutable changeset 1f0dee641bb7
+  $ hg debugobsolete `getid 0` `getid 9`
+83b5778897ad try to obsolete immutable changeset 1f0dee641bb7
+# at core level the warning is not issued
+# this is now a big issue now that we have latecomer warning
   $ qlog -r 'obsolete()'
   3
   - 0d3f46688ccc
@@ -396,7 +408,7 @@
   0
   - 1f0dee641bb7
 
-  $ hg debugobsolete null 9 #kill
+  $ hg debugobsolete `getid 9` #kill
   $ hg up null -q # to be not based on 9 anymore
   $ qlog
   8
@@ -473,7 +485,7 @@
   $ hg up -q 10
   $ mkcommit "obsol_d'''"
   created new head
-  $ hg debugobsolete 12 11
+  $ hg debugobsolete `getid 11` `getid 12`
   $ hg push ../other-new --traceback
   pushing to ../other-new
   searching for changes
@@ -573,7 +585,7 @@
   159dfc9fa5d3 9468a5f5d8b2
   1f0dee641bb7 83b5778897ad
   4538525df7e2 0d3f46688ccc
-  83b5778897ad 000000000000
+  83b5778897ad 909a0fb57e5d
   159dfc9fa5d3 9468a5f5d8b2
   6db5e282cb91 95de7fc6918d
   909a0fb57e5d
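
The test changes above rely on the `changectx.obsolete()` patch from the hgext/obsolete.py diff: a changeset is reported obsolete only if some marker lists it as a precursor and its phase is non-public (phase 0 is public), and, as the comment in the test notes, the in-core `debugobsolete` does not itself warn when a marker targets the immutable changeset `1f0dee641bb7`. A short sketch of that predicate, using only names that appear in the diff:

```python
# Sketch of the predicate monkey-patched onto changectx in the diff above.
# obsstore.precursors maps precursor nodes to markers; phase() is 0 (public)
# for immutable changesets, so they are never reported as obsolete.
from mercurial import context

def obsolete(self):
    """True if the changeset is obsolete (has a marker and is not public)."""
    return self.node() in self._repo.obsstore.precursors and self.phase()

context.changectx.obsolete = obsolete
```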
--- a/tests/test-qsync.t Tue Jul 03 14:18:33 2012 +0200
+++ b/tests/test-qsync.t Tue Jul 03 11:35:31 2012 +0200
@@ -182,7 +182,6 @@
   pulling from ../local2
   searching for changes
   no changes found
-  (run 'hg update' to get a working copy)
   $ hg pull --mq ../local2/.hg/patches
   pulling from ../local2/.hg/patches
   searching for changes