evolve: comparison of hgext/obsolete.py @ 499:a9c27df23129 (stable)
Prepare 0.1.0 by merging default into stable
stable is now compatible with 2.3 only.
author | Pierre-Yves David <pierre-yves.david@logilab.fr> |
---|---|
date | Fri, 24 Aug 2012 11:53:55 +0200 |
parents | 6989d8fe4ed2 |
children | 9825c7da5b54 |
476:f17a0f801e0b | 499:a9c27df23129 |
---|---|
3 # Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org> | 3 # Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org> |
4 # Logilab SA <contact@logilab.fr> | 4 # Logilab SA <contact@logilab.fr> |
5 # | 5 # |
6 # This software may be used and distributed according to the terms of the | 6 # This software may be used and distributed according to the terms of the |
7 # GNU General Public License version 2 or any later version. | 7 # GNU General Public License version 2 or any later version. |
8 """Introduce the Obsolete concept to mercurial | 8 """Deprecated extension that formely introduces "Changeset Obsolescence". |
9 | 9 |
10 General concept | 10 This concept is now partially in Mercurial core (starting with Mercurial 2.3). The remaining logic has been grouped with the evolve extension. |
11 =============== | |
12 | 11 |
13 This extension introduces the *obsolete* concept. It adds a new *obsolete* | 12 Some code remains in this extension to detect and convert prehistoric formats of obsolete markers that early users may have created. Keep it enabled if you were such a user. |
14 relation between two changesets. A relation ``<changeset B> obsolete <changeset | 13 """ |
15 A>`` is set to denote that ``<changeset B>`` is new version of ``<changeset | |
16 A>``. | |
17 | 14 |
18 The *obsolete* relation acts as a **perpendicular history** to the standard | 15 from mercurial import util |
19 changeset history. Standard changeset history versions files. The *obsolete* | |
20 relation versions changesets. | |
21 | 16 |
22 :obsolete: a changeset that has been replaced by another one. | 17 try: |
23 :unstable: a changeset that is not obsolete but has an obsolete ancestor. | 18 from mercurial import obsolete |
24 :suspended: an obsolete changeset with unstable descendant. | 19 if not obsolete._enabled: |
25 :extinct: an obsolete changeset without unstable descendant. | 20 obsolete._enabled = True |
26 (subject to garbage collection) | 21 except ImportError: |
22 raise util.Abort('Obsolete extension requires Mercurial 2.3 (or later)') | |
27 | 23 |
28 Another name for unstable could be out of sync. | 24 import sys |
25 import json | |
26 | |
27 from mercurial import cmdutil | |
28 from mercurial import error | |
29 from mercurial.node import bin, nullid | |
29 | 30 |
30 | 31 |
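A concrete instance of this relation, drawn from the ``commit --amend`` wrapper further down in this file: amending a draft changeset A produces a new changeset A', and a marker recording that A' obsoletes A is written, so A becomes hidden (unless something still depends on it) while A' carries the history forward.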
31 Usage and Feature | 32 ##################################################################### |
32 ================= | 33 ### Older format management ### |
34 ##################################################################### | |
33 | 35 |
34 Display and Exchange | 36 # Code related to detection and management of older legacy format never |
35 -------------------- | 37 # handled by core |
36 | |
37 obsolete changesets are hidden (except if they have a non-obsolete descendant). |
38 | |
39 obsolete changesets are not exchanged. This will probably change later but it | |
40 was the simplest solution for now. |
41 | |
42 New commands | |
43 ------------ | |
44 | |
45 Note that rebased changesets are now marked obsolete instead of being stripped. | |
46 | |
47 Context object | |
48 -------------- | |
49 | |
50 Context gains an ``obsolete`` method that returns True if a changeset is |
51 obsolete, False otherwise. |
52 | |
53 revset | |
54 ------ | |
55 | |
56 Add an ``obsolete()`` entry. | |
57 | |
58 repo extension | |
59 -------------- | |
60 | |
61 To Do | |
62 ~~~~~ | |
63 | |
64 - refuse to obsolete published changesets | |
65 | |
66 - handle split | |
67 | |
68 - handle conflict | |
69 | |
70 - handle unstable // out of sync | |
71 | |
72 """ | |
73 | |
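The docstring above describes an ``obsolete()`` revset and an ``obsolete`` method on changectx; here is a minimal sketch of how that API is meant to be driven from Python, assuming a ``repo`` object already patched by this extension (the helper name is made up):

    def show_obsolete(repo):
        # hypothetical helper: list obsolete revisions and their replacements
        for rev in repo.revs('obsolete()'):   # revset registered in extsetup()
            ctx = repo[rev]
            assert ctx.obsolete()             # changectx method patched below
            print rev, 'obsoleted by', [short(n) for n in repo.obsoletedby(ctx.node())]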
74 import os | |
75 try: | |
76 from cStringIO import StringIO | |
77 except ImportError: | |
78 from StringIO import StringIO | |
79 | |
80 from mercurial.i18n import _ | |
81 | |
82 import base64 | |
83 import json | |
84 | |
85 import struct | |
86 from mercurial import util, base85 | |
87 | |
88 _pack = struct.pack | |
89 _unpack = struct.unpack | |
90 | |
91 from mercurial import util | |
92 from mercurial import context | |
93 from mercurial import revset | |
94 from mercurial import scmutil | |
95 from mercurial import extensions | |
96 from mercurial import pushkey | |
97 from mercurial import discovery | |
98 from mercurial import error | |
99 from mercurial import commands | |
100 from mercurial import changelog | |
101 from mercurial import phases | |
102 from mercurial.node import hex, bin, short, nullid | |
103 from mercurial.lock import release | |
104 from mercurial import localrepo | |
105 from mercurial import cmdutil | |
106 from mercurial import templatekw | |
107 | |
108 try: | |
109 from mercurial.localrepo import storecache | |
110 storecache('babar') # to trigger import | |
111 except (TypeError, ImportError): | |
112 def storecache(*args): | |
113 return scmutil.filecache(*args, instore=True) | |
114 | 38 |
115 | 39 |
116 ### Patch changectx | 40 def reposetup(ui, repo): |
117 ############################# | 41 """Detect that a repo still contains some old obsolete format |
118 | |
119 def obsolete(ctx): | |
120 """is the changeset obsolete by other""" | |
121 if ctx.node() is None: |
122 return False | |
123 return bool(ctx._repo.obsoletedby(ctx.node())) and ctx.phase() | |
124 | |
125 context.changectx.obsolete = obsolete | |
126 | |
127 def unstable(ctx): | |
128 """is the changeset unstable (have obsolete ancestor)""" | |
129 if ctx.node() is None: | |
130 return False | |
131 return ctx.rev() in ctx._repo._unstableset | |
132 | |
133 context.changectx.unstable = unstable | |
134 | |
135 def extinct(ctx): | |
136 """is the changeset extinct by other""" | |
137 if ctx.node() is None: | |
138 return False | |
139 return ctx.rev() in ctx._repo._extinctset | |
140 | |
141 context.changectx.extinct = extinct | |
142 | |
143 def latecomer(ctx): | |
144 """is the changeset latecomer (Try to succeed to public change)""" | |
145 if ctx.node() is None: | |
146 return False | |
147 return ctx.rev() in ctx._repo._latecomerset | |
148 | |
149 context.changectx.latecomer = latecomer | |
150 | |
151 def conflicting(ctx): | |
152 """is the changeset conflicting (Try to succeed to public change)""" | |
153 if ctx.node() is None: | |
154 return False | |
155 return ctx.rev() in ctx._repo._conflictingset | |
156 | |
157 context.changectx.conflicting = conflicting | |
158 | |
159 | |
160 ### revset | |
161 ############################# | |
162 | |
163 def revsethidden(repo, subset, x): | |
164 """hidden changesets""" | |
165 args = revset.getargs(x, 0, 0, 'hidden takes no argument') | |
166 return [r for r in subset if r in repo.changelog.hiddenrevs] | |
167 | |
168 def revsetobsolete(repo, subset, x): | |
169 """obsolete changesets""" | |
170 args = revset.getargs(x, 0, 0, 'obsolete takes no argument') | |
171 return [r for r in subset if r in repo._obsoleteset and repo._phasecache.phase(repo, r) > 0] | |
172 | |
173 # XXX Backward compatibility, to be removed once stabilized | |
174 if '_phasecache' not in vars(localrepo.localrepository): # new api | |
175 def revsetobsolete(repo, subset, x): | |
176 """obsolete changesets""" | |
177 args = revset.getargs(x, 0, 0, 'obsolete takes no argument') | |
178 return [r for r in subset if r in repo._obsoleteset and repo._phaserev[r] > 0] | |
179 | |
180 def revsetunstable(repo, subset, x): | |
181 """non obsolete changesets descendant of obsolete one""" | |
182 args = revset.getargs(x, 0, 0, 'unstable takes no arguments') | |
183 return [r for r in subset if r in repo._unstableset] | |
184 | |
185 def revsetsuspended(repo, subset, x): | |
186 """obsolete changesets with non obsolete descendants""" | |
187 args = revset.getargs(x, 0, 0, 'suspended takes no arguments') | |
188 return [r for r in subset if r in repo._suspendedset] | |
189 | |
190 def revsetextinct(repo, subset, x): | |
191 """obsolete changesets without obsolete descendants""" | |
192 args = revset.getargs(x, 0, 0, 'extinct takes no arguments') | |
193 return [r for r in subset if r in repo._extinctset] | |
194 | |
195 def revsetlatecomer(repo, subset, x): | |
196 """latecomer, Try to succeed to public change""" | |
197 args = revset.getargs(x, 0, 0, 'latecomer takes no arguments') | |
198 return [r for r in subset if r in repo._latecomerset] | |
199 | |
200 def revsetconflicting(repo, subset, x): | |
201 """conflicting, Try to succeed to public change""" | |
202 args = revset.getargs(x, 0, 0, 'conflicting takes no arguments') | |
203 return [r for r in subset if r in repo._conflictingset] | |
204 | |
205 def _precursors(repo, s): | |
206 """Precursor of a changeset""" | |
207 cs = set() | |
208 nm = repo.changelog.nodemap | |
209 markerbysubj = repo.obsstore.successors | |
210 for r in s: | |
211 for p in markerbysubj.get(repo[r].node(), ()): | |
212 pr = nm.get(p[0]) | |
213 if pr is not None: | |
214 cs.add(pr) | |
215 return cs | |
216 | |
217 def revsetprecursors(repo, subset, x): | |
218 """precursors of a subset""" | |
219 s = revset.getset(repo, range(len(repo)), x) | |
220 cs = _precursors(repo, s) | |
221 return [r for r in subset if r in cs] | |
222 | |
223 def _allprecursors(repo, s): # XXX we need a better naming | |
224 """transitive precursors of a subset""" | |
225 toproceed = [repo[r].node() for r in s] | |
226 seen = set() | |
227 allsubjects = repo.obsstore.successors | |
228 while toproceed: | |
229 nc = toproceed.pop() | |
230 for mark in allsubjects.get(nc, ()): | |
231 np = mark[0] | |
232 if np not in seen: | |
233 seen.add(np) | |
234 toproceed.append(np) | |
235 nm = repo.changelog.nodemap | |
236 cs = set() | |
237 for p in seen: | |
238 pr = nm.get(p) | |
239 if pr is not None: | |
240 cs.add(pr) | |
241 return cs | |
242 | |
243 def revsetallprecursors(repo, subset, x): | |
244 """obsolete parents""" | |
245 s = revset.getset(repo, range(len(repo)), x) | |
246 cs = _allprecursors(repo, s) | |
247 return [r for r in subset if r in cs] | |
248 | |
249 def _successors(repo, s): | |
250 """Successors of a changeset""" | |
251 cs = set() | |
252 nm = repo.changelog.nodemap | |
253 markerbyobj = repo.obsstore.precursors | |
254 for r in s: | |
255 for p in markerbyobj.get(repo[r].node(), ()): | |
256 for sub in p[1]: | |
257 sr = nm.get(sub) | |
258 if sr is not None: | |
259 cs.add(sr) | |
260 return cs | |
261 | |
262 def revsetsuccessors(repo, subset, x): | |
263 """successors of a subset""" | |
264 s = revset.getset(repo, range(len(repo)), x) | |
265 cs = _successors(repo, s) | |
266 return [r for r in subset if r in cs] | |
267 | |
268 def _allsuccessors(repo, s): # XXX we need a better naming | |
269 """transitive successors of a subset""" | |
270 toproceed = [repo[r].node() for r in s] | |
271 seen = set() | |
272 allobjects = repo.obsstore.precursors | |
273 while toproceed: | |
274 nc = toproceed.pop() | |
275 for mark in allobjects.get(nc, ()): | |
276 for sub in mark[1]: | |
277 if sub == nullid: | |
278 continue # should not be here! | |
279 if sub not in seen: | |
280 seen.add(sub) | |
281 toproceed.append(sub) | |
282 nm = repo.changelog.nodemap | |
283 cs = set() | |
284 for s in seen: | |
285 sr = nm.get(s) | |
286 if sr is not None: | |
287 cs.add(sr) | |
288 return cs | |
289 | |
290 def revsetallsuccessors(repo, subset, x): | |
291 """obsolete parents""" | |
292 s = revset.getset(repo, range(len(repo)), x) | |
293 cs = _allsuccessors(repo, s) | |
294 return [r for r in subset if r in cs] | |
295 | |
296 | |
297 ### template keywords | |
298 ##################### | |
299 | |
300 def obsoletekw(repo, ctx, templ, **args): | |
301 """:obsolete: String. The obsolescence level of the node, could be | |
302 ``stable``, ``unstable``, ``suspended`` or ``extinct``. | |
303 """ | 42 """ |
304 rev = ctx.rev() | 43 if not repo.local(): |
305 if rev in repo._extinctset: | 44 return |
306 return 'extinct' | 45 for arg in sys.argv: |
307 if rev in repo._suspendedset: | 46 if 'debugc' in arg: |
308 return 'suspended' | 47 break |
309 if rev in repo._unstableset: | |
310 return 'unstable' | |
311 return 'stable' | |
312 | |
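Since ``extsetup()`` below registers this function as the ``obsolete`` template keyword, a command line such as ``hg log --template '{rev}:{node|short} {obsolete}\n'`` should print one of those four states next to each revision.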
313 ### Other Extension compat | |
314 ############################ | |
315 | |
316 | |
317 def buildstate(orig, repo, dest, rebaseset, *ags, **kws): | |
318 """wrapper for rebase 's buildstate that exclude obsolete changeset""" | |
319 rebaseset = repo.revs('%ld - extinct()', rebaseset) | |
320 return orig(repo, dest, rebaseset, *ags, **kws) | |
321 | |
322 def defineparents(orig, repo, rev, target, state, *args, **kwargs): | |
323 rebasestate = getattr(repo, '_rebasestate', None) | |
324 if rebasestate is not None: | |
325 repo._rebasestate = dict(state) | |
326 repo._rebasetarget = target | |
327 return orig(repo, rev, target, state, *args, **kwargs) | |
328 | |
329 def concludenode(orig, repo, rev, p1, *args, **kwargs): | |
330 """wrapper for rebase 's concludenode that set obsolete relation""" | |
331 newrev = orig(repo, rev, p1, *args, **kwargs) | |
332 rebasestate = getattr(repo, '_rebasestate', None) | |
333 if rebasestate is not None: | |
334 if newrev is not None: | |
335 nrev = repo[newrev].rev() | |
336 else: | |
337 nrev = p1 | |
338 repo._rebasestate[rev] = nrev | |
339 return newrev | |
340 | |
341 def cmdrebase(orig, ui, repo, *args, **kwargs): | |
342 | |
343 reallykeep = kwargs.get('keep', False) | |
344 kwargs = dict(kwargs) | |
345 kwargs['keep'] = True | |
346 | |
347 # We want to mark rebased revision as obsolete and set their | |
348 # replacements if any. Doing it in concludenode() prevents | |
349 # aborting the rebase, and is not called with all relevant | |
350 # revisions in --collapse case. Instead, we try to track the | |
351 # rebase state structure by sampling/updating it in | |
352 # defineparents() and concludenode(). The obsolete markers are | |
353 # added from this state after a successful call. | |
354 repo._rebasestate = {} | |
355 repo._rebasetarget = None | |
356 try: | |
357 res = orig(ui, repo, *args, **kwargs) | |
358 if not reallykeep: | |
359 # Filter nullmerge or unrebased entries | |
360 repo._rebasestate = dict(p for p in repo._rebasestate.iteritems() | |
361 if p[1] >= 0) | |
362 if not res and not kwargs.get('abort') and repo._rebasestate: | |
363 # Rebased revisions are assumed to be descendants of | |
364 # targetrev. If a source revision is mapped to targetrev | |
365 # or to another rebased revision, it must have been | |
366 # removed. | |
367 targetrev = repo[repo._rebasetarget].rev() | |
368 newrevs = set([targetrev]) | |
369 replacements = {} | |
370 for rev, newrev in sorted(repo._rebasestate.items()): | |
371 oldnode = repo[rev].node() | |
372 if newrev not in newrevs: | |
373 newnode = repo[newrev].node() | |
374 newrevs.add(newrev) | |
375 else: | |
376 newnode = nullid | |
377 replacements[oldnode] = newnode | |
378 | |
379 if kwargs.get('collapse'): | |
380 newnodes = set(n for n in replacements.values() if n != nullid) | |
381 if newnodes: | |
382 # Collapsing into more than one revision? | |
383 assert len(newnodes) == 1, newnodes | |
384 newnode = newnodes.pop() | |
385 else: | |
386 newnode = nullid | |
387 repo.addcollapsedobsolete(replacements, newnode) | |
388 else: | |
389 for oldnode, newnode in replacements.iteritems(): | |
390 repo.addobsolete(newnode, oldnode) | |
391 return res | |
392 finally: | |
393 delattr(repo, '_rebasestate') | |
394 delattr(repo, '_rebasetarget') | |
395 | |
396 | |
397 def extsetup(ui): | |
398 | |
399 revset.symbols["hidden"] = revsethidden | |
400 revset.symbols["obsolete"] = revsetobsolete | |
401 revset.symbols["unstable"] = revsetunstable | |
402 revset.symbols["suspended"] = revsetsuspended | |
403 revset.symbols["extinct"] = revsetextinct | |
404 revset.symbols["latecomer"] = revsetlatecomer | |
405 revset.symbols["conflicting"] = revsetconflicting | |
406 revset.symbols["obsparents"] = revsetprecursors # DEPR | |
407 revset.symbols["precursors"] = revsetprecursors | |
408 revset.symbols["obsancestors"] = revsetallprecursors # DEPR | |
409 revset.symbols["allprecursors"] = revsetallprecursors # bad name | |
410 revset.symbols["successors"] = revsetsuccessors | |
411 revset.symbols["allsuccessors"] = revsetallsuccessors # bad name | |
412 | |
413 templatekw.keywords['obsolete'] = obsoletekw | |
414 | |
415 # warn when a command creates new unstable, latecomer or conflicting changesets |
416 for cmd in ['commit', 'push', 'pull', 'graft', 'phase', 'unbundle']: | |
417 entry = extensions.wrapcommand(commands.table, cmd, warnobserrors) | |
418 try: | |
419 rebase = extensions.find('rebase') | |
420 if rebase: | |
421 entry = extensions.wrapcommand(rebase.cmdtable, 'rebase', warnobserrors) | |
422 extensions.wrapfunction(rebase, 'buildstate', buildstate) | |
423 extensions.wrapfunction(rebase, 'defineparents', defineparents) | |
424 extensions.wrapfunction(rebase, 'concludenode', concludenode) | |
425 extensions.wrapcommand(rebase.cmdtable, "rebase", cmdrebase) | |
426 except KeyError: | |
427 pass # rebase not found | |
428 | |
429 # Pushkey mechanism for mutable | |
430 ######################################### | |
431 | |
432 def listmarkers(repo): | |
433 """List markers over pushkey""" | |
434 if not repo.obsstore: | |
435 return {} | |
436 data = repo.obsstore._writemarkers() | |
437 encdata = base85.b85encode(data) | |
438 return {'dump0': encdata, | |
439 'dump': encdata} # legacy compat | |
440 | |
441 def pushmarker(repo, key, old, new): | |
442 """Push markers over pushkey""" | |
443 if not key.startswith('dump'): | |
444 repo.ui.warn(_('unknown key: %r') % key) | |
445 return 0 | |
446 if old: | |
447 repo.ui.warn(_('unexpected old value for %r') % key) |
448 return 0 | |
449 data = base85.b85decode(new) | |
450 lock = repo.lock() | |
451 try: | |
452 try: | |
453 repo.obsstore.mergemarkers(data) | |
454 return 1 | |
455 except util.Abort: | |
456 return 0 | |
457 finally: | |
458 lock.release() | |
459 | |
460 pushkey.register('obsolete', pushmarker, listmarkers) | |
461 | |
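The ``obsolete`` pushkey namespace registered above is how markers travel between repositories. A condensed sketch of the client side, mirroring the ``push()``/``pull()`` wrappers defined near the end of this file (``remote`` stands for any peer object):

    # pushing: serialize the local store and hand it over as the 'dump0' key
    if 'obsolete' in remote.listkeys('namespaces') and repo.obsstore:
        data = repo.obsstore._writemarkers()
        remote.pushkey('obsolete', 'dump0', '', base85.b85encode(data))

    # pulling: fetch the dumps and merge them into the local store
    for key, value in remote.listkeys('obsolete').iteritems():
        if key.startswith('dump'):
            repo.obsstore.mergemarkers(base85.b85decode(value))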
462 ### Discovery wrapping | |
463 ############################# | |
464 | |
465 class blist(list, object): | |
466 """silly class to have non False but empty list""" | |
467 | |
468 def __nonzero__(self): | |
469 return bool(len(self.orig)) | |
470 | |
471 def wrapfindcommonoutgoing(orig, repo, *args, **kwargs): | |
472 """wrap mercurial.discovery.findcommonoutgoing to remove extinct changeset | |
473 | |
474 Such changesets are removed from ``excluded`` and will *not* appear |
475 as excluded secret changesets. |
476 """ | |
477 outgoing = orig(repo, *args, **kwargs) | |
478 orig = outgoing.excluded | |
479 outgoing.excluded = blist(n for n in orig if not repo[n].extinct()) | |
480 # when no revision is specified (push everything) a shortcut is taken when | |
481 # nothing was excluded. Taking this code path when extinct changesets have |
482 # been excluded leads to repository corruption. | |
483 outgoing.excluded.orig = orig | |
484 return outgoing | |
485 | |
486 def wrapcheckheads(orig, repo, remote, outgoing, *args, **kwargs): | |
487 """wrap mercurial.discovery.checkheads | |
488 | |
489 * prevent instability from being pushed |
490 * patch remote to ignore obsolete heads on remote | |
491 """ | |
492 # do not push instability | |
493 for h in outgoing.missingheads: | |
494 # checking heads only is enough because anything based on an obsolete |
495 # changeset is either obsolete or unstable. | |
496 ctx = repo[h] | |
497 if ctx.unstable(): | |
498 raise util.Abort(_("push includes an unstable changeset: %s!") | |
499 % ctx) | |
500 if ctx.obsolete(): | |
501 raise util.Abort(_("push includes an obsolete changeset: %s!") | |
502 % ctx) | |
503 if ctx.latecomer(): | |
504 raise util.Abort(_("push includes an latecomer changeset: %s!") | |
505 % ctx) | |
506 if ctx.conflicting(): | |
507 raise util.Abort(_("push includes conflicting changeset: %s!") | |
508 % ctx) | |
509 ### patch remote branch map | |
510 # do not read this, it burns the eyes |
511 try: | |
512 if 'oldbranchmap' not in vars(remote): | |
513 remote.oldbranchmap = remote.branchmap | |
514 def branchmap(): | |
515 newbm = {} | |
516 oldbm = None | |
517 if (util.safehasattr(phases, 'visiblebranchmap') | |
518 and not util.safehasattr(remote, 'ignorevisiblebranchmap') | |
519 ): | |
520 remote.ignorevisiblebranchmap = False | |
521 remote.branchmap = remote.oldbranchmap | |
522 oldbm = phases.visiblebranchmap(remote) | |
523 remote.branchmap = remote.newbranchmap | |
524 remote.ignorevisiblebranchmap = True | |
525 if oldbm is None: | |
526 oldbm = remote.oldbranchmap() | |
527 for branch, nodes in oldbm.iteritems(): | |
528 nodes = list(nodes) | |
529 new = set() | |
530 while nodes: | |
531 n = nodes.pop() | |
532 if n in repo.obsstore.precursors: | |
533 markers = repo.obsstore.precursors[n] | |
534 for mark in markers: | |
535 for newernode in mark[1]: | |
536 if newernode is not None: | |
537 nodes.append(newernode) | |
538 else: | |
539 new.add(n) | |
540 if new: | |
541 newbm[branch] = list(new) | |
542 return newbm | |
543 remote.ignorevisiblebranchmap = True | |
544 remote.branchmap = branchmap | |
545 remote.newbranchmap = branchmap | |
546 return orig(repo, remote, outgoing, *args, **kwargs) | |
547 finally: | |
548 remote.__dict__.pop('branchmap', None) # restore class one | |
549 remote.__dict__.pop('oldbranchmap', None) | |
550 remote.__dict__.pop('newbranchmap', None) | |
551 remote.__dict__.pop('ignorevisiblebranchmap', None) | |
552 | |
553 # eyes are still burning |
554 def wrapvisiblebranchmap(orig, repo): | |
555 ignore = getattr(repo, 'ignorevisiblebranchmap', None) | |
556 if ignore is None: | |
557 return orig(repo) | |
558 elif ignore: | |
559 return repo.branchmap() | |
560 else: | 48 else: |
561 return None # break recursion | 49 data = repo.opener.tryread('obsolete-relations') |
562 | 50 if not data: |
563 def wrapclearcache(orig, repo, *args, **kwargs): | 51 data = repo.sopener.tryread('obsoletemarkers') |
564 try: | |
565 return orig(repo, *args, **kwargs) | |
566 finally: | |
567 repo._clearobsoletecache() | |
568 | |
569 | |
570 ### New commands | |
571 ############################# | |
572 | |
573 cmdtable = {} | |
574 command = cmdutil.command(cmdtable) | |
575 | |
576 @command('debugobsolete', [], _('SUBJECT OBJECT')) | |
577 def cmddebugobsolete(ui, repo, subject, object): | |
578 """add an obsolete relation between two nodes | |
579 | |
580 The subject is expected to be a newer version of the object. | |
581 """ | |
582 lock = repo.lock() | |
583 try: | |
584 sub = repo[subject] | |
585 obj = repo[object] | |
586 repo.addobsolete(sub.node(), obj.node()) | |
587 finally: | |
588 lock.release() | |
589 return 0 | |
590 | |
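In other words, ``hg debugobsolete NEWER OLDER`` records that the first changeset supersedes the second; both arguments are ordinary revision identifiers (the names here are placeholders).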
591 @command('debugconvertobsolete', [], '') | |
592 def cmddebugconvertobsolete(ui, repo): | |
593 """import markers from an .hg/obsolete-relations file""" | |
594 cnt = 0 | |
595 err = 0 | |
596 l = repo.lock() | |
597 some = False | |
598 try: | |
599 repo._importoldobsolete = True | |
600 store = repo.obsstore | |
601 ### very first format | |
602 try: | |
603 f = repo.opener('obsolete-relations') | |
604 try: | |
605 some = True | |
606 for line in f: | |
607 subhex, objhex = line.split() | |
608 suc = bin(subhex) | |
609 prec = bin(objhex) | |
610 sucs = (suc==nullid) and [] or [suc] | |
611 meta = { | |
612 'date': '%i %i' % util.makedate(), | |
613 'user': ui.username(), | |
614 } | |
615 try: | |
616 store.create(prec, sucs, 0, meta) | |
617 cnt += 1 | |
618 except ValueError: | |
619 repo.ui.write_err("invalid old marker line: %s" | |
620 % (line)) | |
621 err += 1 | |
622 finally: | |
623 f.close() | |
624 util.unlink(repo.join('obsolete-relations')) | |
625 except IOError: | |
626 pass | |
627 ### second (json) format | |
628 data = repo.sopener.tryread('obsoletemarkers') | |
629 if data: | 52 if data: |
630 some = True | 53 raise util.Abort('old format of obsolete marker detected!\n' |
631 for oldmark in json.loads(data): | 54 'run `hg debugconvertobsolete` once.') |
632 del oldmark['id'] # dropped for now | |
633 del oldmark['reason'] # unused until then | |
634 oldobject = str(oldmark.pop('object')) | |
635 oldsubjects = [str(s) for s in oldmark.pop('subjects', [])] | |
636 LOOKUP_ERRORS = (error.RepoLookupError, error.LookupError) | |
637 if len(oldobject) != 40: | |
638 try: | |
639 oldobject = repo[oldobject].node() | |
640 except LOOKUP_ERRORS: | |
641 pass | |
642 if any(len(s) != 40 for s in oldsubjects): | |
643 try: | |
644 oldsubjects = [repo[s].node() for s in oldsubjects] | |
645 except LOOKUP_ERRORS: | |
646 pass | |
647 | |
648 oldmark['date'] = '%i %i' % tuple(oldmark['date']) | |
649 meta = dict((k.encode('utf-8'), v.encode('utf-8')) | |
650 for k, v in oldmark.iteritems()) | |
651 try: | |
652 succs = [bin(n) for n in oldsubjects] | |
653 succs = [n for n in succs if n != nullid] | |
654 store.create(bin(oldobject), succs, | |
655 0, meta) | |
656 cnt += 1 | |
657 except ValueError: | |
658 repo.ui.write_err("invalid marker %s -> %s\n" | |
659 % (oldobject, oldsubjects)) | |
660 err += 1 | |
661 util.unlink(repo.sjoin('obsoletemarkers')) | |
662 finally: | |
663 del repo._importoldobsolete | |
664 l.release() | |
665 if not some: | |
666 ui.warn('nothing to do\n') | |
667 ui.status('%i obsolete markers converted\n' % cnt) |
668 if err: | |
669 ui.write_err('%i conversions failed. Check your graph!\n' % err) |
670 | |
671 @command('debugsuccessors', [], '') | |
672 def cmddebugsuccessors(ui, repo): | |
673 """dump obsolete changesets and their successors | |
674 | |
675 Each line matches an existing marker, the first identifier is the | |
676 obsolete changeset identifier, followed by its successors. |
677 """ | |
678 lock = repo.lock() | |
679 try: | |
680 allsuccessors = repo.obsstore.precursors | |
681 for old in sorted(allsuccessors): | |
682 successors = [sorted(m[1]) for m in allsuccessors[old]] | |
683 for i, group in enumerate(sorted(successors)): | |
684 ui.write('%s' % short(old)) | |
685 for new in group: | |
686 ui.write(' %s' % short(new)) | |
687 ui.write('\n') | |
688 finally: | |
689 lock.release() | |
690 | |
691 ### Altering existing command | |
692 ############################# | |
693 | |
694 def wrapmayobsoletewc(origfn, ui, repo, *args, **opts): | |
695 res = origfn(ui, repo, *args, **opts) | |
696 if repo['.'].obsolete(): | |
697 ui.warn(_('Working directory parent is obsolete\n')) | |
698 return res | |
699 | |
700 def warnobserrors(orig, ui, repo, *args, **kwargs): | |
701 """display warning is the command resulted in more instable changeset""" | |
702 priorunstables = len(repo.revs('unstable()')) | |
703 priorlatecomers = len(repo.revs('latecomer()')) | |
704 priorconflictings = len(repo.revs('conflicting()')) | |
705 #print orig, priorunstables | |
706 #print len(repo.revs('secret() - obsolete()')) | |
707 try: | |
708 return orig(ui, repo, *args, **kwargs) | |
709 finally: | |
710 newunstables = len(repo.revs('unstable()')) - priorunstables | |
711 newlatecomers = len(repo.revs('latecomer()')) - priorlatecomers | |
712 newconflictings = len(repo.revs('conflicting()')) - priorconflictings | |
713 #print orig, newunstables | |
714 #print len(repo.revs('secret() - obsolete()')) | |
715 if newunstables > 0: | |
716 ui.warn(_('%i new unstable changesets\n') % newunstables) |
717 if newlatecomers > 0: | |
718 ui.warn(_('%i new latecomer changesets\n') % newlatecomers) |
719 if newconflictings > 0: | |
720 ui.warn(_('%i new conflicting changesets\n') % newconflictings) |
721 | |
722 def noextinctsvisibleheads(orig, repo): | |
723 repo._turn_extinct_secret() | |
724 return orig(repo) | |
725 | |
726 def wrapcmdutilamend(orig, ui, repo, commitfunc, old, *args, **kwargs): | |
727 oldnode = old.node() | |
728 new = orig(ui, repo, commitfunc, old, *args, **kwargs) | |
729 if new != oldnode: | |
730 lock = repo.lock() | |
731 try: | |
732 meta = { | |
733 'subjects': [new], | |
734 'object': oldnode, | |
735 'date': util.makedate(), | |
736 'user': ui.username(), | |
737 'reason': 'commit --amend', | |
738 } | |
739 repo.obsstore.create(oldnode, [new], 0, meta) | |
740 repo._clearobsoletecache() | |
741 repo._turn_extinct_secret() | |
742 finally: | |
743 lock.release() | |
744 return new | |
745 | |
746 def uisetup(ui): | |
747 extensions.wrapcommand(commands.table, "update", wrapmayobsoletewc) | |
748 extensions.wrapcommand(commands.table, "pull", wrapmayobsoletewc) | |
749 if util.safehasattr(cmdutil, 'amend'): | |
750 extensions.wrapfunction(cmdutil, 'amend', wrapcmdutilamend) | |
751 extensions.wrapfunction(discovery, 'findcommonoutgoing', wrapfindcommonoutgoing) | |
752 extensions.wrapfunction(discovery, 'checkheads', wrapcheckheads) | |
753 extensions.wrapfunction(phases, 'visibleheads', noextinctsvisibleheads) | |
754 extensions.wrapfunction(phases, 'advanceboundary', wrapclearcache) | |
755 if util.safehasattr(phases, 'visiblebranchmap'): | |
756 extensions.wrapfunction(phases, 'visiblebranchmap', wrapvisiblebranchmap) | |
757 | |
758 ### serialisation | |
759 ############################# | |
760 | |
761 def _obsserialise(obssubrels, flike): | |
762 """serialise an obsolete relation mapping in a plain text one | |
763 | |
764 this is for subject -> [objects] mapping | |
765 | |
766 format is:: | |
767 | |
768 <subject-full-hex> <object-full-hex>\n""" | |
769 for sub, objs in obssubrels.iteritems(): | |
770 for obj in objs: | |
771 if sub is None: | |
772 sub = nullid | |
773 flike.write('%s %s\n' % (hex(sub), hex(obj))) | |
774 | 55 |
775 def _obsdeserialise(flike): | 56 def _obsdeserialise(flike): |
776 """read a file like object serialised with _obsserialise | 57 """read a file like object serialised with _obsserialise |
777 | 58 |
778 this deserializes into a {subject -> objects} mapping""" | 59 this deserializes into a {subject -> objects} mapping |
60 | |
61 this was the very first format ever.""" | |
779 rels = {} | 62 rels = {} |
780 for line in flike: | 63 for line in flike: |
781 subhex, objhex = line.split() | 64 subhex, objhex = line.split() |
782 subnode = bin(subhex) | 65 subnode = bin(subhex) |
783 if subnode == nullid: | 66 if subnode == nullid: |
784 subnode = None | 67 subnode = None |
785 rels.setdefault( subnode, set()).add(bin(objhex)) | 68 rels.setdefault( subnode, set()).add(bin(objhex)) |
786 return rels | 69 return rels |
787 | 70 |
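A round trip through the two helpers above makes the plain-text format concrete (the serialiser only survives in the old column; the hex nodes are fabricated for illustration):

    from cStringIO import StringIO
    from mercurial.node import bin

    new, old = 'a' * 40, 'b' * 40                 # fabricated 40-char hex nodes
    buf = StringIO()
    _obsserialise({bin(new): [bin(old)]}, buf)    # writes '<new-hex> <old-hex>\n'
    buf.seek(0)
    assert _obsdeserialise(buf) == {bin(new): set([bin(old)])}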
788 ### diagnostic tools | 71 cmdtable = {} |
789 ############################# | 72 command = cmdutil.command(cmdtable) |
73 @command('debugconvertobsolete', [], '') | |
74 def cmddebugconvertobsolete(ui, repo): | |
75 """import markers from an .hg/obsolete-relations file""" | |
76 cnt = 0 | |
77 err = 0 | |
78 l = repo.lock() | |
79 some = False | |
80 try: | |
81 unlink = [] | |
82 tr = repo.transaction('convert-obsolete') | |
83 try: | |
84 repo._importoldobsolete = True | |
85 store = repo.obsstore | |
86 ### very first format | |
87 try: | |
88 f = repo.opener('obsolete-relations') | |
89 try: | |
90 some = True | |
91 for line in f: | |
92 subhex, objhex = line.split() | |
93 suc = bin(subhex) | |
94 prec = bin(objhex) | |
95 sucs = (suc==nullid) and [] or [suc] | |
96 meta = { | |
97 'date': '%i %i' % util.makedate(), | |
98 'user': ui.username(), | |
99 } | |
100 try: | |
101 store.create(tr, prec, sucs, 0, meta) | |
102 cnt += 1 | |
103 except ValueError: | |
104 repo.ui.write_err("invalid old marker line: %s" | |
105 % (line)) | |
106 err += 1 | |
107 finally: | |
108 f.close() | |
109 unlink.append(repo.join('obsolete-relations')) | |
110 except IOError: | |
111 pass | |
112 ### second (json) format | |
113 data = repo.sopener.tryread('obsoletemarkers') | |
114 if data: | |
115 some = True | |
116 for oldmark in json.loads(data): | |
117 del oldmark['id'] # dropped for now | |
118 del oldmark['reason'] # unused until then | |
119 oldobject = str(oldmark.pop('object')) | |
120 oldsubjects = [str(s) for s in oldmark.pop('subjects', [])] | |
121 LOOKUP_ERRORS = (error.RepoLookupError, error.LookupError) | |
122 if len(oldobject) != 40: | |
123 try: | |
124 oldobject = repo[oldobject].node() | |
125 except LOOKUP_ERRORS: | |
126 pass | |
127 if any(len(s) != 40 for s in oldsubjects): | |
128 try: | |
129 oldsubjects = [repo[s].node() for s in oldsubjects] | |
130 except LOOKUP_ERRORS: | |
131 pass | |
790 | 132 |
791 def unstables(repo): | 133 oldmark['date'] = '%i %i' % tuple(oldmark['date']) |
792 """Return all unstable changeset""" | 134 meta = dict((k.encode('utf-8'), v.encode('utf-8')) |
793 return scmutil.revrange(repo, ['obsolete():: and (not obsolete())']) | 135 for k, v in oldmark.iteritems()) |
794 | 136 try: |
795 def newerversion(repo, obs): | 137 succs = [bin(n) for n in oldsubjects] |
796 """Return the newer version of an obsolete changeset""" | 138 succs = [n for n in succs if n != nullid] |
797 toproceed = set([(obs,)]) | 139 store.create(tr, bin(oldobject), succs, |
798 # XXX known optimization available | 140 0, meta) |
799 newer = set() | 141 cnt += 1 |
800 objectrels = repo.obsstore.precursors | 142 except ValueError: |
801 while toproceed: | 143 repo.ui.write_err("invalid marker %s -> %s\n" |
802 current = toproceed.pop() | 144 % (oldobject, oldsubjects)) |
803 assert len(current) <= 1, 'splitting not handled yet. %r' % current | 145 err += 1 |
804 current = [n for n in current if n != nullid] | 146 unlink.append(repo.sjoin('obsoletemarkers')) |
805 if current: | 147 tr.close() |
806 n, = current | 148 for path in unlink: |
807 if n in objectrels: | 149 util.unlink(path) |
808 markers = objectrels[n] | 150 finally: |
809 for mark in markers: | 151 tr.release() |
810 toproceed.add(tuple(mark[1])) | 152 finally: |
811 else: | 153 del repo._importoldobsolete |
812 newer.add(tuple(current)) | 154 l.release() |
813 else: | 155 if not some: |
814 newer.add(()) | 156 ui.warn('nothing to do\n') |
815 return sorted(newer) | 157 ui.status('%i obsolete markers converted\n' % cnt) |
816 | 158 if err: |
817 ### obsolete relation storage | 159 ui.write_err('%i conversions failed. Check your graph!\n' % err) |
818 ############################# | |
819 def add2set(d, key, mark): | |
820 """add <mark> to a `set` in <d>[<key>]""" | |
821 d.setdefault(key, []).append(mark) | |
822 | |
823 def markerid(marker): | |
824 KEYS = ['subjects', "object", "date", "user", "reason"] | |
825 for key in KEYS: | |
826 assert key in marker | |
827 keys = sorted(marker.keys()) | |
828 a = util.sha1() | |
829 for key in keys: | |
830 if key == 'subjects': | |
831 for sub in sorted(marker[key]): | |
832 a.update(sub) | |
833 elif key == 'id': | |
834 pass | |
835 else: | |
836 a.update(str(marker[key])) | |
837 a.update('\0') | |
838 return a.digest() | |
839 | |
840 # mercurial backport | |
841 | |
842 def encodemeta(meta): | |
843 """Return encoded metadata string to string mapping. | |
844 | |
845 Assume no ':' in key and no '\0' in both key and value.""" | |
846 for key, value in meta.iteritems(): | |
847 if ':' in key or '\0' in key: | |
848 raise ValueError("':' and '\0' are forbidden in metadata key'") | |
849 if '\0' in value: | |
850 raise ValueError("':' are forbidden in metadata value'") | |
851 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)]) | |
852 | |
853 def decodemeta(data): | |
854 """Return string to string dictionary from encoded version.""" | |
855 d = {} | |
856 for l in data.split('\0'): | |
857 if l: | |
858 key, value = l.split(':') | |
859 d[key] = value | |
860 return d | |
861 | |
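A tiny round trip through the two helpers above, with made-up metadata:

    meta = {'user': 'alice', 'date': '1345801635 -7200'}
    blob = encodemeta(meta)        # 'date:1345801635 -7200\x00user:alice'
    assert decodemeta(blob) == meta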
862 # data used for parsing and writing | |
863 _fmversion = 0 | |
864 _fmfixed = '>BIB20s' | |
865 _fmnode = '20s' | |
866 _fmfsize = struct.calcsize(_fmfixed) | |
867 _fnodesize = struct.calcsize(_fmnode) | |
868 | |
869 def _readmarkers(data): | |
870 """Read and enumerate markers from raw data""" | |
871 off = 0 | |
872 diskversion = _unpack('>B', data[off:off + 1])[0] | |
873 off += 1 | |
874 if diskversion != _fmversion: | |
875 raise util.Abort(_('parsing obsolete marker: unknown version %r') | |
876 % diskversion) | |
877 | |
878 # Loop on markers | |
879 l = len(data) | |
880 while off + _fmfsize <= l: | |
881 # read fixed part | |
882 cur = data[off:off + _fmfsize] | |
883 off += _fmfsize | |
884 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur) | |
885 # read replacement | |
886 sucs = () | |
887 if nbsuc: | |
888 s = (_fnodesize * nbsuc) | |
889 cur = data[off:off + s] | |
890 sucs = _unpack(_fmnode * nbsuc, cur) | |
891 off += s | |
892 # read metadata | |
893 # (metadata will be decoded on demand) | |
894 metadata = data[off:off + mdsize] | |
895 if len(metadata) != mdsize: | |
896 raise util.Abort(_('parsing obsolete marker: metadata is too ' | |
897 'short, %d bytes expected, got %d') | |
898 % (mdsize, len(metadata))) |
899 off += mdsize | |
900 yield (pre, sucs, flags, metadata) | |
901 | |
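To make the on-disk layout concrete, here is one marker packed by hand from the constants above and read back with ``_readmarkers``; the nodes are fabricated 20-byte strings:

    prec = 'p' * 20                          # fabricated binary precursor node
    sucs = ('s' * 20,)                       # one fabricated successor node
    metadata = encodemeta({'user': 'alice'})
    data = (_pack('>B', _fmversion)
            + _pack(_fmfixed + _fmnode * len(sucs),
                    len(sucs), len(metadata), 0, prec, *sucs)
            + metadata)
    assert list(_readmarkers(data)) == [(prec, sucs, 0, metadata)]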
902 class obsstore(object): | |
903 """Store obsolete markers | |
904 | |
905 Markers can be accessed with two mappings: | |
906 - precursors: precursor node -> set of markers |
907 - successors: successor node -> set of markers |
908 """ | |
909 | |
910 def __init__(self): | |
911 self._all = [] | |
912 # new markers to serialize | |
913 self._new = [] | |
914 self.precursors = {} | |
915 self.successors = {} | |
916 | |
917 def __iter__(self): | |
918 return iter(self._all) | |
919 | |
920 def __nonzero__(self): | |
921 return bool(self._all) | |
922 | |
923 def create(self, prec, succs=(), flag=0, metadata=None): | |
924 """obsolete: add a new obsolete marker | |
925 | |
926 * ensuring it is hashable | |
927 * check mandatory metadata | |
928 * encode metadata | |
929 """ | |
930 if metadata is None: | |
931 metadata = {} | |
932 if len(prec) != 20: | |
933 raise ValueError(repr(prec)) | |
934 for succ in succs: | |
935 if len(succ) != 20: | |
936 raise ValueError((succs)) | |
937 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata)) | |
938 self.add(marker) | |
939 | |
940 def add(self, marker): | |
941 """Add a new marker to the store | |
942 | |
943 This marker still needs to be written to disk""" | |
944 self._new.append(marker) | |
945 self._load(marker) | |
946 | |
947 def loadmarkers(self, data): | |
948 """Load all markers in data, mark them as known.""" | |
949 for marker in _readmarkers(data): | |
950 self._load(marker) | |
951 | |
952 def mergemarkers(self, data): | |
953 other = set(_readmarkers(data)) | |
954 local = set(self._all) | |
955 new = other - local | |
956 for marker in new: | |
957 self.add(marker) | |
958 | |
959 def flushmarkers(self, stream): | |
960 """Write all markers to a stream | |
961 | |
962 After this operation, "new" markers are considered "known".""" | |
963 self._writemarkers(stream) | |
964 self._new[:] = [] | |
965 | |
966 def _load(self, marker): | |
967 self._all.append(marker) | |
968 pre, sucs = marker[:2] | |
969 self.precursors.setdefault(pre, set()).add(marker) | |
970 for suc in sucs: | |
971 self.successors.setdefault(suc, set()).add(marker) | |
972 | |
973 def _writemarkers(self, stream=None): | |
974 # Kept separate from flushmarkers(), it will be reused for | |
975 # markers exchange. | |
976 if stream is None: | |
977 final = [] | |
978 w = final.append | |
979 else: | |
980 w = stream.write | |
981 w(_pack('>B', _fmversion)) | |
982 for marker in self._all: | |
983 pre, sucs, flags, metadata = marker | |
984 nbsuc = len(sucs) | |
985 format = _fmfixed + (_fmnode * nbsuc) | |
986 data = [nbsuc, len(metadata), flags, pre] | |
987 data.extend(sucs) | |
988 w(_pack(format, *data)) | |
989 w(metadata) | |
990 if stream is None: | |
991 return ''.join(final) | |
992 | |
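The store above lives purely in memory until the patched ``lock()`` further down flushes it to ``.hg/store/obsstore``; a short sketch of its API, again with fabricated nodes:

    store = obsstore()
    store.create('p' * 20, ['s' * 20], 0, {'user': 'alice'})
    assert 'p' * 20 in store.precursors      # precursor node -> markers
    assert 's' * 20 in store.successors      # successor node -> markers
    raw = store._writemarkers()              # same bytes loadmarkers() accepts
    other = obsstore()
    other.loadmarkers(raw)
    assert list(other) == list(store)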
993 | |
994 ### repo subclassing | |
995 ############################# | |
996 | |
997 def reposetup(ui, repo): | |
998 if not repo.local(): | |
999 return | |
1000 | |
1001 if not util.safehasattr(repo.opener, 'tryread'): | |
1002 raise util.Abort('Obsolete extension requires Mercurial 2.2 (or later)') | |
1003 opull = repo.pull | |
1004 opush = repo.push | |
1005 olock = repo.lock | |
1006 o_rollback = repo._rollback | |
1007 o_updatebranchcache = repo.updatebranchcache | |
1008 | |
1009 # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\ | |
1010 if util.safehasattr(repo, '_journalfiles'): # Hg 2.2 | |
1011 o_journalfiles = repo._journalfiles | |
1012 o_writejournal = repo._writejournal | |
1013 o_hook = repo.hook | |
1014 | |
1015 | |
1016 class obsoletingrepo(repo.__class__): | |
1017 | |
1018 # workaround | |
1019 def hook(self, name, throw=False, **args): | |
1020 if 'pushkey' in name: | |
1021 args.pop('new') | |
1022 args.pop('old') | |
1023 return o_hook(name, throw=False, **args) | |
1024 | |
1025 ### Public method | |
1026 def obsoletedby(self, node): | |
1027 """return the set of node that make <node> obsolete (obj)""" | |
1028 others = set() | |
1029 for marker in self.obsstore.precursors.get(node, []): | |
1030 others.update(marker[1]) | |
1031 return others | |
1032 | |
1033 def obsolete(self, node): | |
1034 """return the set of node that <node> make obsolete (sub)""" | |
1035 return set(marker[0] for marker in self.obsstore.successors.get(node, [])) | |
1036 | |
1037 @storecache('obsstore') | |
1038 def obsstore(self): | |
1039 if not getattr(self, '_importoldobsolete', False): | |
1040 data = repo.opener.tryread('obsolete-relations') | |
1041 if not data: | |
1042 data = repo.sopener.tryread('obsoletemarkers') | |
1043 if data: | |
1044 raise util.Abort('old format of obsolete marker detected!\n' | |
1045 'run `hg debugconvertobsolete` once.') | |
1046 store = obsstore() | |
1047 data = self.sopener.tryread('obsstore') | |
1048 if data: | |
1049 store.loadmarkers(data) | |
1050 return store | |
1051 | |
1052 @util.propertycache | |
1053 def _obsoleteset(self): | |
1054 """the set of obsolete revision""" | |
1055 obs = set() | |
1056 nm = self.changelog.nodemap | |
1057 for obj in self.obsstore.precursors: | |
1058 try: # /!\api change in Hg 2.2 (e8d37b78acfb22ae2c1fb126c2)/!\ | |
1059 rev = nm.get(obj) | |
1060 except TypeError: #XXX to remove while breaking Hg 2.1 support | |
1061 rev = nm.get(obj, None) | |
1062 if rev is not None: | |
1063 obs.add(rev) | |
1064 return obs | |
1065 | |
1066 @util.propertycache | |
1067 def _unstableset(self): | |
1068 """the set of non obsolete revision with obsolete parent""" | |
1069 return set(self.revs('(obsolete()::) - obsolete()')) | |
1070 | |
1071 @util.propertycache | |
1072 def _suspendedset(self): | |
1073 """the set of obsolete parent with non obsolete descendant""" | |
1074 return set(self.revs('obsolete() and obsolete()::unstable()')) | |
1075 | |
1076 @util.propertycache | |
1077 def _extinctset(self): | |
1078 """the set of obsolete parent without non obsolete descendant""" | |
1079 return set(self.revs('obsolete() - obsolete()::unstable()')) | |
1080 | |
1081 @util.propertycache | |
1082 def _latecomerset(self): | |
1083 """the set of rev trying to obsolete public revision""" | |
1084 query = 'allsuccessors(public()) - obsolete() - public()' | |
1085 return set(self.revs(query)) | |
1086 | |
1087 @util.propertycache | |
1088 def _conflictingset(self): | |
1089 """the set of rev trying to obsolete public revision""" | |
1090 conflicting = set() | |
1091 obsstore = self.obsstore | |
1092 newermap = {} | |
1093 for ctx in self.set('(not public()) - obsolete()'): | |
1094 prec = obsstore.successors.get(ctx.node(), ()) | |
1095 toprocess = set(prec) | |
1096 while toprocess: | |
1097 prec = toprocess.pop()[0] | |
1098 if prec not in newermap: | |
1099 newermap[prec] = newerversion(self, prec) | |
1100 newer = [n for n in newermap[prec] if n] # filter kill | |
1101 if len(newer) > 1: | |
1102 conflicting.add(ctx.rev()) | |
1103 break | |
1104 toprocess.update(obsstore.successors.get(prec, ())) | |
1105 return conflicting | |
1106 | |
1107 def _clearobsoletecache(self): | |
1108 if '_obsoleteset' in vars(self): | |
1109 del self._obsoleteset | |
1110 self._clearunstablecache() | |
1111 | |
1112 def updatebranchcache(self): | |
1113 o_updatebranchcache() | |
1114 self._clearunstablecache() | |
1115 | |
1116 def _clearunstablecache(self): | |
1117 if '_unstableset' in vars(self): | |
1118 del self._unstableset | |
1119 if '_suspendedset' in vars(self): | |
1120 del self._suspendedset | |
1121 if '_extinctset' in vars(self): | |
1122 del self._extinctset | |
1123 if '_latecomerset' in vars(self): | |
1124 del self._latecomerset | |
1125 if '_conflictingset' in vars(self): | |
1126 del self._conflictingset | |
1127 | |
1128 def addobsolete(self, sub, obj): | |
1129 """Add a relation marking that node <sub> is a new version of <obj>""" | |
1130 assert sub != obj | |
1131 if not repo[obj].phase(): | |
1132 if sub is None: | |
1133 self.ui.warn( | |
1134 _("trying to kill immutable changeset %(obj)s\n") | |
1135 % {'obj': short(obj)}) | |
1136 if sub is not None: | |
1137 self.ui.warn( | |
1138 _("%(sub)s try to obsolete immutable changeset %(obj)s\n") | |
1139 % {'sub': short(sub), 'obj': short(obj)}) | |
1140 lock = self.lock() | |
1141 try: | |
1142 meta = { | |
1143 'date': util.makedate(), | |
1144 'user': ui.username(), | |
1145 'reason': 'unknown', | |
1146 } | |
1147 subs = (sub == nullid) and [] or [sub] | |
1148 mid = self.obsstore.create(obj, subs, 0, meta) | |
1149 self._clearobsoletecache() | |
1150 self._turn_extinct_secret() | |
1151 return mid | |
1152 finally: | |
1153 lock.release() | |
1154 | |
1155 def addcollapsedobsolete(self, oldnodes, newnode): | |
1156 """Mark oldnodes as collapsed into newnode.""" | |
1157 # Assume oldnodes are all descendants of a single rev | |
1158 rootrevs = self.revs('roots(%ln)', oldnodes) | |
1159 assert len(rootrevs) == 1, rootrevs | |
1160 rootnode = self[rootrevs[0]].node() | |
1161 for n in oldnodes: | |
1162 self.addobsolete(newnode, n) | |
1163 | |
1164 def _turn_extinct_secret(self): | |
1165 """ensure all extinct changeset are secret""" | |
1166 self._clearobsoletecache() | |
1167 # this is mainly for safety purpose | |
1168 # both pull and push | |
1169 query = '(obsolete() - obsolete()::(unstable() - secret())) - secret()' | |
1170 expobs = [c.node() for c in repo.set(query)] | |
1171 phases.retractboundary(repo, 2, expobs) | |
1172 | |
1173 ### Disk IO | |
1174 | |
1175 def lock(self, *args, **kwargs): | |
1176 l = olock(*args, **kwargs) | |
1177 if not getattr(l.releasefn, 'obspatched', False): | |
1178 oreleasefn = l.releasefn | |
1179 def releasefn(*args, **kwargs): | |
1180 if 'obsstore' in vars(self) and self.obsstore._new: | |
1181 f = self.sopener('obsstore', 'wb', atomictemp=True) | |
1182 try: | |
1183 self.obsstore.flushmarkers(f) | |
1184 f.close() | |
1185 except: # re-raises | |
1186 f.discard() | |
1187 raise | |
1188 oreleasefn(*args, **kwargs) | |
1189 releasefn.obspatched = True | |
1190 l.releasefn = releasefn | |
1191 return l | |
1192 | |
1193 | |
1194 ### pull // push support | |
1195 | |
1196 def pull(self, remote, *args, **kwargs): | |
1197 """wrapper around push that push obsolete relation""" | |
1198 l = repo.lock() | |
1199 try: | |
1200 result = opull(remote, *args, **kwargs) | |
1201 remoteobs = remote.listkeys('obsolete') | |
1202 if 'dump' in remoteobs: | |
1203 remoteobs['dump0'] = remoteobs.pop('dump') | |
1204 if 'dump0' in remoteobs: | |
1205 for key, values in remoteobs.iteritems(): | |
1206 if key.startswith('dump'): | |
1207 data = base85.b85decode(remoteobs['dump0']) | |
1208 self.obsstore.mergemarkers(data) | |
1209 self._clearobsoletecache() | |
1210 self._turn_extinct_secret() | |
1211 return result | |
1212 finally: | |
1213 l.release() | |
1214 | |
1215 def push(self, remote, *args, **opts): | |
1216 """wrapper around pull that pull obsolete relation""" | |
1217 self._turn_extinct_secret() | |
1218 try: | |
1219 result = opush(remote, *args, **opts) | |
1220 except util.Abort, ex: | |
1221 hint = _("use 'hg stabilize' to get a stable history (or --force to proceed)") | |
1222 if (len(ex.args) >= 1 | |
1223 and ex.args[0].startswith('push includes ') | |
1224 and ex.hint is None): | |
1225 ex.hint = hint | |
1226 raise | |
1227 if 'obsolete' in remote.listkeys('namespaces') and self.obsstore: | |
1228 data = self.obsstore._writemarkers() | |
1229 r = remote.pushkey('obsolete', 'dump0', '', | |
1230 base85.b85encode(data)) | |
1231 if not r: | |
1232 self.ui.warn(_('failed to push obsolete markers!\n')) | |
1233 self._turn_extinct_secret() | |
1234 | |
1235 return result | |
1236 | |
1237 | |
1238 ### rollback support | |
1239 | |
1240 # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\ | |
1241 if util.safehasattr(repo, '_journalfiles'): # Hg 2.2 | |
1242 def _journalfiles(self): | |
1243 return o_journalfiles() + (self.sjoin('journal.obsstore'),) | |
1244 | |
1245 def _writejournal(self, desc): | |
1246 """wrapped version of _writejournal that save obsolete data""" | |
1247 o_writejournal(desc) | |
1248 filename = 'obsstore' | |
1249 filepath = self.sjoin(filename) | |
1250 if os.path.exists(filepath): | |
1251 journalname = 'journal.' + filename | |
1252 journalpath = self.sjoin(journalname) | |
1253 util.copyfile(filepath, journalpath) | |
1254 | |
1255 else: # XXX removing this bloc will break Hg 2.1 support | |
1256 def _writejournal(self, desc): | |
1257 """wrapped version of _writejournal that save obsolete data""" | |
1258 entries = list(o_writejournal(desc)) | |
1259 filename = 'obsstore' | |
1260 filepath = self.sjoin(filename) | |
1261 if os.path.exists(filepath): | |
1262 journalname = 'journal.' + filename | |
1263 journalpath = self.sjoin(journalname) | |
1264 util.copyfile(filepath, journalpath) | |
1265 entries.append(journalpath) | |
1266 return tuple(entries) | |
1267 | |
1268 def _rollback(self, dryrun, force): | |
1269 """wrapped version of _rollback that restore obsolete data""" | |
1270 ret = o_rollback(dryrun, force) | |
1271 if not (ret or dryrun): # rollback did not fail |
1272 src = self.sjoin('undo.obsstore') | |
1273 dst = self.sjoin('obsstore') | |
1274 if os.path.exists(src): | |
1275 util.rename(src, dst) | |
1276 elif os.path.exists(dst): | |
1277 # No state was saved because the file did not exist before; remove it. |
1278 os.unlink(dst) | |
1279 # invalidate cache | |
1280 self.__dict__.pop('obsstore', None) | |
1281 return ret | |
1282 | |
1283 @storecache('00changelog.i') | |
1284 def changelog(self): | |
1285 # << copy pasted from mercurial source | |
1286 c = changelog.changelog(self.sopener) | |
1287 if 'HG_PENDING' in os.environ: | |
1288 p = os.environ['HG_PENDING'] | |
1289 if p.startswith(self.root): | |
1290 c.readpending('00changelog.i.a') | |
1291 # >> end of the copy paste | |
1292 old = c.__dict__.pop('hiddenrevs', ()) | |
1293 if old: | |
1294 ui.warn("old wasn't empty ? %r" % old) | |
1295 def _sethidden(c, value): | |
1296 assert not value | |
1297 | |
1298 | |
1299 class hchangelog(c.__class__): | |
1300 @util.propertycache | |
1301 def hiddenrevs(c): | |
1302 shown = ['not obsolete()', '.', 'bookmark()', 'tagged()', | |
1303 'public()'] | |
1304 basicquery = 'obsolete() - (::(%s))' % (' or '.join(shown)) | |
1305 # !!! self is repo not changelog | |
1306 result = set(scmutil.revrange(self, [basicquery])) | |
1307 return result | |
1308 c.__class__ = hchangelog | |
1309 return c | |
1310 | |
1311 repo.__class__ = obsoletingrepo |