34 def backupbundle( |
34 def backupbundle( |
35 repo, bases, heads, node, suffix, compress=True, obsolescence=True |
35 repo, bases, heads, node, suffix, compress=True, obsolescence=True |
36 ): |
36 ): |
37 """create a bundle with the specified revisions as a backup""" |
37 """create a bundle with the specified revisions as a backup""" |
38 |
38 |
39 backupdir = "strip-backup" |
39 backupdir = b"strip-backup" |
40 vfs = repo.vfs |
40 vfs = repo.vfs |
41 if not vfs.isdir(backupdir): |
41 if not vfs.isdir(backupdir): |
42 vfs.mkdir(backupdir) |
42 vfs.mkdir(backupdir) |
43 |
43 |
44 # Include a hash of all the nodes in the filename for uniqueness |
44 # Include a hash of all the nodes in the filename for uniqueness |
45 allcommits = repo.set('%ln::%ln', bases, heads) |
45 allcommits = repo.set(b'%ln::%ln', bases, heads) |
46 allhashes = sorted(c.hex() for c in allcommits) |
46 allhashes = sorted(c.hex() for c in allcommits) |
47 totalhash = hashlib.sha1(''.join(allhashes)).digest() |
47 totalhash = hashlib.sha1(b''.join(allhashes)).digest() |
48 name = "%s/%s-%s-%s.hg" % ( |
48 name = b"%s/%s-%s-%s.hg" % ( |
49 backupdir, |
49 backupdir, |
50 short(node), |
50 short(node), |
51 hex(totalhash[:4]), |
51 hex(totalhash[:4]), |
52 suffix, |
52 suffix, |
53 ) |
53 ) |
54 |
54 |
55 cgversion = changegroup.localversion(repo) |
55 cgversion = changegroup.localversion(repo) |
56 comp = None |
56 comp = None |
57 if cgversion != '01': |
57 if cgversion != b'01': |
58 bundletype = "HG20" |
58 bundletype = b"HG20" |
59 if compress: |
59 if compress: |
60 comp = 'BZ' |
60 comp = b'BZ' |
61 elif compress: |
61 elif compress: |
62 bundletype = "HG10BZ" |
62 bundletype = b"HG10BZ" |
63 else: |
63 else: |
64 bundletype = "HG10UN" |
64 bundletype = b"HG10UN" |
65 |
65 |
66 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads) |
66 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads) |
67 contentopts = { |
67 contentopts = { |
68 'cg.version': cgversion, |
68 b'cg.version': cgversion, |
69 'obsolescence': obsolescence, |
69 b'obsolescence': obsolescence, |
70 'phases': True, |
70 b'phases': True, |
71 } |
71 } |
72 return bundle2.writenewbundle( |
72 return bundle2.writenewbundle( |
73 repo.ui, |
73 repo.ui, |
74 repo, |
74 repo, |
75 'strip', |
75 b'strip', |
76 name, |
76 name, |
77 bundletype, |
77 bundletype, |
78 outgoing, |
78 outgoing, |
79 contentopts, |
79 contentopts, |
80 vfs, |
80 vfs, |
107 s.update(_collectrevlog(repo.file(fname), striprev)) |
107 s.update(_collectrevlog(repo.file(fname), striprev)) |
108 |
108 |
109 return s |
109 return s |
110 |
110 |
111 |
111 |
112 def strip(ui, repo, nodelist, backup=True, topic='backup'): |
112 def strip(ui, repo, nodelist, backup=True, topic=b'backup'): |
113 # This function requires the caller to lock the repo, but it operates |
113 # This function requires the caller to lock the repo, but it operates |
114 # within a transaction of its own, and thus requires there to be no current |
114 # within a transaction of its own, and thus requires there to be no current |
115 # transaction when it is called. |
115 # transaction when it is called. |
116 if repo.currenttransaction() is not None: |
116 if repo.currenttransaction() is not None: |
117 raise error.ProgrammingError('cannot strip from inside a transaction') |
117 raise error.ProgrammingError(b'cannot strip from inside a transaction') |
118 |
118 |
119 # Simple way to maintain backwards compatibility for this |
119 # Simple way to maintain backwards compatibility for this |
120 # argument. |
120 # argument. |
121 if backup in ['none', 'strip']: |
121 if backup in [b'none', b'strip']: |
122 backup = False |
122 backup = False |
123 |
123 |
124 repo = repo.unfiltered() |
124 repo = repo.unfiltered() |
125 repo.destroying() |
125 repo.destroying() |
126 vfs = repo.vfs |
126 vfs = repo.vfs |
214 repo.file(fn).strip(striprev, tr) |
214 repo.file(fn).strip(striprev, tr) |
215 tr.endgroup() |
215 tr.endgroup() |
216 |
216 |
217 for i in pycompat.xrange(offset, len(tr._entries)): |
217 for i in pycompat.xrange(offset, len(tr._entries)): |
218 file, troffset, ignore = tr._entries[i] |
218 file, troffset, ignore = tr._entries[i] |
219 with repo.svfs(file, 'a', checkambig=True) as fp: |
219 with repo.svfs(file, b'a', checkambig=True) as fp: |
220 fp.truncate(troffset) |
220 fp.truncate(troffset) |
221 if troffset == 0: |
221 if troffset == 0: |
222 repo.store.markremoved(file) |
222 repo.store.markremoved(file) |
223 |
223 |
224 deleteobsmarkers(repo.obsstore, stripobsidx) |
224 deleteobsmarkers(repo.obsstore, stripobsidx) |
225 del repo.obsstore |
225 del repo.obsstore |
226 repo.invalidatevolatilesets() |
226 repo.invalidatevolatilesets() |
227 repo._phasecache.filterunknown(repo) |
227 repo._phasecache.filterunknown(repo) |
228 |
228 |
229 if tmpbundlefile: |
229 if tmpbundlefile: |
230 ui.note(_("adding branch\n")) |
230 ui.note(_(b"adding branch\n")) |
231 f = vfs.open(tmpbundlefile, "rb") |
231 f = vfs.open(tmpbundlefile, b"rb") |
232 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs) |
232 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs) |
233 if not repo.ui.verbose: |
233 if not repo.ui.verbose: |
234 # silence internal shuffling chatter |
234 # silence internal shuffling chatter |
235 repo.ui.pushbuffer() |
235 repo.ui.pushbuffer() |
236 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile) |
236 tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile) |
237 txnname = 'strip' |
237 txnname = b'strip' |
238 if not isinstance(gen, bundle2.unbundle20): |
238 if not isinstance(gen, bundle2.unbundle20): |
239 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl) |
239 txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl) |
240 with repo.transaction(txnname) as tr: |
240 with repo.transaction(txnname) as tr: |
241 bundle2.applybundle( |
241 bundle2.applybundle( |
242 repo, gen, tr, source='strip', url=tmpbundleurl |
242 repo, gen, tr, source=b'strip', url=tmpbundleurl |
243 ) |
243 ) |
244 if not repo.ui.verbose: |
244 if not repo.ui.verbose: |
245 repo.ui.popbuffer() |
245 repo.ui.popbuffer() |
246 f.close() |
246 f.close() |
247 |
247 |
248 with repo.transaction('repair') as tr: |
248 with repo.transaction(b'repair') as tr: |
249 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] |
249 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] |
250 repo._bookmarks.applychanges(repo, tr, bmchanges) |
250 repo._bookmarks.applychanges(repo, tr, bmchanges) |
251 |
251 |
252 # remove undo files |
252 # remove undo files |
253 for undovfs, undofile in repo.undofiles(): |
253 for undovfs, undofile in repo.undofiles(): |
254 try: |
254 try: |
255 undovfs.unlink(undofile) |
255 undovfs.unlink(undofile) |
256 except OSError as e: |
256 except OSError as e: |
257 if e.errno != errno.ENOENT: |
257 if e.errno != errno.ENOENT: |
258 ui.warn( |
258 ui.warn( |
259 _('error removing %s: %s\n') |
259 _(b'error removing %s: %s\n') |
260 % ( |
260 % ( |
261 undovfs.join(undofile), |
261 undovfs.join(undofile), |
262 stringutil.forcebytestr(e), |
262 stringutil.forcebytestr(e), |
263 ) |
263 ) |
264 ) |
264 ) |
265 |
265 |
266 except: # re-raises |
266 except: # re-raises |
267 if backupfile: |
267 if backupfile: |
268 ui.warn( |
268 ui.warn( |
269 _("strip failed, backup bundle stored in '%s'\n") |
269 _(b"strip failed, backup bundle stored in '%s'\n") |
270 % vfs.join(backupfile) |
270 % vfs.join(backupfile) |
271 ) |
271 ) |
272 if tmpbundlefile: |
272 if tmpbundlefile: |
273 ui.warn( |
273 ui.warn( |
274 _("strip failed, unrecovered changes stored in '%s'\n") |
274 _(b"strip failed, unrecovered changes stored in '%s'\n") |
275 % vfs.join(tmpbundlefile) |
275 % vfs.join(tmpbundlefile) |
276 ) |
276 ) |
277 ui.warn( |
277 ui.warn( |
278 _( |
278 _( |
279 "(fix the problem, then recover the changesets with " |
279 b"(fix the problem, then recover the changesets with " |
280 "\"hg unbundle '%s'\")\n" |
280 b"\"hg unbundle '%s'\")\n" |
281 ) |
281 ) |
282 % vfs.join(tmpbundlefile) |
282 % vfs.join(tmpbundlefile) |
283 ) |
283 ) |
284 raise |
284 raise |
285 else: |
285 else: |
291 # return the backup file path (or None if 'backup' was False) so |
291 # return the backup file path (or None if 'backup' was False) so |
292 # extensions can use it |
292 # extensions can use it |
293 return backupfile |
293 return backupfile |
294 |
294 |
295 |
295 |
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase

    Instead of deleting revlog data, the stripped changesets (and all their
    descendants) are retracted to the ``archived`` phase, hiding them.

    ``nodelist`` are the root nodes to strip; ``backup`` controls whether a
    backup bundle is written first; ``topic`` is the backup-file suffix.
    Returns the backup bundle path, or None when nothing was stripped or no
    backup was requested.
    """
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    # Initialize up front: the original only assigned this inside the
    # ``if backup:`` branch, so calling with backup=False raised
    # UnboundLocalError at the return below.
    backupfile = None
    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
312 |
312 |
323 # If we need to move bookmarks, compute bookmark |
323 # If we need to move bookmarks, compute bookmark |
324 # targets. Otherwise we can skip doing this logic. |
324 # targets. Otherwise we can skip doing this logic. |
325 if updatebm: |
325 if updatebm: |
326 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), |
326 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), |
327 # but is much faster |
327 # but is much faster |
328 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip) |
328 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip) |
329 if newbmtarget: |
329 if newbmtarget: |
330 newbmtarget = repo[newbmtarget.first()].node() |
330 newbmtarget = repo[newbmtarget.first()].node() |
331 else: |
331 else: |
332 newbmtarget = '.' |
332 newbmtarget = b'.' |
333 return newbmtarget, updatebm |
333 return newbmtarget, updatebm |
334 |
334 |
335 |
335 |
def _createstripbackup(repo, stripbases, node, topic):
    """Write a backup bundle of the changesets about to be stripped.

    The bundle covers everything from ``stripbases`` up to the current
    changelog heads.  Reports the saved path to the user and the log, and
    returns it (relative to ``repo.vfs``).
    """
    storagevfs = repo.vfs
    changelog = repo.changelog
    bundlepath = backupbundle(repo, stripbases, changelog.heads(), node, topic)
    fullpath = storagevfs.join(bundlepath)
    repo.ui.status(_(b"saved backup bundle to %s\n") % fullpath)
    repo.ui.log(b"backupbundle", b"saved backup bundle to %s\n", fullpath)
    return bundlepath
346 |
346 |
347 |
347 |
348 def safestriproots(ui, repo, nodes): |
348 def safestriproots(ui, repo, nodes): |
351 revs = set(torev(n) for n in nodes) |
351 revs = set(torev(n) for n in nodes) |
352 # tostrip = wanted - unsafe = wanted - ancestors(orphaned) |
352 # tostrip = wanted - unsafe = wanted - ancestors(orphaned) |
353 # orphaned = affected - wanted |
353 # orphaned = affected - wanted |
354 # affected = descendants(roots(wanted)) |
354 # affected = descendants(roots(wanted)) |
355 # wanted = revs |
355 # wanted = revs |
356 revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' |
356 revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' |
357 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) |
357 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) |
358 notstrip = revs - tostrip |
358 notstrip = revs - tostrip |
359 if notstrip: |
359 if notstrip: |
360 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip)) |
360 nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) |
361 ui.warn( |
361 ui.warn( |
362 _('warning: orphaned descendants detected, ' 'not stripping %s\n') |
362 _(b'warning: orphaned descendants detected, ' b'not stripping %s\n') |
363 % nodestr |
363 % nodestr |
364 ) |
364 ) |
365 return [c.node() for c in repo.set('roots(%ld)', tostrip)] |
365 return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] |
366 |
366 |
367 |
367 |
368 class stripcallback(object): |
368 class stripcallback(object): |
369 """used as a transaction postclose callback""" |
369 """used as a transaction postclose callback""" |
370 |
370 |
371 def __init__(self, ui, repo, backup, topic): |
371 def __init__(self, ui, repo, backup, topic): |
372 self.ui = ui |
372 self.ui = ui |
373 self.repo = repo |
373 self.repo = repo |
374 self.backup = backup |
374 self.backup = backup |
375 self.topic = topic or 'backup' |
375 self.topic = topic or b'backup' |
376 self.nodelist = [] |
376 self.nodelist = [] |
377 |
377 |
378 def addnodes(self, nodes): |
378 def addnodes(self, nodes): |
379 self.nodelist.extend(nodes) |
379 self.nodelist.extend(nodes) |
380 |
380 |
397 if not tr: |
397 if not tr: |
398 nodes = safestriproots(ui, repo, nodelist) |
398 nodes = safestriproots(ui, repo, nodelist) |
399 return strip(ui, repo, nodes, backup=backup, topic=topic) |
399 return strip(ui, repo, nodes, backup=backup, topic=topic) |
400 # transaction postclose callbacks are called in alphabet order. |
400 # transaction postclose callbacks are called in alphabet order. |
401 # use '\xff' as prefix so we are likely to be called last. |
401 # use '\xff' as prefix so we are likely to be called last. |
402 callback = tr.getpostclose('\xffstrip') |
402 callback = tr.getpostclose(b'\xffstrip') |
403 if callback is None: |
403 if callback is None: |
404 callback = stripcallback(ui, repo, backup=backup, topic=topic) |
404 callback = stripcallback(ui, repo, backup=backup, topic=topic) |
405 tr.addpostclose('\xffstrip', callback) |
405 tr.addpostclose(b'\xffstrip', callback) |
406 if topic: |
406 if topic: |
407 callback.topic = topic |
407 callback.topic = topic |
408 callback.addnodes(nodelist) |
408 callback.addnodes(nodelist) |
409 |
409 |
410 |
410 |
413 revlog.strip(striprev, tr) |
413 revlog.strip(striprev, tr) |
414 |
414 |
415 |
415 |
def manifestrevlogs(repo):
    """Yield every manifest revlog of ``repo``, the root manifest first."""
    yield repo.manifestlog.getstorage(b'')
    if b'treemanifest' not in repo.requirements:
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        return
    prefix, suffix = b'meta/', b'00manifest.i'
    for unencoded, encoded, size in repo.store.datafiles():
        if unencoded.startswith(prefix) and unencoded.endswith(suffix):
            # strip 'meta/' and '00manifest.i' to recover the tree path
            tree = unencoded[len(prefix):-len(suffix)]
            yield repo.manifestlog.getstorage(tree)
427 |
427 |
428 |
428 |
461 # This is to minimize I/O. |
461 # This is to minimize I/O. |
462 if f in seenfiles: |
462 if f in seenfiles: |
463 continue |
463 continue |
464 seenfiles.add(f) |
464 seenfiles.add(f) |
465 |
465 |
466 i = 'data/%s.i' % f |
466 i = b'data/%s.i' % f |
467 d = 'data/%s.d' % f |
467 d = b'data/%s.d' % f |
468 |
468 |
469 if repo.store._exists(i): |
469 if repo.store._exists(i): |
470 newentries.add(i) |
470 newentries.add(i) |
471 if repo.store._exists(d): |
471 if repo.store._exists(d): |
472 newentries.add(d) |
472 newentries.add(d) |
473 |
473 |
474 progress.complete() |
474 progress.complete() |
475 |
475 |
476 if 'treemanifest' in repo.requirements: |
476 if b'treemanifest' in repo.requirements: |
477 # This logic is safe if treemanifest isn't enabled, but also |
477 # This logic is safe if treemanifest isn't enabled, but also |
478 # pointless, so we skip it if treemanifest isn't enabled. |
478 # pointless, so we skip it if treemanifest isn't enabled. |
479 for dir in util.dirs(seenfiles): |
479 for dir in util.dirs(seenfiles): |
480 i = 'meta/%s/00manifest.i' % dir |
480 i = b'meta/%s/00manifest.i' % dir |
481 d = 'meta/%s/00manifest.d' % dir |
481 d = b'meta/%s/00manifest.d' % dir |
482 |
482 |
483 if repo.store._exists(i): |
483 if repo.store._exists(i): |
484 newentries.add(i) |
484 newentries.add(i) |
485 if repo.store._exists(d): |
485 if repo.store._exists(d): |
486 newentries.add(d) |
486 newentries.add(d) |
487 |
487 |
488 addcount = len(newentries - oldentries) |
488 addcount = len(newentries - oldentries) |
489 removecount = len(oldentries - newentries) |
489 removecount = len(oldentries - newentries) |
490 for p in sorted(oldentries - newentries): |
490 for p in sorted(oldentries - newentries): |
491 ui.write(_('removing %s\n') % p) |
491 ui.write(_(b'removing %s\n') % p) |
492 for p in sorted(newentries - oldentries): |
492 for p in sorted(newentries - oldentries): |
493 ui.write(_('adding %s\n') % p) |
493 ui.write(_(b'adding %s\n') % p) |
494 |
494 |
495 if addcount or removecount: |
495 if addcount or removecount: |
496 ui.write( |
496 ui.write( |
497 _('%d items added, %d removed from fncache\n') |
497 _(b'%d items added, %d removed from fncache\n') |
498 % (addcount, removecount) |
498 % (addcount, removecount) |
499 ) |
499 ) |
500 fnc.entries = newentries |
500 fnc.entries = newentries |
501 fnc._dirty = True |
501 fnc._dirty = True |
502 |
502 |
503 with repo.transaction('fncache') as tr: |
503 with repo.transaction(b'fncache') as tr: |
504 fnc.write(tr) |
504 fnc.write(tr) |
505 else: |
505 else: |
506 ui.write(_('fncache already up to date\n')) |
506 ui.write(_(b'fncache already up to date\n')) |
507 |
507 |
508 |
508 |
509 def deleteobsmarkers(obsstore, indices): |
509 def deleteobsmarkers(obsstore, indices): |
510 """Delete some obsmarkers from obsstore and return how many were deleted |
510 """Delete some obsmarkers from obsstore and return how many were deleted |
511 |
511 |