comparison mercurial/upgrade.py @ 31864:70d163b86316
upgrade: extract code in its own module
Given that about 2/3 of 'mercurial.repair' is now about repository upgrade, I think
it is fair to move it into its own module.
An expected benefit is the ability to drop the 'upgrade' prefix of many
functions. This will be done in coming changesets.
author     Pierre-Yves David <pierre-yves.david@ens-lyon.org>
date       Fri, 07 Apr 2017 18:53:17 +0200
parents    mercurial/repair.py@7095e783958d
children   5dcaa0f4455b
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import stat
import tempfile

from .i18n import _
from . import (
    changelog,
    error,
    manifest,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
)

def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return set([
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    ])

def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return set([
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ])

def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return set()

def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ])
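
# Editorial note (not part of this changeset): the docstring above invites
# extensions to monkeypatch upgradesupporteddestrequirements(). A minimal,
# hypothetical sketch of doing so from an extension's extsetup(), assuming an
# invented 'myextrastorage' requirement, could look like this:
#
#     from mercurial import extensions, upgrade
#
#     def _destrequirements(orig, repo):
#         reqs = orig(repo)
#         reqs.add('myextrastorage')  # hypothetical extension requirement
#         return reqs
#
#     def extsetup(ui):
#         extensions.wrapfunction(upgrade, 'upgradesupporteddestrequirements',
#                                 _destrequirements)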

def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
    ])

deficiency = 'deficiency'
optimisation = 'optimization'

class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        for k, v in kwargs.items():
            setattr(self, k, v)

def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements

def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    knownreqs = upgradesupporteddestrequirements(repo)

    for i in improvements:
        name = i.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        if i.type == deficiency:
            newactions.append(name)

    newactions.extend(o for o in sorted(optimize) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions

def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    elif path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    else:
        # Filelogs don't do anything special with settings. So we can use a
        # vanilla revlog.
        return revlog.revlog(repo.svfs, path)

def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)
        revcount += len(rl)

        datasize = 0
        rawsize = 0
        idx = rl.index
        for rev in rl:
            e = idx[rev]
            datasize += e[1]
            rawsize += e[2]

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, revlog.revlog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    progress = []
    def oncopiedrevision(rl, rev, node):
        progress[1] += 1
        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))

def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Function should return ``True`` if the file is to be copied.
    """
    # Skip revlogs.
    if path.endswith(('.i', '.d')):
        return False
    # Skip transaction related files.
    if path.startswith('undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other skipped files.
    if path in ('lock', 'fncache'):
        return False

    return True

def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """

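# Editorial note (not part of this changeset): an extension that keeps extra
# data under .hg/store could use the hook point above to migrate it. A hedged,
# illustrative sketch (the 'mycache' filename is invented):
#
#     from mercurial import extensions, upgrade, util
#
#     def _finishmigration(orig, ui, srcrepo, dstrepo, requirements):
#         orig(ui, srcrepo, dstrepo, requirements)
#         if srcrepo.store.vfs.exists('mycache'):
#             util.copyfile(srcrepo.store.vfs.join('mycache'),
#                           dstrepo.store.vfs.join('mycache'))
#
#     def extsetup(ui):
#         extensions.wrapfunction(upgrade, '_upgradefinishdatamigration',
#                                 _finishmigration)
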
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
            if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                           p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.vfs.join(p)
            dst = dstrepo.store.vfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_(' preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_(' removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_(' added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

                if backuppath:
                    ui.warn(_('copy of old repository backed up at %s\n') %
                            backuppath)
                    ui.warn(_('the old repository will not be deleted; remove '
                              'it to free up disk space once the upgraded '
                              'repository is verified\n'))
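
For context, upgraderepo() is the entry point driven by the 'hg debugupgraderepo'
command. The following is a rough editorial sketch (not part of this changeset) of
invoking it directly from Python; the repository path is a placeholder and
obtaining ui/repo objects this way is just one option:

    from mercurial import hg, ui as uimod, upgrade

    ui = uimod.ui.load()
    repo = hg.repository(ui, '/path/to/repo')  # placeholder path

    # Dry run: report deficiencies, requirement changes and planned actions only.
    upgrade.upgraderepo(ui, repo, run=False)

    # Perform the upgrade, additionally requesting the 'redeltaparent'
    # optimization (equivalent to --run --optimize redeltaparent).
    upgrade.upgraderepo(ui, repo, run=True, optimize=['redeltaparent'])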