Mercurial > evolve
comparison hgext3rd/pullbundle.py @ 4814:48b30ff742cb
python3: use format-source to run byteify-strings in .py files
Using the format-source extension smooths out the pain of merging after
auto-formatting.
This change makes all of the Evolve test suite pass under python3 and has the
added benefit of being 100% automated using mercurial's `byteify-strings`
script version 1.0 (revision 11498aa91c036c6d70f7ac5ee5af2664a84a1130).
How to benefit from the help of format-source is explained in the README.
author | Raphaël Gomès <rgomes@octobus.net> |
---|---|
date | Tue, 06 Aug 2019 15:06:38 +0200 |
parents | ea8da5aa23c6 |
children | bb2b4f6c99dc |
comparison
equal
deleted
inserted
replaced
4812:67567d7f1174 | 4814:48b30ff742cb |
---|---|
90 util, | 90 util, |
91 ) | 91 ) |
92 | 92 |
93 from mercurial.i18n import _ | 93 from mercurial.i18n import _ |
94 | 94 |
95 __version__ = '0.1.1' | 95 __version__ = b'0.1.1' |
96 testedwith = '4.4 4.5 4.6 4.7.1' | 96 testedwith = b'4.4 4.5 4.6 4.7.1' |
97 minimumhgversion = '4.4' | 97 minimumhgversion = b'4.4' |
98 buglink = 'https://bz.mercurial-scm.org/' | 98 buglink = b'https://bz.mercurial-scm.org/' |
99 | 99 |
100 cmdtable = {} | 100 cmdtable = {} |
101 command = registrar.command(cmdtable) | 101 command = registrar.command(cmdtable) |
102 | 102 |
103 configtable = {} | 103 configtable = {} |
104 configitem = registrar.configitem(configtable) | 104 configitem = registrar.configitem(configtable) |
105 | 105 |
106 configitem('pullbundle', 'cache-directory', | 106 configitem(b'pullbundle', b'cache-directory', |
107 default=None, | 107 default=None, |
108 ) | 108 ) |
109 | 109 |
110 # generic wrapping | 110 # generic wrapping |
111 | 111 |
112 def uisetup(ui): | 112 def uisetup(ui): |
113 exchange.getbundle2partsmapping['changegroup'] = _getbundlechangegrouppart | 113 exchange.getbundle2partsmapping[b'changegroup'] = _getbundlechangegrouppart |
114 | 114 |
115 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None, | 115 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None, |
116 b2caps=None, heads=None, common=None, **kwargs): | 116 b2caps=None, heads=None, common=None, **kwargs): |
117 """add a changegroup part to the requested bundle""" | 117 """add a changegroup part to the requested bundle""" |
118 if not kwargs.get(r'cg', True): | 118 if not kwargs.get(r'cg', True): |
119 return | 119 return |
120 | 120 |
121 version = '01' | 121 version = b'01' |
122 cgversions = b2caps.get('changegroup') | 122 cgversions = b2caps.get(b'changegroup') |
123 if cgversions: # 3.1 and 3.2 ship with an empty value | 123 if cgversions: # 3.1 and 3.2 ship with an empty value |
124 cgversions = [v for v in cgversions | 124 cgversions = [v for v in cgversions |
125 if v in changegroup.supportedoutgoingversions(repo)] | 125 if v in changegroup.supportedoutgoingversions(repo)] |
126 if not cgversions: | 126 if not cgversions: |
127 raise ValueError(_('no common changegroup version')) | 127 raise ValueError(_(b'no common changegroup version')) |
128 version = max(cgversions) | 128 version = max(cgversions) |
129 | 129 |
130 outgoing = exchange._computeoutgoing(repo, heads, common) | 130 outgoing = exchange._computeoutgoing(repo, heads, common) |
131 if not outgoing.missing: | 131 if not outgoing.missing: |
132 return | 132 return |
143 makeallcgpart(bundler.newpart, repo, outgoing, version, source, bundlecaps, | 143 makeallcgpart(bundler.newpart, repo, outgoing, version, source, bundlecaps, |
144 filematcher, cgversions) | 144 filematcher, cgversions) |
145 # END OF ALTERED PART | 145 # END OF ALTERED PART |
146 | 146 |
147 if kwargs.get(r'narrow', False) and (include or exclude): | 147 if kwargs.get(r'narrow', False) and (include or exclude): |
148 narrowspecpart = bundler.newpart('narrow:spec') | 148 narrowspecpart = bundler.newpart(b'narrow:spec') |
149 if include: | 149 if include: |
150 narrowspecpart.addparam( | 150 narrowspecpart.addparam( |
151 'include', '\n'.join(include), mandatory=True) | 151 b'include', b'\n'.join(include), mandatory=True) |
152 if exclude: | 152 if exclude: |
153 narrowspecpart.addparam( | 153 narrowspecpart.addparam( |
154 'exclude', '\n'.join(exclude), mandatory=True) | 154 b'exclude', b'\n'.join(exclude), mandatory=True) |
155 | 155 |
156 def makeallcgpart(newpart, repo, outgoing, version, source, | 156 def makeallcgpart(newpart, repo, outgoing, version, source, |
157 bundlecaps, filematcher, cgversions): | 157 bundlecaps, filematcher, cgversions): |
158 | 158 |
159 pullbundle = not filematcher | 159 pullbundle = not filematcher |
160 if pullbundle and not util.safehasattr(repo, 'stablerange'): | 160 if pullbundle and not util.safehasattr(repo, 'stablerange'): |
161 repo.ui.warn('pullbundle: required extension "evolve" are missing, skipping pullbundle\n') | 161 repo.ui.warn(b'pullbundle: required extension "evolve" are missing, skipping pullbundle\n') |
162 pullbundle = False | 162 pullbundle = False |
163 if filematcher: | 163 if filematcher: |
164 makeonecgpart(newpart, repo, None, outgoing, version, source, bundlecaps, | 164 makeonecgpart(newpart, repo, None, outgoing, version, source, bundlecaps, |
165 filematcher, cgversions) | 165 filematcher, cgversions) |
166 else: | 166 else: |
167 start = util.timer() | 167 start = util.timer() |
168 slices = sliceoutgoing(repo, outgoing) | 168 slices = sliceoutgoing(repo, outgoing) |
169 end = util.timer() | 169 end = util.timer() |
170 msg = _('pullbundle-cache: "missing" set sliced into %d subranges ' | 170 msg = _(b'pullbundle-cache: "missing" set sliced into %d subranges ' |
171 'in %f seconds\n') | 171 b'in %f seconds\n') |
172 repo.ui.write(msg % (len(slices), end - start)) | 172 repo.ui.write(msg % (len(slices), end - start)) |
173 for sliceid, sliceout in slices: | 173 for sliceid, sliceout in slices: |
174 makeonecgpart(newpart, repo, sliceid, sliceout, version, source, bundlecaps, | 174 makeonecgpart(newpart, repo, sliceid, sliceout, version, source, bundlecaps, |
175 filematcher, cgversions) | 175 filematcher, cgversions) |
176 | 176 |
190 ss = [] | 190 ss = [] |
191 allslices = [] | 191 allslices = [] |
192 missingheads = [rev(n) for n in sorted(outgoing.missingheads, reverse=True)] | 192 missingheads = [rev(n) for n in sorted(outgoing.missingheads, reverse=True)] |
193 for head in missingheads: | 193 for head in missingheads: |
194 localslices = [] | 194 localslices = [] |
195 localmissing = set(repo.revs('%ld and ::%d', missingrevs, head)) | 195 localmissing = set(repo.revs(b'%ld and ::%d', missingrevs, head)) |
196 thisrunmissing = localmissing.copy() | 196 thisrunmissing = localmissing.copy() |
197 while localmissing: | 197 while localmissing: |
198 slicerevs = [] | 198 slicerevs = [] |
199 for r in revsort.walkfrom(repo, head): | 199 for r in revsort.walkfrom(repo, head): |
200 if r not in thisrunmissing: | 200 if r not in thisrunmissing: |
205 if DEBUG: | 205 if DEBUG: |
206 ss.append(slicerevs) | 206 ss.append(slicerevs) |
207 missingrevs.difference_update(slicerevs) | 207 missingrevs.difference_update(slicerevs) |
208 localmissing.difference_update(slicerevs) | 208 localmissing.difference_update(slicerevs) |
209 if localmissing: | 209 if localmissing: |
210 heads = list(repo.revs('heads(%ld)', localmissing)) | 210 heads = list(repo.revs(b'heads(%ld)', localmissing)) |
211 heads.sort(key=node) | 211 heads.sort(key=node) |
212 head = heads.pop() | 212 head = heads.pop() |
213 if heads: | 213 if heads: |
214 thisrunmissing = repo.revs('%ld and only(%d, %ld)', | 214 thisrunmissing = repo.revs(b'%ld and only(%d, %ld)', |
215 localmissing, | 215 localmissing, |
216 head, | 216 head, |
217 heads) | 217 heads) |
218 else: | 218 else: |
219 thisrunmissing = localmissing.copy() | 219 thisrunmissing = localmissing.copy() |
220 if DEBUG: | 220 if DEBUG: |
221 for s in reversed(ss): | 221 for s in reversed(ss): |
222 ms -= set(s) | 222 ms -= set(s) |
223 missingbase = repo.revs('parents(%ld) and %ld', s, ms) | 223 missingbase = repo.revs(b'parents(%ld) and %ld', s, ms) |
224 if missingbase: | 224 if missingbase: |
225 repo.ui.write_err('!!! rev bundled while parents missing\n') | 225 repo.ui.write_err(b'!!! rev bundled while parents missing\n') |
226 repo.ui.write_err(' parent: %s\n' % list(missingbase)) | 226 repo.ui.write_err(b' parent: %s\n' % list(missingbase)) |
227 pb = repo.revs('%ld and children(%ld)', s, missingbase) | 227 pb = repo.revs(b'%ld and children(%ld)', s, missingbase) |
228 repo.ui.write_err(' children: %s\n' % list(pb)) | 228 repo.ui.write_err(b' children: %s\n' % list(pb)) |
229 h = repo.revs('heads(%ld)', s) | 229 h = repo.revs(b'heads(%ld)', s) |
230 repo.ui.write_err(' heads: %s\n' % list(h)) | 230 repo.ui.write_err(b' heads: %s\n' % list(h)) |
231 raise error.ProgrammingError('issuing a range before its parents') | 231 raise error.ProgrammingError(b'issuing a range before its parents') |
232 | 232 |
233 for s in reversed(localslices): | 233 for s in reversed(localslices): |
234 allslices.extend(s) | 234 allslices.extend(s) |
235 # unknown subrange might had to be computed | 235 # unknown subrange might had to be computed |
236 repo.stablerange.save(repo) | 236 repo.stablerange.save(repo) |
379 missingheads=nodes) | 379 missingheads=nodes) |
380 | 380 |
381 # changegroup part construction | 381 # changegroup part construction |
382 | 382 |
383 def _changegroupinfo(repo, nodes, source): | 383 def _changegroupinfo(repo, nodes, source): |
384 if repo.ui.verbose or source == 'bundle': | 384 if repo.ui.verbose or source == b'bundle': |
385 repo.ui.status(_("%d changesets found\n") % len(nodes)) | 385 repo.ui.status(_(b"%d changesets found\n") % len(nodes)) |
386 | 386 |
387 def _makenewstream(newpart, repo, outgoing, version, source, | 387 def _makenewstream(newpart, repo, outgoing, version, source, |
388 bundlecaps, filematcher, cgversions): | 388 bundlecaps, filematcher, cgversions): |
389 old = changegroup._changegroupinfo | 389 old = changegroup._changegroupinfo |
390 try: | 390 try: |
406 return (cgstream, nbchanges, pversion) | 406 return (cgstream, nbchanges, pversion) |
407 | 407 |
408 def _makepartfromstream(newpart, repo, cgstream, nbchanges, version): | 408 def _makepartfromstream(newpart, repo, cgstream, nbchanges, version): |
409 # same as upstream code | 409 # same as upstream code |
410 | 410 |
411 part = newpart('changegroup', data=cgstream) | 411 part = newpart(b'changegroup', data=cgstream) |
412 if version: | 412 if version: |
413 part.addparam('version', version) | 413 part.addparam(b'version', version) |
414 | 414 |
415 part.addparam('nbchanges', '%d' % nbchanges, | 415 part.addparam(b'nbchanges', b'%d' % nbchanges, |
416 mandatory=False) | 416 mandatory=False) |
417 | 417 |
418 if 'treemanifest' in repo.requirements: | 418 if b'treemanifest' in repo.requirements: |
419 part.addparam('treemanifest', '1') | 419 part.addparam(b'treemanifest', b'1') |
420 | 420 |
421 # cache management | 421 # cache management |
422 | 422 |
423 def cachedir(repo): | 423 def cachedir(repo): |
424 cachedir = repo.ui.config('pullbundle', 'cache-directory') | 424 cachedir = repo.ui.config(b'pullbundle', b'cache-directory') |
425 if cachedir is not None: | 425 if cachedir is not None: |
426 return cachedir | 426 return cachedir |
427 return repo.cachevfs.join('pullbundles') | 427 return repo.cachevfs.join(b'pullbundles') |
428 | 428 |
429 def getcache(repo, bundlename): | 429 def getcache(repo, bundlename): |
430 cdir = cachedir(repo) | 430 cdir = cachedir(repo) |
431 bundlepath = os.path.join(cdir, bundlename) | 431 bundlepath = os.path.join(cdir, bundlename) |
432 if not os.path.exists(bundlepath): | 432 if not os.path.exists(bundlepath): |
452 with util.atomictempfile(bundlepath) as cachefile: | 452 with util.atomictempfile(bundlepath) as cachefile: |
453 for chunk in stream: | 453 for chunk in stream: |
454 cachefile.write(chunk) | 454 cachefile.write(chunk) |
455 yield chunk | 455 yield chunk |
456 | 456 |
457 BUNDLEMASK = "%s-%s-%010iskip-%010isize.hg" | 457 BUNDLEMASK = b"%s-%s-%010iskip-%010isize.hg" |
458 | 458 |
459 def makeonecgpart(newpart, repo, rangeid, outgoing, version, source, | 459 def makeonecgpart(newpart, repo, rangeid, outgoing, version, source, |
460 bundlecaps, filematcher, cgversions): | 460 bundlecaps, filematcher, cgversions): |
461 bundlename = cachedata = None | 461 bundlename = cachedata = None |
462 if rangeid is not None: | 462 if rangeid is not None: |
470 bundlecaps, filematcher, cgversions) | 470 bundlecaps, filematcher, cgversions) |
471 if bundlename is not None: | 471 if bundlename is not None: |
472 cgstream = cachewriter(repo, bundlename, partdata[0]) | 472 cgstream = cachewriter(repo, bundlename, partdata[0]) |
473 partdata = (cgstream,) + partdata[1:] | 473 partdata = (cgstream,) + partdata[1:] |
474 else: | 474 else: |
475 if repo.ui.verbose or source == 'bundle': | 475 if repo.ui.verbose or source == b'bundle': |
476 repo.ui.status(_("%d changesets found in caches\n") % nbchanges) | 476 repo.ui.status(_(b"%d changesets found in caches\n") % nbchanges) |
477 pversion = None | 477 pversion = None |
478 if cgversions: | 478 if cgversions: |
479 pversion = version | 479 pversion = version |
480 partdata = (cachedata, nbchanges, pversion) | 480 partdata = (cachedata, nbchanges, pversion) |
481 return _makepartfromstream(newpart, repo, *partdata) | 481 return _makepartfromstream(newpart, repo, *partdata) |
482 | 482 |
483 @command('debugpullbundlecacheoverlap', | 483 @command(b'debugpullbundlecacheoverlap', |
484 [('', 'count', 100, _('of "client" pulling')), | 484 [(b'', b'count', 100, _(b'of "client" pulling')), |
485 ('', 'min-cache', 1, _('minimum size of cached bundle')), | 485 (b'', b'min-cache', 1, _(b'minimum size of cached bundle')), |
486 ], | 486 ], |
487 _('hg debugpullbundlecacheoverlap [--client 100] REVSET')) | 487 _(b'hg debugpullbundlecacheoverlap [--client 100] REVSET')) |
488 def debugpullbundlecacheoverlap(ui, repo, *revs, **opts): | 488 def debugpullbundlecacheoverlap(ui, repo, *revs, **opts): |
489 '''Display statistic on bundle cache hit | 489 '''Display statistic on bundle cache hit |
490 | 490 |
491 This command "simulate pulls from multiple clients. Each using a random | 491 This command "simulate pulls from multiple clients. Each using a random |
492 subset of revisions defined by REVSET. And display statistic about the | 492 subset of revisions defined by REVSET. And display statistic about the |
493 overlap in bundle necessary to serve them. | 493 overlap in bundle necessary to serve them. |
494 ''' | 494 ''' |
495 actionrevs = scmutil.revrange(repo, revs) | 495 actionrevs = scmutil.revrange(repo, revs) |
496 if not revs: | 496 if not revs: |
497 raise error.Abort('No revision selected') | 497 raise error.Abort(b'No revision selected') |
498 count = opts['count'] | 498 count = opts['count'] |
499 min_cache = opts['min_cache'] | 499 min_cache = opts['min_cache'] |
500 | 500 |
501 bundlehits = collections.defaultdict(lambda: 0) | 501 bundlehits = collections.defaultdict(lambda: 0) |
502 pullstats = [] | 502 pullstats = [] |
503 | 503 |
504 rlen = lambda rangeid: repo.stablerange.rangelength(repo, rangeid) | 504 rlen = lambda rangeid: repo.stablerange.rangelength(repo, rangeid) |
505 | 505 |
506 repo.ui.write("gathering %d sample pulls within %d revisions\n" | 506 repo.ui.write(b"gathering %d sample pulls within %d revisions\n" |
507 % (count, len(actionrevs))) | 507 % (count, len(actionrevs))) |
508 if 1 < min_cache: | 508 if 1 < min_cache: |
509 repo.ui.write(" not caching ranges smaller than %d changesets\n" % min_cache) | 509 repo.ui.write(b" not caching ranges smaller than %d changesets\n" % min_cache) |
510 for i in range(count): | 510 for i in range(count): |
511 repo.ui.progress('gathering data', i, total=count) | 511 repo.ui.progress(b'gathering data', i, total=count) |
512 outgoing = takeonesample(repo, actionrevs) | 512 outgoing = takeonesample(repo, actionrevs) |
513 ranges = sliceoutgoing(repo, outgoing) | 513 ranges = sliceoutgoing(repo, outgoing) |
514 hitranges = 0 | 514 hitranges = 0 |
515 hitchanges = 0 | 515 hitchanges = 0 |
516 totalchanges = 0 | 516 totalchanges = 0 |
530 hitchanges, | 530 hitchanges, |
531 len(largeranges), | 531 len(largeranges), |
532 hitranges, | 532 hitranges, |
533 ) | 533 ) |
534 pullstats.append(stats) | 534 pullstats.append(stats) |
535 repo.ui.progress('gathering data', None) | 535 repo.ui.progress(b'gathering data', None) |
536 | 536 |
537 sizes = [] | 537 sizes = [] |
538 changesmissing = [] | 538 changesmissing = [] |
539 totalchanges = 0 | 539 totalchanges = 0 |
540 totalcached = 0 | 540 totalcached = 0 |
561 length = rlen(rangeid) | 561 length = rlen(rangeid) |
562 cachedsizes.append(length) | 562 cachedsizes.append(length) |
563 cachedhits.append(hits) | 563 cachedhits.append(hits) |
564 | 564 |
565 sizesdist = distribution(sizes) | 565 sizesdist = distribution(sizes) |
566 repo.ui.write(fmtdist('pull size', sizesdist)) | 566 repo.ui.write(fmtdist(b'pull size', sizesdist)) |
567 | 567 |
568 changesmissingdist = distribution(changesmissing) | 568 changesmissingdist = distribution(changesmissing) |
569 repo.ui.write(fmtdist('non-cached changesets', changesmissingdist)) | 569 repo.ui.write(fmtdist(b'non-cached changesets', changesmissingdist)) |
570 | 570 |
571 changesratiodist = distribution(changesratio) | 571 changesratiodist = distribution(changesratio) |
572 repo.ui.write(fmtdist('ratio of cached changesets', changesratiodist)) | 572 repo.ui.write(fmtdist(b'ratio of cached changesets', changesratiodist)) |
573 | 573 |
574 bundlecountdist = distribution(bundlecount) | 574 bundlecountdist = distribution(bundlecount) |
575 repo.ui.write(fmtdist('bundle count', bundlecountdist)) | 575 repo.ui.write(fmtdist(b'bundle count', bundlecountdist)) |
576 | 576 |
577 rangesratiodist = distribution(rangesratio) | 577 rangesratiodist = distribution(rangesratio) |
578 repo.ui.write(fmtdist('ratio of cached bundles', rangesratiodist)) | 578 repo.ui.write(fmtdist(b'ratio of cached bundles', rangesratiodist)) |
579 | 579 |
580 repo.ui.write('changesets served:\n') | 580 repo.ui.write(b'changesets served:\n') |
581 repo.ui.write(' total: %7d\n' % totalchanges) | 581 repo.ui.write(b' total: %7d\n' % totalchanges) |
582 repo.ui.write(' from cache: %7d (%2d%%)\n' | 582 repo.ui.write(b' from cache: %7d (%2d%%)\n' |
583 % (totalcached, (totalcached * 100 // totalchanges))) | 583 % (totalcached, (totalcached * 100 // totalchanges))) |
584 repo.ui.write(' bundle: %7d\n' % sum(bundlecount)) | 584 repo.ui.write(b' bundle: %7d\n' % sum(bundlecount)) |
585 | 585 |
586 cachedsizesdist = distribution(cachedsizes) | 586 cachedsizesdist = distribution(cachedsizes) |
587 repo.ui.write(fmtdist('size of cached bundles', cachedsizesdist)) | 587 repo.ui.write(fmtdist(b'size of cached bundles', cachedsizesdist)) |
588 | 588 |
589 cachedhitsdist = distribution(cachedhits) | 589 cachedhitsdist = distribution(cachedhits) |
590 repo.ui.write(fmtdist('hit on cached bundles', cachedhitsdist)) | 590 repo.ui.write(fmtdist(b'hit on cached bundles', cachedhitsdist)) |
591 | 591 |
592 def takeonesample(repo, revs): | 592 def takeonesample(repo, revs): |
593 node = repo.changelog.node | 593 node = repo.changelog.node |
594 pulled = random.sample(revs, max(4, len(revs) // 1000)) | 594 pulled = random.sample(revs, max(4, len(revs) // 1000)) |
595 pulled = repo.revs('%ld::%ld', pulled, pulled) | 595 pulled = repo.revs(b'%ld::%ld', pulled, pulled) |
596 nodes = [node(r) for r in pulled] | 596 nodes = [node(r) for r in pulled] |
597 return outgoingfromnodes(repo, nodes) | 597 return outgoingfromnodes(repo, nodes) |
598 | 598 |
599 def distribution(data): | 599 def distribution(data): |
600 data.sort() | 600 data.sort() |
601 length = len(data) | 601 length = len(data) |
602 return { | 602 return { |
603 'min': data[0], | 603 b'min': data[0], |
604 '10%': data[length // 10], | 604 b'10%': data[length // 10], |
605 '25%': data[length // 4], | 605 b'25%': data[length // 4], |
606 '50%': data[length // 2], | 606 b'50%': data[length // 2], |
607 '75%': data[(length // 4) * 3], | 607 b'75%': data[(length // 4) * 3], |
608 '90%': data[(length // 10) * 9], | 608 b'90%': data[(length // 10) * 9], |
609 '95%': data[(length // 20) * 19], | 609 b'95%': data[(length // 20) * 19], |
610 'max': data[-1], | 610 b'max': data[-1], |
611 } | 611 } |
612 | 612 |
613 STATSFORMAT = """{name}: | 613 STATSFORMAT = b"""{name}: |
614 min: {min} | 614 min: {min} |
615 10%: {10%} | 615 10%: {10%} |
616 25%: {25%} | 616 25%: {25%} |
617 50%: {50%} | 617 50%: {50%} |
618 75%: {75%} | 618 75%: {75%} |