upgraderepo: add a config option for parallel computation

The option is put to use to compute the new copy-tracing sidedata in parallel.
It uses the multiprocessing module, as it has the appropriate primitives for
what we need. Gregory Szorc had concerns about Windows, so we disabled it
there.

See the inline comments for details on the parallel implementation.
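
As a rough illustration of the shape of the change, here is a minimal sketch of
dispatching per-revision work to a multiprocessing pool while falling back to
serial computation on Windows. The names (_compute_sidedata, compute_all,
workers) are hypothetical and not taken from the patch itself:

import multiprocessing
import sys


def _compute_sidedata(rev):
    # Hypothetical stand-in for the real per-revision copy-tracing
    # sidedata computation.
    return rev, b'...'


def compute_all(revs, workers):
    # Parallelism is disabled on Windows (see above) or when the config
    # option asks for a single worker; fall back to a serial loop there.
    if workers <= 1 or sys.platform == 'win32':
        return dict(_compute_sidedata(r) for r in revs)
    with multiprocessing.Pool(workers) as pool:
        return dict(pool.map(_compute_sidedata, revs))

Keeping the per-revision function free of shared state lets the results be
collected into a plain mapping keyed by revision, whether computed serially or
through the pool.
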
#!/usr/bin/env python
"""
Tests the behavior of filelog w.r.t. data starting with '\1\n'
"""
from __future__ import absolute_import, print_function
from mercurial.node import (
    hex,
    nullid,
)
from mercurial import (
    hg,
    ui as uimod,
)

myui = uimod.ui.load()
repo = hg.repository(myui, path=b'.', create=True)
fl = repo.file(b'foobar')


def addrev(text, renamed=False):
    if renamed:
        # data doesn't matter. Just make sure filelog.renamed() returns True
        meta = {b'copyrev': hex(nullid), b'copy': b'bar'}
    else:
        meta = {}

    lock = t = None
    try:
        lock = repo.lock()
        t = repo.transaction(b'commit')
        node = fl.add(text, meta, t, 0, nullid, nullid)
        return node
    finally:
        if t:
            t.close()
        if lock:
            lock.release()


def error(text):
    print('ERROR: ' + text)

textwith = b'\1\nfoo'
without = b'foo'
node = addrev(textwith)
if not textwith == fl.read(node):
    error('filelog.read for data starting with \\1\\n')
if fl.cmp(node, textwith) or not fl.cmp(node, without):
    error('filelog.cmp for data starting with \\1\\n')
if fl.size(0) != len(textwith):
    error(
        'FIXME: This is a known failure of filelog.size for data starting '
        'with \\1\\n'
    )

node = addrev(textwith, renamed=True)
if not textwith == fl.read(node):
    error('filelog.read for a renaming + data starting with \\1\\n')
if fl.cmp(node, textwith) or not fl.cmp(node, without):
    error('filelog.cmp for a renaming + data starting with \\1\\n')
if fl.size(1) != len(textwith):
    error('filelog.size for a renaming + data starting with \\1\\n')

print('OK.')