setdiscovery: fix hang when #heads>200 (issue2971)
When setting up the next sample, we always add all of the heads, regardless
of the desired max sample size. But if the number of heads exceeds this
size, then we don't add any more nodes from the still undecided set.
(This is debatable per se, and I'll investigate it, but it's how it is
designed at the moment.)
The bug was that we always added the overall heads, not the heads of the
remaining undecided set. Thus, if #heads>200 (the desired sample size), we
no longer made any progress.
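For illustration, here is a minimal, self-contained sketch of the behaviour
described above. All names in it (heads_of, takesample, the parents mapping,
the all_heads argument, the buggy flag) are hypothetical stand-ins, not
Mercurial's real setdiscovery API; it only shows why seeding the sample with
the overall heads stalls once there are more heads than the sample size.

def heads_of(nodes, parents):
    """Heads of 'nodes': members that no other member lists as a parent."""
    nonheads = set(p for n in nodes for p in parents.get(n, ()) if p in nodes)
    return set(nodes) - nonheads

def takesample(undecided, parents, all_heads, size=200, buggy=False):
    # The sample always includes heads, regardless of the desired size.
    # Bug: seed it with the overall repository heads.
    # Fix: seed it with the heads of the still-undecided set only.
    always = set(all_heads) if buggy else heads_of(undecided, parents)
    sample = set(always)
    room = size - len(sample)
    if room > 0:
        # Only when there is room left are further nodes pulled in from
        # the undecided set.
        sample.update(sorted(undecided - sample)[:room])
    return sample

With buggy=True and more than 'size' heads overall, room never becomes
positive and the sample stays the same fixed subset of repository heads, so
the undecided set never shrinks. With the fix, the sample consists of the
undecided set's own heads, which get classified and removed each round, so
discovery keeps making progress.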
#!/usr/bin/env python
"""
Tests the behaviour of filelog w.r.t. data starting with '\1\n'
"""
from mercurial import ui, hg
from mercurial.node import nullid, hex
myui = ui.ui()
repo = hg.repository(myui, path='.', create=True)
fl = repo.file('foobar')
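# Data that starts with '\1\n' collides with the marker revlog uses to flag
# copy/rename metadata at the start of a file revision, so filelog has to
# handle it specially; that is what the checks below exercise.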
def addrev(text, renamed=False):
    if renamed:
        # data doesn't matter. Just make sure filelog.renamed() returns True
        meta = dict(copyrev=hex(nullid), copy='bar')
    else:
        meta = {}

    t = repo.transaction('commit')
    try:
        node = fl.add(text, meta, t, 0, nullid, nullid)
        return node
    finally:
        t.close()

def error(text):
    print 'ERROR: ' + text

textwith = '\1\nfoo'
without = 'foo'

node = addrev(textwith)
if not textwith == fl.read(node):
    error('filelog.read for data starting with \\1\\n')
if fl.cmp(node, textwith) or not fl.cmp(node, without):
    error('filelog.cmp for data starting with \\1\\n')
if fl.size(0) != len(textwith):
    error('FIXME: This is a known failure of filelog.size for data starting '
          'with \\1\\n')

node = addrev(textwith, renamed=True)
if not textwith == fl.read(node):
    error('filelog.read for a renaming + data starting with \\1\\n')
if fl.cmp(node, textwith) or not fl.cmp(node, without):
    error('filelog.cmp for a renaming + data starting with \\1\\n')
if fl.size(1) != len(textwith):
    error('filelog.size for a renaming + data starting with \\1\\n')

print 'OK.'