changeset 192:5d8553352d2e
Changes to network protocol
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1
Changes to network protocol
Stream changes at the delta level rather than as whole delta groups.
 This breaks the protocol: we now send a zero-byte delta to indicate the
 end of a group, rather than sending the entire group length up front
 (see the framing sketch after the signature block below).
Fix filename length asymmetry while we're breaking things
Fix hidden O(n^2) bug in calculating changegroup
 list.append(e) is O(1), list + [element] is not: it copies the whole list
 each time (a short timing illustration follows the signature block below)
Decompress chunks on read in revlog.group()
Improve status messages:
 report bytes transferred
 report "nothing to do"
Deal with /dev/null path brokenness
Remove untriggered patch assertion
manifest hash: 3eedcfe878561f9eb4adedb04f6be618fb8ae8d8
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.0 (GNU/Linux)
iD8DBQFCmzlqywK+sNU5EO8RAn0KAJ4z4toWSSGjLoZO6FKWLx/3QbZufACglQgd
S48bumc++DnuY1iPSNWKGAI=
=lCjx
-----END PGP SIGNATURE-----
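The first item in the message is the heart of the change: rather than sending a whole group prefixed by its total length, each delta now travels as its own length-prefixed chunk, and a zero-length chunk marks the end of the group. Filenames get the same framing (their length field also counts itself), which is the "length asymmetry" fix. Below is a minimal sketch of that framing, mirroring getchunk()/getgroup() in the mercurial/hg.py diff; writechunk, closegroup, and readgroup are illustrative names and are not part of the patch.

import struct
from io import BytesIO

def writechunk(f, data):
    # each chunk is preceded by a 4-byte big-endian length that counts
    # the length field itself, as in struct.pack(">l", len(meta) + len(d) + 4)
    f.write(struct.pack(">l", len(data) + 4))
    f.write(data)

def closegroup(f):
    # a zero/empty chunk terminates a group, replacing the up-front group length
    f.write(struct.pack(">l", 0))

def readgroup(f):
    # receiver side, mirroring getchunk()/getgroup(): consume chunks until
    # the terminating empty chunk is seen
    while True:
        d = f.read(4)
        if not d:
            break
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            break
        yield f.read(l - 4)

buf = BytesIO()
for delta in [b"delta one", b"delta two"]:
    writechunk(buf, delta)
closegroup(buf)
buf.seek(0)
print(list(readgroup(buf)))   # [b'delta one', b'delta two']

Because the length field counts its own four bytes, any value of four or less can safely be treated as end-of-group, which is exactly the check the new getchunk() performs.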
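The O(n^2) item refers to the span-merging loop in revlog.group(): the old code grew a list with f[2] + e[2] (list concatenation) on every pass, while the new code appends in place. The micro-benchmark below is purely illustrative and not part of the patch; it only demonstrates why repeated concatenation is quadratic.

import timeit

def build_with_concat(n):
    # l = l + [e] copies the whole list on every step: O(n) per step, O(n^2) total
    l = []
    for i in range(n):
        l = l + [i]
    return l

def build_with_append(n):
    # l.append(e) is amortized O(1) per step, O(n) total
    l = []
    for i in range(n):
        l.append(i)
    return l

for n in (1000, 2000, 4000):
    t_concat = timeit.timeit(lambda: build_with_concat(n), number=3)
    t_append = timeit.timeit(lambda: build_with_append(n), number=3)
    print("n=%d  concat %.4fs  append %.4fs" % (n, t_concat, t_append))

Doubling n roughly quadruples the concatenation time but only doubles the append time.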
author   | mpm@selenic.com
date     | Mon, 30 May 2005 08:03:54 -0800
parents  | d7e859cf2f1b
children | 0a28dfe59f8f
files    | hg mercurial/hg.py mercurial/revlog.py
diffstat | 3 files changed, 68 insertions(+), 63 deletions(-)
--- a/hg Sun May 29 09:24:51 2005 -0800
+++ b/hg Mon May 30 08:03:54 2005 -0800
@@ -417,7 +417,6 @@
         if args[0] in paths: args[0] = paths[args[0]]
 
         other = hg.repository(ui, args[0])
-        ui.status("requesting changegroup\n")
         cg = repo.getchangegroup(other)
         repo.addchangegroup(cg)
     else:
--- a/mercurial/hg.py Sun May 29 09:24:51 2005 -0800
+++ b/mercurial/hg.py Mon May 30 08:03:54 2005 -0800
@@ -291,6 +291,7 @@
         return os.path.join(self.path, f)
 
     def file(self, f):
+        if f[0] == '/': f = f[1:]
         return filelog(self.opener, f)
 
     def transaction(self):
@@ -530,6 +531,8 @@
         fetch = []
         seen = {}
         seenbranch = {}
+
+        self.ui.status("searching for changes\n")
         tip = remote.branches([])[0]
         self.ui.debug("remote tip branch is %s:%s\n" %
                       (short(tip[0]), short(tip[1])))
@@ -542,7 +545,7 @@
         unknown = [tip]
 
         if tip[0] in m:
-            self.ui.note("nothing to do!\n")
+            self.ui.status("nothing to do!\n")
             return None
 
         while unknown:
@@ -627,14 +630,13 @@
         # the changegroup is changesets + manifests + all file revs
         revs = [ self.changelog.rev(n) for n in nodes ]
 
-        yield self.changelog.group(linkmap)
-        yield self.manifest.group(linkmap)
-
+        for y in self.changelog.group(linkmap): yield y
+        for y in self.manifest.group(linkmap): yield y
         for f in changed:
+            yield struct.pack(">l", len(f) + 4) + f
             g = self.file(f).group(linkmap)
-            if not g: raise "couldn't find change to %s" % f
-            l = struct.pack(">l", len(f))
-            yield "".join([l, f, g])
+            for y in g:
+                yield y
 
     def addchangegroup(self, generator):
         changesets = files = revisions = 0
@@ -656,11 +658,18 @@
         if not generator: return
         source = genread(generator)
 
-        def getchunk(add = 0):
+        def getchunk():
             d = source.read(4)
             if not d: return ""
             l = struct.unpack(">l", d)[0]
-            return source.read(l - 4 + add)
+            if l <= 4: return ""
+            return source.read(l - 4)
+
+        def getgroup():
+            while 1:
+                c = getchunk()
+                if not c: break
+                yield c
 
         tr = self.transaction()
         simple = True
@@ -671,21 +680,17 @@
         def report(x):
             self.ui.debug("add changeset %s\n" % short(x))
             return self.changelog.count()
-
-        csg = getchunk()
+
         co = self.changelog.tip()
-        cn = self.changelog.addgroup(csg, report, tr)
+        cn = self.changelog.addgroup(getgroup(), report, tr)
 
-        revisions = self.changelog.rev(cn) - self.changelog.rev(co)
-        changesets = revisions
+        changesets = self.changelog.rev(cn) - self.changelog.rev(co)
 
         self.ui.status("adding manifests\n")
         # pull off the manifest group
-        mfg = getchunk()
         mm = self.manifest.tip()
-        mo = self.manifest.addgroup(mfg, lambda x: self.changelog.rev(x), tr)
-
-        revisions += self.manifest.rev(mo) - self.manifest.rev(mm)
+        mo = self.manifest.addgroup(getgroup(),
+                                    lambda x: self.changelog.rev(x), tr)
 
         # do we need a resolve?
         if self.changelog.ancestor(co, cn) != co:
@@ -749,13 +754,12 @@
         # process the files
         self.ui.status("adding files\n")
         while 1:
-            f = getchunk(4)
+            f = getchunk()
            if not f: break
-            fg = getchunk()
             self.ui.debug("adding %s revisions\n" % f)
             fl = self.file(f)
             o = fl.tip()
-            n = fl.addgroup(fg, lambda x: self.changelog.rev(x), tr)
+            n = fl.addgroup(getgroup(), lambda x: self.changelog.rev(x), tr)
             revisions += fl.rev(n) - fl.rev(o)
             files += 1
             if f in need:
@@ -774,9 +778,9 @@
         # For simple merges, we don't need to resolve manifests or changesets
         if simple:
             self.ui.debug("simple merge, skipping resolve\n")
-            self.ui.status(("added %d changesets, %d files," +
+            self.ui.status(("modified %d files, added %d changesets" +
                             " and %d new revisions\n")
-                           % (changesets, files, revisions))
+                           % (files, changesets, revisions))
             tr.close()
             return
 
@@ -865,12 +869,15 @@
         n = " ".join(map(hex, nodes))
         zd = zlib.decompressobj()
         f = self.do_cmd("changegroup", roots=n)
+        bytes = 0
         while 1:
             d = f.read(4096)
+            bytes += len(d)
             if not d:
                 yield zd.flush()
                 break
             yield zd.decompress(d)
+        self.ui.note("%d bytes of data transfered\n" % bytes)
 
 def repository(ui, path=None, create=0):
     if path and path[:7] == "http://":
--- a/mercurial/revlog.py Sun May 29 09:24:51 2005 -0800
+++ b/mercurial/revlog.py Mon May 30 08:03:54 2005 -0800
@@ -244,8 +244,6 @@
             end = self.end(t)
             prev = self.revision(self.tip())
             d = self.diff(prev, text)
-            if self.patches(prev, [d]) != text:
-                raise AssertionError("diff failed")
             data = compress(d)
             dist = end - start + len(data)
 
@@ -330,7 +328,9 @@
                 needed[i] = 1
 
         # if we don't have any revisions touched by these changesets, bail
-        if not revs: return struct.pack(">l", 0)
+        if not revs:
+            yield struct.pack(">l", 0)
+            return
 
         # add the parent of the first rev
         p = self.parents(self.node(revs[0]))[0]
@@ -352,25 +352,25 @@
         needed = needed.keys()
         needed.sort()
         spans = []
+        oo = -1
+        ol = 0
         for n in needed:
             if n < 0: continue
             o = self.start(n)
             l = self.length(n)
-            spans.append((o, l, [(n, l)]))
-
-        # merge spans
-        merge = [spans.pop(0)]
-        while spans:
-            e = spans.pop(0)
-            f = merge[-1]
-            if e[0] == f[0] + f[1]:
-                merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
+            if oo + ol == o: # can we merge with the previous?
+                nl = spans[-1][2]
+                nl.append((n, l))
+                ol += l
+                spans[-1] = (oo, ol, nl)
             else:
-                merge.append(e)
+                oo = o
+                ol = l
+                spans.append((oo, ol, [(n, l)]))
 
         # read spans in, divide up chunks
         chunks = {}
-        for span in merge:
+        for span in spans:
             # we reopen the file for each span to make http happy for now
             f = self.opener(self.datafile)
             f.seek(span[0])
@@ -379,12 +379,12 @@
             # divide up the span
             pos = 0
             for r, l in span[2]:
-                chunks[r] = data[pos: pos + l]
+                chunks[r] = decompress(data[pos: pos + l])
                 pos += l
 
         # helper to reconstruct intermediate versions
         def construct(text, base, rev):
-            bins = [decompress(chunks[r]) for r in xrange(base + 1, rev + 1)]
+            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
             return mdiff.patches(text, bins)
 
         # build deltas
@@ -392,11 +392,12 @@
         for d in xrange(0, len(revs) - 1):
             a, b = revs[d], revs[d + 1]
             n = self.node(b)
-
+
+            # do we need to construct a new delta?
             if a + 1 != b or self.base(b) == b:
                 if a >= 0:
                     base = self.base(a)
-                    ta = decompress(chunks[self.base(a)])
+                    ta = chunks[self.base(a)]
                     ta = construct(ta, base, a)
                 else:
                     ta = ""
@@ -406,36 +407,30 @@
                     base = a
                     tb = ta
                 else:
-                    tb = decompress(chunks[self.base(b)])
+                    tb = chunks[self.base(b)]
                     tb = construct(tb, base, b)
                 d = self.diff(ta, tb)
             else:
-                d = decompress(chunks[b])
+                d = chunks[b]
 
             p = self.parents(n)
             meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
             l = struct.pack(">l", len(meta) + len(d) + 4)
-            deltas.append(l + meta + d)
+            yield l
+            yield meta
+            yield d
 
-        l = struct.pack(">l", sum(map(len, deltas)) + 4)
-        deltas.insert(0, l)
-        return "".join(deltas)
-
-    def addgroup(self, data, linkmapper, transaction):
+        yield struct.pack(">l", 0)
+
+    def addgroup(self, revs, linkmapper, transaction):
         # given a set of deltas, add them to the revision log. the
         # first delta is against its parent, which should be in our
        # log, the rest are against the previous delta.
 
-        if not data: return self.tip()
-
-        # retrieve the parent revision of the delta chain
-        chain = data[24:44]
-        if not chain in self.nodemap:
-            raise "unknown base %s" % short(chain[:4])
-
         # track the base of the current delta log
         r = self.count()
         t = r - 1
+        node = nullid
 
         base = prev = -1
         start = end = 0
@@ -452,15 +447,19 @@
         ifh = self.opener(self.indexfile, "a")
 
         # loop through our set of deltas
-        pos = 0
-        while pos < len(data):
-            l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
-                                                data[pos:pos+84])
+        chain = None
+        for chunk in revs:
+            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             link = linkmapper(cs)
             if node in self.nodemap:
                 raise "already have %s" % hex(node[:4])
-            delta = data[pos + 84:pos + l]
-            pos += l
+            delta = chunk[80:]
+
+            if not chain:
+                # retrieve the parent revision of the delta chain
+                chain = p1
+                if not chain in self.nodemap:
+                    raise "unknown base %s" % short(chain[:4])
 
             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous