largefiles: synchronize lfdirstate with dirstate after automated committing

Before this patch, after a successful "hg rebase" of a revision
removing largefiles, "hg status" may still unexpectedly show "R" for
such largefiles.
"lfilesrepo.commit" executes the special code path for automated
committing while rebase/transplant, and lfdirstate entries for removed
files aren't updated in this code path, even after successful
committing.
Then, "R" entries still existing in lfdirstate cause unexpected "hg
status" output.
This patch synchronizes lfdirstate with dirstate after automated
committing.

This patch passes False as "normallookup" to "synclfdirstate", because
modified files in "files()" of the just-committed context should be
marked as "normal".

This is a temporary fix chosen to keep the change small. For a
fundamental resolution of this kind of problem in the future,
lfdirstate should be updated simultaneously with dirstate. Hooking
"markcommitted" of ctx in "localrepository.commitctx" may achieve
this.

This problem occurs only when (1) the parent of the working directory
is rebased and (2) it removes largefiles, because:
- if the parent of the working directory isn't rebased, returning to
  the initial revision (= update) after the rebase hides this problem
- files added on the "other" branch (= the rebase target) are treated
  not as "added" but as "modified" (= "normal" status and "unset"
  timestamp) at merge time

This patch also tests the status of an added largefile, but only to
avoid regressions.

In addition to the conditions above, "hg status" must not be invoked
with existing files to reproduce this problem, because existing files
make "match._files" non-empty in the "lfilesrepo.status" code path
below:

    def sfindirstate(f):
        sf = lfutil.standin(f)
        dirstate = self.dirstate
        return sf in dirstate or sf in dirstate.dirs()
    match._files = [f for f in match._files
                    if sfindirstate(f)]
Not empty "match._files" prevents "status" on lfdirstate from
returning the result containing problematic "R" files.

This is why "large1" (removed) and "largeX" (added) are checked
separately in this patch.

The problematic code path in "lfilesrepo.commit" is also used by "hg
transplant", but this problem doesn't occur there, because the
invocation of "updatelfiles" after transplanting in
"overridetransplant" cleans up lfdirstate.
This patch tests also "hg transplant" as same as "hg rebase", but it
is only for avoiding regression.
# Revision graph generator for Mercurial
#
# Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
# Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""supports walking the history as DAGs suitable for graphical output
The most basic format we use is that of::
(id, type, data, [parentids])
The node and parent ids are arbitrary integers which identify a node in the
context of the graph returned. Type is a constant specifying the node type.
Data depends on type.
"""
from mercurial.node import nullrev
import util

CHANGESET = 'C'

def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks through revisions (which should be ordered
    from highest to lowest revision number). It returns a tuple for each node.
    The node and parent ids are arbitrary integers which identify a node in
    the context of the graph returned.
    """
if not revs:
return
cl = repo.changelog
lowestrev = revs.min()
gpcache = {}
knownrevs = revs.set()
for rev in revs:
ctx = repo[rev]
parents = sorted(set([p.rev() for p in ctx.parents()
if p.rev() in knownrevs]))
mpars = [p.rev() for p in ctx.parents() if
p.rev() != nullrev and p.rev() not in parents]
for mpar in mpars:
gp = gpcache.get(mpar)
if gp is None:
gp = gpcache[mpar] = grandparent(cl, lowestrev, revs, mpar)
if not gp:
parents.append(mpar)
else:
parents.extend(g for g in gp if g not in parents)
yield (ctx.rev(), CHANGESET, ctx, parents)
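
# For illustration (a hypothetical example, not captured output): given
# revisions 0..2 where revision 2 merges revisions 1 and 0, iterating over
# dagwalker(repo, revs) with revs ordered 2, 1, 0 would yield roughly:
#
#   (2, CHANGESET, ctx2, [0, 1])
#   (1, CHANGESET, ctx1, [0])
#   (0, CHANGESET, ctx0, [])
#
# Revision 0's only parent is nullrev, which is filtered out, so its
# parent list is empty.
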
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks the given nodes. It only returns parents
    that are in nodes, too.
    """
include = set(nodes)
for node in nodes:
ctx = repo[node]
parents = set([p.rev() for p in ctx.parents() if p.node() in include])
yield (ctx.rev(), CHANGESET, ctx, sorted(parents))

def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

        (id, type, data, (col, color), [(col, nextcol, color)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples indicating the edges between the current node and its
        parents.
    """
seen = []
colors = {}
newcolor = 1
config = {}
for key, val in repo.ui.configitems('graph'):
if '.' in key:
branch, setting = key.rsplit('.', 1)
# Validation
if setting == "width" and val.isdigit():
config.setdefault(branch, {})[setting] = int(val)
elif setting == "color" and val.isalnum():
config.setdefault(branch, {})[setting] = val
if config:
getconf = util.lrucachefunc(
lambda rev: config.get(repo[rev].branch(), {}))
else:
getconf = lambda rev: {}
for (cur, type, data, parents) in dag:
# Compute seen and next
if cur not in seen:
seen.append(cur) # new head
colors[cur] = newcolor
newcolor += 1
col = seen.index(cur)
color = colors.pop(cur)
next = seen[:]
# Add parents to next
addparents = [p for p in parents if p not in next]
next[col:col + 1] = addparents
# Set colors for the parents
for i, p in enumerate(addparents):
if not i:
colors[p] = color
else:
colors[p] = newcolor
newcolor += 1
# Add edges to the graph
edges = []
for ecol, eid in enumerate(seen):
if eid in next:
bconf = getconf(eid)
edges.append((
ecol, next.index(eid), colors[eid],
bconf.get('width', -1),
bconf.get('color', '')))
elif eid == cur:
for p in parents:
bconf = getconf(p)
edges.append((
ecol, next.index(p), color,
bconf.get('width', -1),
bconf.get('color', '')))
# Yield and move on
yield (cur, type, data, (col, color), edges)
seen = next
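
# The "[graph]" config section read above enables per-branch edge styling.
# A hypothetical hgrc fragment exercising the two validated settings
# ("width" must be all digits, "color" must be alphanumeric, typically a
# hex RGB value):
#
#   [graph]
#   default.width = 1
#   stable.color = FF0000
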
def grandparent(cl, lowestrev, roots, head):
    """Return all ancestors of head in roots whose revision is
    greater than or equal to lowestrev.
    """
pending = set([head])
seen = set()
kept = set()
llowestrev = max(nullrev, lowestrev)
while pending:
r = pending.pop()
if r >= llowestrev and r not in seen:
if r in roots:
kept.add(r)
else:
pending.update([p for p in cl.parentrevs(r)])
seen.add(r)
return sorted(kept)

def asciiedges(type, char, lines, seen, rev, parents):
"""adds edge info to changelog DAG walk suitable for ascii()"""
if rev not in seen:
seen.append(rev)
nodeidx = seen.index(rev)
knownparents = []
newparents = []
for parent in parents:
if parent in seen:
knownparents.append(parent)
else:
newparents.append(parent)
ncols = len(seen)
nextseen = seen[:]
nextseen[nodeidx:nodeidx + 1] = newparents
edges = [(nodeidx, nextseen.index(p)) for p in knownparents if p != nullrev]
while len(newparents) > 2:
# ascii() only knows how to add or remove a single column between two
# calls. Nodes with more than two parents break this constraint so we
# introduce intermediate expansion lines to grow the active node list
# slowly.
edges.append((nodeidx, nodeidx))
edges.append((nodeidx, nodeidx + 1))
nmorecols = 1
yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
char = '\\'
lines = []
nodeidx += 1
ncols += 1
edges = []
del newparents[0]
if len(newparents) > 0:
edges.append((nodeidx, nodeidx))
if len(newparents) > 1:
edges.append((nodeidx, nodeidx + 1))
nmorecols = len(nextseen) - ncols
seen[:] = nextseen
yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))

def _fixlongrightedges(edges):
for (i, (start, end)) in enumerate(edges):
if end > start:
edges[i] = (start, end + 1)

def _getnodelineedgestail(
        node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
# Still going in the same non-vertical direction.
if n_columns_diff == -1:
start = max(node_index + 1, p_node_index)
tail = ["|", " "] * (start - node_index - 1)
tail.extend(["/", " "] * (n_columns - start))
return tail
else:
return ["\\", " "] * (n_columns - node_index - 1)
else:
return ["|", " "] * (n_columns - node_index - 1)

def _drawedges(edges, nodeline, interline):
for (start, end) in edges:
if start == end + 1:
interline[2 * end + 1] = "/"
elif start == end - 1:
interline[2 * start + 1] = "\\"
elif start == end:
interline[2 * start] = "|"
else:
if 2 * end >= len(nodeline):
continue
nodeline[2 * end] = "+"
if start > end:
(start, end) = (end, start)
for i in range(2 * start + 1, 2 * end):
if nodeline[i] != "+":
nodeline[i] = "-"

def _getpaddingline(ni, n_columns, edges):
line = []
line.extend(["|", " "] * ni)
if (ni, ni - 1) in edges or (ni, ni) in edges:
# (ni, ni - 1) (ni, ni)
# | | | | | | | |
# +---o | | o---+
# | | c | | c | |
# | |/ / | |/ /
# | | | | | |
c = "|"
else:
c = " "
line.extend([c, " "])
line.extend(["|", " "] * (n_columns - ni - 1))
return line

def asciistate():
"""returns the initial value for the "state" argument to ascii()"""
return [0, 0]

def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """
idx, edges, ncols, coldiff = coldata
assert -2 < coldiff < 2
if coldiff == -1:
# Transform
#
# | | | | | |
# o | | into o---+
# |X / |/ /
# | | | |
_fixlongrightedges(edges)
# add_padding_line says whether to rewrite
#
# | | | | | | | |
# | o---+ into | o---+
# | / / | | | # <--- padding line
# o | | | / /
# o | |
add_padding_line = (len(text) > 2 and coldiff == -1 and
[x for (x, y) in edges if x + 1 < y])
# fix_nodeline_tail says whether to rewrite
#
# | | o | | | | o | |
# | | |/ / | | |/ /
# | o | | into | o / / # <--- fixed nodeline tail
# | |/ / | |/ /
# o | | o | |
fix_nodeline_tail = len(text) <= 2 and not add_padding_line
# nodeline is the line containing the node character (typically o)
nodeline = ["|", " "] * idx
nodeline.extend([char, " "])
nodeline.extend(
_getnodelineedgestail(idx, state[1], ncols, coldiff,
state[0], fix_nodeline_tail))
# shift_interline is the line containing the non-vertical
# edges between this entry and the next
shift_interline = ["|", " "] * idx
if coldiff == -1:
n_spaces = 1
edge_ch = "/"
elif coldiff == 0:
n_spaces = 2
edge_ch = "|"
else:
n_spaces = 3
edge_ch = "\\"
shift_interline.extend(n_spaces * [" "])
shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))
# draw edges from the current node to its parents
_drawedges(edges, nodeline, shift_interline)
# lines is the list of all graph lines to print
lines = [nodeline]
if add_padding_line:
lines.append(_getpaddingline(idx, ncols, edges))
lines.append(shift_interline)
# make sure that there are as many graph lines as there are
# log strings
while len(text) < len(lines):
text.append("")
if len(lines) < len(text):
extra_interline = ["|", " "] * (ncols + coldiff)
while len(lines) < len(text):
lines.append(extra_interline)
# print lines
indentation_level = max(ncols, ncols + coldiff)
for (line, logstr) in zip(lines, text):
ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
ui.write(ln.rstrip() + '\n')
# ... and start over
state[0] = coldiff
state[1] = idx
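
# A minimal driver sketch showing how the pieces above fit together
# (hypothetical; the real caller lives in cmdutil and renders proper log
# text instead of str(ctx)):
#
#   seen, state = [], asciistate()
#   for rev, type, ctx, parents in dagwalker(repo, revs):
#       for type, char, lines, coldata in asciiedges(type, 'o', [str(ctx)],
#                                                    seen, rev, parents):
#           ascii(ui, state, type, char, lines, coldata)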