Mercurial > hg-stable
changeset 48056:37a41267d000
branching: merge stable into default
author: Raphaël Gomès <rgomes@octobus.net>
date: Tue, 28 Sep 2021 09:40:57 +0200
parents: 7970895a21cb (current diff), 62f325f9b347 (diff)
children: 008959fcbfb2
files: mercurial/scmutil.py rust/hg-core/src/dirstate_tree/owning.rs rust/hg-core/src/dirstate_tree/owning_dispatch.rs tests/test-clone-stream.t tests/test-persistent-nodemap.t
diffstat: 151 files changed, 4125 insertions(+), 3414 deletions(-) [+]
line wrap: on
line diff
--- a/contrib/dirstatenonnormalcheck.py Tue Sep 21 18:18:56 2021 +0200 +++ b/contrib/dirstatenonnormalcheck.py Tue Sep 28 09:40:57 2021 +0200 @@ -24,17 +24,27 @@ return res +INCONSISTENCY_MESSAGE = b"""%s call to %s + inconsistency in nonnormalset + result from dirstatemap: %s + expected nonnormalset: %s +""" + + def checkconsistency(ui, orig, dmap, _nonnormalset, label): """Compute nonnormalset from dmap, check that it matches _nonnormalset""" nonnormalcomputedmap = nonnormalentries(dmap) if _nonnormalset != nonnormalcomputedmap: b_orig = pycompat.sysbytes(repr(orig)) - ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate') - ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate') b_nonnormal = pycompat.sysbytes(repr(_nonnormalset)) - ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate') b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap)) - ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate') + msg = INCONSISTENCY_MESSAGE % ( + label, + b_orig, + b_nonnormal, + b_nonnormalcomputed, + ) + ui.develwarn(msg, config=b'dirstate') def _checkdirstate(orig, self, *args, **kwargs): @@ -59,11 +69,13 @@ if paranoid: # We don't do all these checks when paranoid is disable as it would # make the extension run very slowly on large repos - extensions.wrapfunction(dirstatecl, 'normallookup', _checkdirstate) - extensions.wrapfunction(dirstatecl, 'otherparent', _checkdirstate) - extensions.wrapfunction(dirstatecl, 'normal', _checkdirstate) extensions.wrapfunction(dirstatecl, 'write', _checkdirstate) - extensions.wrapfunction(dirstatecl, 'add', _checkdirstate) - extensions.wrapfunction(dirstatecl, 'remove', _checkdirstate) - extensions.wrapfunction(dirstatecl, 'merge', _checkdirstate) - extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate) + extensions.wrapfunction(dirstatecl, 'set_tracked', _checkdirstate) + extensions.wrapfunction(dirstatecl, 'set_untracked', _checkdirstate) + 
extensions.wrapfunction( + dirstatecl, 'set_possibly_dirty', _checkdirstate + ) + extensions.wrapfunction( + dirstatecl, 'update_file_p1', _checkdirstate + ) + extensions.wrapfunction(dirstatecl, 'update_file', _checkdirstate)
--- a/hgext/fastannotate/protocol.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/fastannotate/protocol.py Tue Sep 28 09:40:57 2021 +0200 @@ -140,12 +140,10 @@ def getannotate(self, path, lastnode=None): if not self.capable(b'getannotate'): ui.warn(_(b'remote peer cannot provide annotate cache\n')) - yield None, None + return None, None else: args = {b'path': path, b'lastnode': lastnode or b''} - f = wireprotov1peer.future() - yield args, f - yield _parseresponse(f.value) + return args, _parseresponse peer.__class__ = fastannotatepeer
--- a/hgext/infinitepush/__init__.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/infinitepush/__init__.py Tue Sep 28 09:40:57 2021 +0200 @@ -431,18 +431,19 @@ @wireprotov1peer.batchable def listkeyspatterns(self, namespace, patterns): if not self.capable(b'pushkey'): - yield {}, None - f = wireprotov1peer.future() + return {}, None self.ui.debug(b'preparing listkeys for "%s"\n' % namespace) - yield { + + def decode(d): + self.ui.debug( + b'received listkey for "%s": %i bytes\n' % (namespace, len(d)) + ) + return pushkey.decodekeys(d) + + return { b'namespace': encoding.fromlocal(namespace), b'patterns': wireprototypes.encodelist(patterns), - }, f - d = f.value - self.ui.debug( - b'received listkey for "%s": %i bytes\n' % (namespace, len(d)) - ) - yield pushkey.decodekeys(d) + }, decode def _readbundlerevs(bundlerepo):
--- a/hgext/largefiles/lfcommands.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/largefiles/lfcommands.py Tue Sep 28 09:40:57 2021 +0200 @@ -577,7 +577,7 @@ repo.wvfs.unlinkpath(lfutil.standin(f)) # This needs to happen for dropped files, otherwise they stay in # the M state. - lfdirstate._drop(f) + lfdirstate._map.reset_state(f) statuswriter(_(b'getting changed largefiles\n')) cachelfiles(ui, repo, None, lfiles)
--- a/hgext/largefiles/proto.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/largefiles/proto.py Tue Sep 28 09:40:57 2021 +0200 @@ -184,17 +184,18 @@ @wireprotov1peer.batchable def statlfile(self, sha): - f = wireprotov1peer.future() + def decode(d): + try: + return int(d) + except (ValueError, urlerr.httperror): + # If the server returns anything but an integer followed by a + # newline, newline, it's not speaking our language; if we get + # an HTTP error, we can't be sure the largefile is present; + # either way, consider it missing. + return 2 + result = {b'sha': sha} - yield result, f - try: - yield int(f.value) - except (ValueError, urlerr.httperror): - # If the server returns anything but an integer followed by a - # newline, newline, it's not speaking our language; if we get - # an HTTP error, we can't be sure the largefile is present; - # either way, consider it missing. - yield 2 + return result, decode repo.__class__ = lfileswirerepository
--- a/hgext/narrow/narrowcommands.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/narrow/narrowcommands.py Tue Sep 28 09:40:57 2021 +0200 @@ -289,7 +289,7 @@ repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup) todelete = [] - for t, f, f2, size in repo.store.datafiles(): + for t, f, size in repo.store.datafiles(): if f.startswith(b'data/'): file = f[5:-2] if not newmatch(file):
--- a/hgext/remotefilelog/contentstore.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/remotefilelog/contentstore.py Tue Sep 28 09:40:57 2021 +0200 @@ -378,7 +378,7 @@ ledger.markdataentry(self, treename, node) ledger.markhistoryentry(self, treename, node) - for t, path, encoded, size in self._store.datafiles(): + for t, path, size in self._store.datafiles(): if path[:5] != b'meta/' or path[-2:] != b'.i': continue
--- a/hgext/remotefilelog/fileserverclient.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/remotefilelog/fileserverclient.py Tue Sep 28 09:40:57 2021 +0200 @@ -63,12 +63,14 @@ raise error.Abort( b'configured remotefile server does not support getfile' ) - f = wireprotov1peer.future() - yield {b'file': file, b'node': node}, f - code, data = f.value.split(b'\0', 1) - if int(code): - raise error.LookupError(file, node, data) - yield data + + def decode(d): + code, data = d.split(b'\0', 1) + if int(code): + raise error.LookupError(file, node, data) + return data + + return {b'file': file, b'node': node}, decode @wireprotov1peer.batchable def x_rfl_getflogheads(self, path): @@ -77,10 +79,11 @@ b'configured remotefile server does not ' b'support getflogheads' ) - f = wireprotov1peer.future() - yield {b'path': path}, f - heads = f.value.split(b'\n') if f.value else [] - yield heads + + def decode(d): + return d.split(b'\n') if d else [] + + return {b'path': path}, decode def _updatecallstreamopts(self, command, opts): if command != b'getbundle':
--- a/hgext/remotefilelog/remotefilelogserver.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/remotefilelog/remotefilelogserver.py Tue Sep 28 09:40:57 2021 +0200 @@ -166,24 +166,24 @@ n = util.pconvert(fp[striplen:]) d = store.decodedir(n) t = store.FILETYPE_OTHER - yield (t, d, n, st.st_size) + yield (t, d, st.st_size) if kind == stat.S_IFDIR: visit.append(fp) if scmutil.istreemanifest(repo): - for (t, u, e, s) in repo.store.datafiles(): + for (t, u, s) in repo.store.datafiles(): if u.startswith(b'meta/') and ( u.endswith(b'.i') or u.endswith(b'.d') ): - yield (t, u, e, s) + yield (t, u, s) # Return .d and .i files that do not match the shallow pattern match = state.match if match and not match.always(): - for (t, u, e, s) in repo.store.datafiles(): + for (t, u, s) in repo.store.datafiles(): f = u[5:-2] # trim data/... and .i/.d if not state.match(f): - yield (t, u, e, s) + yield (t, u, s) for x in repo.store.topfiles(): if state.noflatmf and x[1][:11] == b'00manifest.':
--- a/hgext/sparse.py Tue Sep 21 18:18:56 2021 +0200 +++ b/hgext/sparse.py Tue Sep 28 09:40:57 2021 +0200 @@ -255,14 +255,9 @@ # Prevent adding files that are outside the sparse checkout editfuncs = [ - b'normal', b'set_tracked', b'set_untracked', - b'add', - b'normallookup', b'copy', - b'remove', - b'merge', ] hint = _( b'include file with `hg debugsparse --include <pattern>` or use '
--- a/mercurial/archival.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/archival.py Tue Sep 28 09:40:57 2021 +0200 @@ -29,6 +29,8 @@ vfs as vfsmod, ) +from .utils import stringutil + stringio = util.stringio # from unzip source code: @@ -196,7 +198,7 @@ name, pycompat.sysstr(mode + kind), fileobj ) except tarfile.CompressionError as e: - raise error.Abort(pycompat.bytestr(e)) + raise error.Abort(stringutil.forcebytestr(e)) if isinstance(dest, bytes): self.z = taropen(b'w:', name=dest)
--- a/mercurial/bookmarks.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/bookmarks.py Tue Sep 28 09:40:57 2021 +0200 @@ -680,8 +680,25 @@ return books -def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()): - ui.debug(b"checking for updated bookmarks\n") +def mirroring_remote(ui, repo, remotemarks): + """computes the bookmark changes that set the local bookmarks to + remotemarks""" + changed = [] + localmarks = repo._bookmarks + for (b, id) in pycompat.iteritems(remotemarks): + if id != localmarks.get(b, None) and id in repo: + changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b)) + for b in localmarks: + if b not in remotemarks: + changed.append( + (b, None, ui.debug, _(b"removing bookmark %s\n") % b) + ) + return changed + + +def merging_from_remote(ui, repo, remotemarks, path, explicit=()): + """computes the bookmark changes that merge remote bookmarks into the + local bookmarks, based on comparebookmarks""" localmarks = repo._bookmarks ( addsrc, @@ -752,6 +769,15 @@ _(b"remote bookmark %s points to locally missing %s\n") % (b, hex(scid)[:12]) ) + return changed + + +def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()): + ui.debug(b"checking for updated bookmarks\n") + if ui.configbool(b'bookmarks', b'mirror'): + changed = mirroring_remote(ui, repo, remotemarks) + else: + changed = merging_from_remote(ui, repo, remotemarks, path, explicit) if changed: tr = trfunc() @@ -760,7 +786,7 @@ for b, node, writer, msg in sorted(changed, key=key): changes.append((b, node)) writer(msg) - localmarks.applychanges(repo, tr, changes) + repo._bookmarks.applychanges(repo, tr, changes) def incoming(ui, repo, peer):
--- a/mercurial/cext/charencode.c Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/cext/charencode.c Tue Sep 28 09:40:57 2021 +0200 @@ -264,7 +264,7 @@ } tuple = (dirstateItemObject *)v; - if (tuple->state != 'r') { + if (tuple->flags | dirstate_flag_wc_tracked) { PyObject *normed; if (table != NULL) { normed = _asciitransform(k, table,
--- a/mercurial/cext/dirs.c Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/cext/dirs.c Tue Sep 28 09:40:57 2021 +0200 @@ -161,7 +161,7 @@ return ret; } -static int dirs_fromdict(PyObject *dirs, PyObject *source, char skipchar) +static int dirs_fromdict(PyObject *dirs, PyObject *source, bool only_tracked) { PyObject *key, *value; Py_ssize_t pos = 0; @@ -171,13 +171,14 @@ PyErr_SetString(PyExc_TypeError, "expected string key"); return -1; } - if (skipchar) { + if (only_tracked) { if (!dirstate_tuple_check(value)) { PyErr_SetString(PyExc_TypeError, "expected a dirstate tuple"); return -1; } - if (((dirstateItemObject *)value)->state == skipchar) + if (!(((dirstateItemObject *)value)->flags & + dirstate_flag_wc_tracked)) continue; } @@ -218,15 +219,17 @@ * Calculate a refcounted set of directory names for the files in a * dirstate. */ -static int dirs_init(dirsObject *self, PyObject *args) +static int dirs_init(dirsObject *self, PyObject *args, PyObject *kwargs) { PyObject *dirs = NULL, *source = NULL; - char skipchar = 0; + int only_tracked = 0; int ret = -1; + static char *keywords_name[] = {"map", "only_tracked", NULL}; self->dict = NULL; - if (!PyArg_ParseTuple(args, "|Oc:__init__", &source, &skipchar)) + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Oi:__init__", + keywords_name, &source, &only_tracked)) return -1; dirs = PyDict_New(); @@ -237,10 +240,10 @@ if (source == NULL) ret = 0; else if (PyDict_Check(source)) - ret = dirs_fromdict(dirs, source, skipchar); - else if (skipchar) + ret = dirs_fromdict(dirs, source, (bool)only_tracked); + else if (only_tracked) PyErr_SetString(PyExc_ValueError, - "skip character is only supported " + "`only_tracked` is only supported " "with a dict source"); else ret = dirs_fromiter(dirs, source);
--- a/mercurial/cext/parsers.c Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/cext/parsers.c Tue Sep 28 09:40:57 2021 +0200 @@ -44,42 +44,91 @@ return _dict_new_presized(expected_size); } -static inline dirstateItemObject *make_dirstate_item(char state, int mode, - int size, int mtime) -{ - dirstateItemObject *t = - PyObject_New(dirstateItemObject, &dirstateItemType); - if (!t) { - return NULL; - } - t->state = state; - t->mode = mode; - t->size = size; - t->mtime = mtime; - return t; -} - static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds) { /* We do all the initialization here and not a tp_init function because * dirstate_item is immutable. */ dirstateItemObject *t; - char state; - int size, mode, mtime; - if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { + int wc_tracked; + int p1_tracked; + int p2_tracked; + int merged; + int clean_p1; + int clean_p2; + int possibly_dirty; + PyObject *parentfiledata; + static char *keywords_name[] = { + "wc_tracked", "p1_tracked", "p2_tracked", + "merged", "clean_p1", "clean_p2", + "possibly_dirty", "parentfiledata", NULL, + }; + wc_tracked = 0; + p1_tracked = 0; + p2_tracked = 0; + merged = 0; + clean_p1 = 0; + clean_p2 = 0; + possibly_dirty = 0; + parentfiledata = Py_None; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiiiiiiO", keywords_name, + &wc_tracked, &p1_tracked, &p2_tracked, + &merged, &clean_p1, &clean_p2, + &possibly_dirty, &parentfiledata + + )) { return NULL; } - + if (merged && (clean_p1 || clean_p2)) { + PyErr_SetString(PyExc_RuntimeError, + "`merged` argument incompatible with " + "`clean_p1`/`clean_p2`"); + return NULL; + } t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); if (!t) { return NULL; } - t->state = state; - t->mode = mode; - t->size = size; - t->mtime = mtime; + t->flags = 0; + if (wc_tracked) { + t->flags |= dirstate_flag_wc_tracked; + } + if (p1_tracked) { + t->flags |= dirstate_flag_p1_tracked; + } + if (p2_tracked) { + t->flags 
|= dirstate_flag_p2_tracked; + } + if (possibly_dirty) { + t->flags |= dirstate_flag_possibly_dirty; + } + if (merged) { + t->flags |= dirstate_flag_merged; + } + if (clean_p1) { + t->flags |= dirstate_flag_clean_p1; + } + if (clean_p2) { + t->flags |= dirstate_flag_clean_p2; + } + t->mode = 0; + t->size = dirstate_v1_nonnormal; + t->mtime = ambiguous_time; + if (parentfiledata != Py_None) { + if (!PyTuple_CheckExact(parentfiledata)) { + PyErr_SetString( + PyExc_TypeError, + "parentfiledata should be a Tuple or None"); + return NULL; + } + t->mode = + (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 0)); + t->size = + (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1)); + t->mtime = + (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2)); + } return (PyObject *)t; } @@ -88,75 +137,134 @@ PyObject_Del(o); } -static Py_ssize_t dirstate_item_length(PyObject *o) +static inline bool dirstate_item_c_tracked(dirstateItemObject *self) +{ + return (self->flags & dirstate_flag_wc_tracked); +} + +static inline bool dirstate_item_c_added(dirstateItemObject *self) { - return 4; + unsigned char mask = + (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | + dirstate_flag_p2_tracked); + unsigned char target = dirstate_flag_wc_tracked; + return (self->flags & mask) == target; +} + +static inline bool dirstate_item_c_removed(dirstateItemObject *self) +{ + if (self->flags & dirstate_flag_wc_tracked) { + return false; + } + return (self->flags & + (dirstate_flag_p1_tracked | dirstate_flag_p2_tracked)); +} + +static inline bool dirstate_item_c_merged(dirstateItemObject *self) +{ + return ((self->flags & dirstate_flag_wc_tracked) && + (self->flags & dirstate_flag_merged)); } -static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i) +static inline bool dirstate_item_c_merged_removed(dirstateItemObject *self) +{ + if (!dirstate_item_c_removed(self)) { + return false; + } + return (self->flags & dirstate_flag_merged); +} + +static inline bool 
dirstate_item_c_from_p2(dirstateItemObject *self) { - dirstateItemObject *t = (dirstateItemObject *)o; - switch (i) { - case 0: - return PyBytes_FromStringAndSize(&t->state, 1); - case 1: - return PyInt_FromLong(t->mode); - case 2: - return PyInt_FromLong(t->size); - case 3: - return PyInt_FromLong(t->mtime); - default: - PyErr_SetString(PyExc_IndexError, "index out of range"); - return NULL; + if (!dirstate_item_c_tracked(self)) { + return false; + } + return (self->flags & dirstate_flag_clean_p2); +} + +static inline bool dirstate_item_c_from_p2_removed(dirstateItemObject *self) +{ + if (!dirstate_item_c_removed(self)) { + return false; + } + return (self->flags & dirstate_flag_clean_p2); +} + +static inline char dirstate_item_c_v1_state(dirstateItemObject *self) +{ + if (dirstate_item_c_removed(self)) { + return 'r'; + } else if (dirstate_item_c_merged(self)) { + return 'm'; + } else if (dirstate_item_c_added(self)) { + return 'a'; + } else { + return 'n'; } } -static PySequenceMethods dirstate_item_sq = { - dirstate_item_length, /* sq_length */ - 0, /* sq_concat */ - 0, /* sq_repeat */ - dirstate_item_item, /* sq_item */ - 0, /* sq_ass_item */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0 /* sq_inplace_repeat */ -}; +static inline int dirstate_item_c_v1_mode(dirstateItemObject *self) +{ + return self->mode; +} + +static inline int dirstate_item_c_v1_size(dirstateItemObject *self) +{ + if (dirstate_item_c_merged_removed(self)) { + return dirstate_v1_nonnormal; + } else if (dirstate_item_c_from_p2_removed(self)) { + return dirstate_v1_from_p2; + } else if (dirstate_item_c_removed(self)) { + return 0; + } else if (dirstate_item_c_merged(self)) { + return dirstate_v1_from_p2; + } else if (dirstate_item_c_added(self)) { + return dirstate_v1_nonnormal; + } else if (dirstate_item_c_from_p2(self)) { + return dirstate_v1_from_p2; + } else if (self->flags & dirstate_flag_possibly_dirty) { + return self->size; /* NON NORMAL ? 
*/ + } else { + return self->size; + } +} + +static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self) +{ + if (dirstate_item_c_removed(self)) { + return 0; + } else if (self->flags & dirstate_flag_possibly_dirty) { + return ambiguous_time; + } else if (dirstate_item_c_merged(self)) { + return ambiguous_time; + } else if (dirstate_item_c_added(self)) { + return ambiguous_time; + } else if (dirstate_item_c_from_p2(self)) { + return ambiguous_time; + } else { + return self->mtime; + } +} static PyObject *dirstate_item_v1_state(dirstateItemObject *self) { - return PyBytes_FromStringAndSize(&self->state, 1); + char state = dirstate_item_c_v1_state(self); + return PyBytes_FromStringAndSize(&state, 1); }; static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) { - return PyInt_FromLong(self->mode); + return PyInt_FromLong(dirstate_item_c_v1_mode(self)); }; static PyObject *dirstate_item_v1_size(dirstateItemObject *self) { - return PyInt_FromLong(self->size); + return PyInt_FromLong(dirstate_item_c_v1_size(self)); }; static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) { - return PyInt_FromLong(self->mtime); -}; - -static PyObject *dm_nonnormal(dirstateItemObject *self) -{ - if (self->state != 'n' || self->mtime == ambiguous_time) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } -}; -static PyObject *dm_otherparent(dirstateItemObject *self) -{ - if (self->size == dirstate_v1_from_p2) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } + return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); }; static PyObject *dirstate_item_need_delay(dirstateItemObject *self, @@ -166,14 +274,15 @@ if (!pylong_to_long(value, &now)) { return NULL; } - if (self->state == 'n' && self->mtime == now) { + if (dirstate_item_c_v1_state(self) == 'n' && + dirstate_item_c_v1_mtime(self) == now) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } }; -/* This will never change since it's bound to V1, unlike `make_dirstate_item` +/* This will never change since it's 
bound to V1 */ static inline dirstateItemObject * dirstate_item_from_v1_data(char state, int mode, int size, int mtime) @@ -183,10 +292,70 @@ if (!t) { return NULL; } - t->state = state; - t->mode = mode; - t->size = size; - t->mtime = mtime; + + if (state == 'm') { + t->flags = + (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | + dirstate_flag_p2_tracked | dirstate_flag_merged); + t->mode = 0; + t->size = dirstate_v1_from_p2; + t->mtime = ambiguous_time; + } else if (state == 'a') { + t->flags = dirstate_flag_wc_tracked; + t->mode = 0; + t->size = dirstate_v1_nonnormal; + t->mtime = ambiguous_time; + } else if (state == 'r') { + t->mode = 0; + t->size = 0; + t->mtime = 0; + if (size == dirstate_v1_nonnormal) { + t->flags = + (dirstate_flag_p1_tracked | + dirstate_flag_p2_tracked | dirstate_flag_merged); + } else if (size == dirstate_v1_from_p2) { + t->flags = + (dirstate_flag_p2_tracked | dirstate_flag_clean_p2); + } else { + t->flags = dirstate_flag_p1_tracked; + } + } else if (state == 'n') { + if (size == dirstate_v1_from_p2) { + t->flags = + (dirstate_flag_wc_tracked | + dirstate_flag_p2_tracked | dirstate_flag_clean_p2); + t->mode = 0; + t->size = dirstate_v1_from_p2; + t->mtime = ambiguous_time; + } else if (size == dirstate_v1_nonnormal) { + t->flags = (dirstate_flag_wc_tracked | + dirstate_flag_p1_tracked | + dirstate_flag_possibly_dirty); + t->mode = 0; + t->size = dirstate_v1_nonnormal; + t->mtime = ambiguous_time; + } else if (mtime == ambiguous_time) { + t->flags = (dirstate_flag_wc_tracked | + dirstate_flag_p1_tracked | + dirstate_flag_possibly_dirty); + t->mode = mode; + t->size = size; + t->mtime = 0; + } else { + t->flags = (dirstate_flag_wc_tracked | + dirstate_flag_p1_tracked); + t->mode = mode; + t->size = size; + t->mtime = mtime; + } + } else { + PyErr_Format(PyExc_RuntimeError, + "unknown state: `%c` (%d, %d, %d)", state, mode, + size, mtime, NULL); + Py_DECREF(t); + return NULL; + } + return t; } @@ -196,22 +365,110 @@ { /* We do all 
the initialization here and not a tp_init function because * dirstate_item is immutable. */ - dirstateItemObject *t; char state; int size, mode, mtime; if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { return NULL; } + return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime); +}; + +/* constructor to help legacy API to build a new "added" item + +Should eventually be removed */ +static PyObject *dirstate_item_new_added(PyTypeObject *subtype) +{ + dirstateItemObject *t; + t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); + if (!t) { + return NULL; + } + t->flags = dirstate_flag_wc_tracked; + t->mode = 0; + t->size = dirstate_v1_nonnormal; + t->mtime = ambiguous_time; + return (PyObject *)t; +}; + +/* constructor to help legacy API to build a new "merged" item + +Should eventually be removed */ +static PyObject *dirstate_item_new_merged(PyTypeObject *subtype) +{ + dirstateItemObject *t; + t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); + if (!t) { + return NULL; + } + t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | + dirstate_flag_p2_tracked | dirstate_flag_merged); + t->mode = 0; + t->size = dirstate_v1_from_p2; + t->mtime = ambiguous_time; + return (PyObject *)t; +}; + +/* constructor to help legacy API to build a new "from_p2" item + +Should eventually be removed */ +static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype) +{ + /* We do all the initialization here and not a tp_init function because + * dirstate_item is immutable. 
*/ + dirstateItemObject *t; + t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); + if (!t) { + return NULL; + } + t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p2_tracked | + dirstate_flag_clean_p2); + t->mode = 0; + t->size = dirstate_v1_from_p2; + t->mtime = ambiguous_time; + return (PyObject *)t; +}; + +/* constructor to help legacy API to build a new "possibly" item + +Should eventually be removed */ +static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype) +{ + /* We do all the initialization here and not a tp_init function because + * dirstate_item is immutable. */ + dirstateItemObject *t; + t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); + if (!t) { + return NULL; + } + t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | + dirstate_flag_possibly_dirty); + t->mode = 0; + t->size = dirstate_v1_nonnormal; + t->mtime = ambiguous_time; + return (PyObject *)t; +}; + +/* constructor to help legacy API to build a new "normal" item + +Should eventually be removed */ +static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args) +{ + /* We do all the initialization here and not a tp_init function because + * dirstate_item is immutable. */ + dirstateItemObject *t; + int size, mode, mtime; + if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) { + return NULL; + } t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); if (!t) { return NULL; } - t->state = state; + t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked); t->mode = mode; t->size = size; t->mtime = mtime; - return (PyObject *)t; }; @@ -219,7 +476,42 @@ to make sure it is correct. 
*/ static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) { - self->mtime = ambiguous_time; + self->flags |= dirstate_flag_possibly_dirty; + Py_RETURN_NONE; +} + +/* See docstring of the python implementation for details */ +static PyObject *dirstate_item_set_clean(dirstateItemObject *self, + PyObject *args) +{ + int size, mode, mtime; + if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) { + return NULL; + } + self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked; + self->mode = mode; + self->size = size; + self->mtime = mtime; + Py_RETURN_NONE; +} + +static PyObject *dirstate_item_set_tracked(dirstateItemObject *self) +{ + self->flags |= dirstate_flag_wc_tracked; + self->flags |= dirstate_flag_possibly_dirty; + /* size = None on the python size turn into size = NON_NORMAL when + * accessed. So the next line is currently required, but a some future + * clean up would be welcome. */ + self->size = dirstate_v1_nonnormal; + Py_RETURN_NONE; +} + +static PyObject *dirstate_item_set_untracked(dirstateItemObject *self) +{ + self->flags &= ~dirstate_flag_wc_tracked; + self->mode = 0; + self->mtime = 0; + self->size = 0; Py_RETURN_NONE; } @@ -234,40 +526,58 @@ "return a \"mtime\" suitable for v1 serialization"}, {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, "True if the stored mtime would be ambiguous with the current time"}, - {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O, - "build a new DirstateItem object from V1 data"}, + {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, + METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"}, + {"new_added", (PyCFunction)dirstate_item_new_added, + METH_NOARGS | METH_CLASS, + "constructor to help legacy API to build a new \"added\" item"}, + {"new_merged", (PyCFunction)dirstate_item_new_merged, + METH_NOARGS | METH_CLASS, + "constructor to help legacy API to build a new \"merged\" item"}, + {"new_from_p2", 
(PyCFunction)dirstate_item_new_from_p2, + METH_NOARGS | METH_CLASS, + "constructor to help legacy API to build a new \"from_p2\" item"}, + {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty, + METH_NOARGS | METH_CLASS, + "constructor to help legacy API to build a new \"possibly_dirty\" item"}, + {"new_normal", (PyCFunction)dirstate_item_new_normal, + METH_VARARGS | METH_CLASS, + "constructor to help legacy API to build a new \"normal\" item"}, {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, METH_NOARGS, "mark a file as \"possibly dirty\""}, - {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS, - "True is the entry is non-normal in the dirstatemap sense"}, - {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS, - "True is the entry is `otherparent` in the dirstatemap sense"}, + {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS, + "mark a file as \"clean\""}, + {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS, + "mark a file as \"tracked\""}, + {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS, + "mark a file as \"untracked\""}, {NULL} /* Sentinel */ }; static PyObject *dirstate_item_get_mode(dirstateItemObject *self) { - return PyInt_FromLong(self->mode); + return PyInt_FromLong(dirstate_item_c_v1_mode(self)); }; static PyObject *dirstate_item_get_size(dirstateItemObject *self) { - return PyInt_FromLong(self->size); + return PyInt_FromLong(dirstate_item_c_v1_size(self)); }; static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) { - return PyInt_FromLong(self->mtime); + return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); }; static PyObject *dirstate_item_get_state(dirstateItemObject *self) { - return PyBytes_FromStringAndSize(&self->state, 1); + char state = dirstate_item_c_v1_state(self); + return PyBytes_FromStringAndSize(&state, 1); }; static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) { - if (self->state == 'a' || 
self->state == 'm' || self->state == 'n') { + if (dirstate_item_c_tracked(self)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -276,7 +586,7 @@ static PyObject *dirstate_item_get_added(dirstateItemObject *self) { - if (self->state == 'a') { + if (dirstate_item_c_added(self)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -285,7 +595,7 @@ static PyObject *dirstate_item_get_merged(dirstateItemObject *self) { - if (self->state == 'm') { + if (dirstate_item_c_merged(self)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -294,7 +604,7 @@ static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self) { - if (self->state == 'r' && self->size == dirstate_v1_nonnormal) { + if (dirstate_item_c_merged_removed(self)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -303,7 +613,7 @@ static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) { - if (self->state == 'n' && self->size == dirstate_v1_from_p2) { + if (dirstate_item_c_from_p2(self)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -312,7 +622,7 @@ static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self) { - if (self->state == 'r' && self->size == dirstate_v1_from_p2) { + if (dirstate_item_c_from_p2_removed(self)) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -321,7 +631,25 @@ static PyObject *dirstate_item_get_removed(dirstateItemObject *self) { - if (self->state == 'r') { + if (dirstate_item_c_removed(self)) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +}; + +static PyObject *dm_nonnormal(dirstateItemObject *self) +{ + if ((dirstate_item_c_v1_state(self) != 'n') || + (dirstate_item_c_v1_mtime(self) == ambiguous_time)) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } +}; +static PyObject *dm_otherparent(dirstateItemObject *self) +{ + if (dirstate_item_c_v1_mtime(self) == dirstate_v1_from_p2) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; @@ -342,6 +670,8 @@ "from_p2_removed", NULL}, {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, {"removed", 
(getter)dirstate_item_get_removed, NULL, "removed", NULL}, + {"dm_nonnormal", (getter)dm_nonnormal, NULL, "dm_nonnormal", NULL}, + {"dm_otherparent", (getter)dm_otherparent, NULL, "dm_otherparent", NULL}, {NULL} /* Sentinel */ }; @@ -357,7 +687,7 @@ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ - &dirstate_item_sq, /* tp_as_sequence */ + 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ @@ -441,6 +771,8 @@ entry = (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime); + if (!entry) + goto quit; cpos = memchr(cur, 0, flen); if (cpos) { fname = PyBytes_FromStringAndSize(cur, cpos - cur); @@ -509,17 +841,19 @@ } t = (dirstateItemObject *)v; - if (t->state == 'n' && t->size == -2) { + if (dirstate_item_c_from_p2(t)) { if (PySet_Add(otherpset, fname) == -1) { goto bail; } } - - if (t->state == 'n' && t->mtime != -1) { - continue; - } - if (PySet_Add(nonnset, fname) == -1) { - goto bail; + if (!(t->flags & dirstate_flag_wc_tracked) || + !(t->flags & + (dirstate_flag_p1_tracked | dirstate_flag_p2_tracked)) || + (t->flags & + (dirstate_flag_possibly_dirty | dirstate_flag_merged))) { + if (PySet_Add(nonnset, fname) == -1) { + goto bail; + } } } @@ -616,15 +950,15 @@ } tuple = (dirstateItemObject *)v; - state = tuple->state; - mode = tuple->mode; - size = tuple->size; - mtime = tuple->mtime; + state = dirstate_item_c_v1_state(tuple); + mode = dirstate_item_c_v1_mode(tuple); + size = dirstate_item_c_v1_size(tuple); + mtime = dirstate_item_c_v1_mtime(tuple); if (state == 'n' && mtime == now) { /* See pure/parsers.py:pack_dirstate for why we do * this. 
*/ mtime = -1; - mtime_unset = (PyObject *)make_dirstate_item( + mtime_unset = (PyObject *)dirstate_item_from_v1_data( state, mode, size, mtime); if (!mtime_unset) { goto bail; @@ -917,7 +1251,7 @@ revlog_module_init(mod); capsule = PyCapsule_New( - make_dirstate_item, + dirstate_item_from_v1_data, "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL); if (capsule != NULL) PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);
--- a/mercurial/cext/util.h Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/cext/util.h Tue Sep 28 09:40:57 2021 +0200 @@ -24,13 +24,21 @@ /* clang-format off */ typedef struct { PyObject_HEAD - char state; + unsigned char flags; int mode; int size; int mtime; } dirstateItemObject; /* clang-format on */ +static const unsigned char dirstate_flag_wc_tracked = 1; +static const unsigned char dirstate_flag_p1_tracked = 1 << 1; +static const unsigned char dirstate_flag_p2_tracked = 1 << 2; +static const unsigned char dirstate_flag_possibly_dirty = 1 << 3; +static const unsigned char dirstate_flag_merged = 1 << 4; +static const unsigned char dirstate_flag_clean_p1 = 1 << 5; +static const unsigned char dirstate_flag_clean_p2 = 1 << 6; + extern PyTypeObject dirstateItemType; #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType)
--- a/mercurial/commands.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/commands.py Tue Sep 28 09:40:57 2021 +0200 @@ -526,7 +526,7 @@ ) def bad(x, y): - raise error.Abort(b"%s: %s" % (x, y)) + raise error.InputError(b"%s: %s" % (x, y)) m = scmutil.match(ctx, pats, opts, badfn=bad) @@ -1081,7 +1081,7 @@ raise error.StateError(_(b'current bisect revision is a merge')) if rev: if not nodes: - raise error.Abort(_(b'empty revision set')) + raise error.InputError(_(b'empty revision set')) node = repo[nodes[-1]].node() with hbisect.restore_state(repo, state, node): while changesets:
--- a/mercurial/configitems.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/configitems.py Tue Sep 28 09:40:57 2021 +0200 @@ -207,6 +207,11 @@ b'pushing', default=list, ) +coreconfigitem( + b'bookmarks', + b'mirror', + default=False, +) # bundle.mainreporoot: internal hack for bundlerepo coreconfigitem( b'bundle', @@ -1266,6 +1271,11 @@ ) coreconfigitem( b'experimental', + b'web.full-garbage-collection-rate', + default=1, # still forcing a full collection on each request +) +coreconfigitem( + b'experimental', b'worker.wdir-get-thread-safe', default=False, )
--- a/mercurial/debugcommands.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/debugcommands.py Tue Sep 28 09:40:57 2021 +0200 @@ -962,35 +962,29 @@ datesort = opts.get('datesort') if datesort: - keyfunc = lambda x: ( - x[1].v1_mtime(), - x[0], - ) # sort by mtime, then by filename + + def keyfunc(entry): + filename, _state, _mode, _size, mtime = entry + return (mtime, filename) + else: keyfunc = None # sort by filename - if opts['all']: - entries = list(repo.dirstate._map.debug_iter()) - else: - entries = list(pycompat.iteritems(repo.dirstate)) + entries = list(repo.dirstate._map.debug_iter(all=opts['all'])) entries.sort(key=keyfunc) - for file_, ent in entries: - if ent.v1_mtime() == -1: + for entry in entries: + filename, state, mode, size, mtime = entry + if mtime == -1: timestr = b'unset ' elif nodates: timestr = b'set ' else: - timestr = time.strftime( - "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime()) - ) + timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime)) timestr = encoding.strtolocal(timestr) - if ent.mode & 0o20000: + if mode & 0o20000: mode = b'lnk' else: - mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask) - ui.write( - b"%c %s %10d %s%s\n" - % (ent.v1_state(), mode, ent.v1_size(), timestr, file_) - ) + mode = b'%3o' % (mode & 0o777 & ~util.umask) + ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename)) for f in repo.dirstate.copies(): ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) @@ -2987,10 +2981,22 @@ dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) -@command(b'debugrebuildfncache', [], b'') -def debugrebuildfncache(ui, repo): +@command( + b'debugrebuildfncache', + [ + ( + b'', + b'only-data', + False, + _(b'only look for wrong .d files (much faster)'), + ) + ], + b'', +) +def debugrebuildfncache(ui, repo, **opts): """rebuild the fncache file""" - repair.rebuildfncache(ui, repo) + opts = pycompat.byteskwargs(opts) + repair.rebuildfncache(ui, repo, opts.get(b"only_data")) @command(
--- a/mercurial/dirstate.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/dirstate.py Tue Sep 28 09:40:57 2021 +0200 @@ -344,9 +344,6 @@ iteritems = items - def directories(self): - return self._map.directories() - def parents(self): return [self._validate(p) for p in self._pl] @@ -387,10 +384,8 @@ self._origpl = self._pl self._map.setparents(p1, p2) copies = {} - if ( - oldp2 != self._nodeconstants.nullid - and p2 == self._nodeconstants.nullid - ): + nullid = self._nodeconstants.nullid + if oldp2 != nullid and p2 == nullid: candidatefiles = self._map.non_normal_or_other_parent_paths() for f in candidatefiles: @@ -403,13 +398,24 @@ source = self._map.copymap.get(f) if source: copies[f] = source - self._normallookup(f) + self._map.reset_state( + f, + wc_tracked=True, + p1_tracked=True, + possibly_dirty=True, + ) # Also fix up otherparent markers elif s.from_p2: source = self._map.copymap.get(f) if source: copies[f] = source - self._add(f) + self._check_new_tracked_filename(f) + self._updatedfiles.add(f) + self._map.reset_state( + f, + p1_tracked=False, + wc_tracked=True, + ) return copies def setbranch(self, branch): @@ -471,18 +477,12 @@ return True the file was previously untracked, False otherwise. """ + self._dirty = True + self._updatedfiles.add(filename) entry = self._map.get(filename) - if entry is None: - self._add(filename) - return True - elif not entry.tracked: - self._normallookup(filename) - return True - # XXX This is probably overkill for more case, but we need this to - # fully replace the `normallookup` call with `set_tracked` one. - # Consider smoothing this in the future. - self.set_possibly_dirty(filename) - return False + if entry is None or not entry.tracked: + self._check_new_tracked_filename(filename) + return self._map.set_tracked(filename) @requires_no_parents_change def set_untracked(self, filename): @@ -493,22 +493,29 @@ return True the file was previously tracked, False otherwise. 
""" - entry = self._map.get(filename) - if entry is None: - return False - elif entry.added: - self._drop(filename) - return True - else: - self._remove(filename) - return True + ret = self._map.set_untracked(filename) + if ret: + self._dirty = True + self._updatedfiles.add(filename) + return ret @requires_no_parents_change def set_clean(self, filename, parentfiledata=None): """record that the current state of the file on disk is known to be clean""" self._dirty = True self._updatedfiles.add(filename) - self._normal(filename, parentfiledata=parentfiledata) + if parentfiledata: + (mode, size, mtime) = parentfiledata + else: + (mode, size, mtime) = self._get_filedata(filename) + if not self._map[filename].tracked: + self._check_new_tracked_filename(filename) + self._map.set_clean(filename, mode, size, mtime) + if mtime > self._lastnormaltime: + # Remember the most recent modification timeslot for status(), + # to make sure we won't miss future size-preserving file content + # modifications that happen within the same timeslot. + self._lastnormaltime = mtime @requires_no_parents_change def set_possibly_dirty(self, filename): @@ -546,7 +553,10 @@ possibly_dirty = True elif not (p1_tracked or wc_tracked): # the file is no longer relevant to anyone - self._drop(filename) + if self._map.get(filename) is not None: + self._map.reset_state(filename) + self._dirty = True + self._updatedfiles.add(filename) elif (not p1_tracked) and wc_tracked: if entry is not None and entry.added: return # avoid dropping copy information (maybe?) @@ -655,45 +665,21 @@ # modifications that happen within the same timeslot. 
self._lastnormaltime = parentfiledata[2] - def _addpath( - self, - f, - mode=0, - size=None, - mtime=None, - added=False, - merged=False, - from_p2=False, - possibly_dirty=False, - ): - entry = self._map.get(f) - if added or entry is not None and entry.removed: - scmutil.checkfilename(f) - if self._map.hastrackeddir(f): - msg = _(b'directory %r already in dirstate') - msg %= pycompat.bytestr(f) + def _check_new_tracked_filename(self, filename): + scmutil.checkfilename(filename) + if self._map.hastrackeddir(filename): + msg = _(b'directory %r already in dirstate') + msg %= pycompat.bytestr(filename) + raise error.Abort(msg) + # shadows + for d in pathutil.finddirs(filename): + if self._map.hastrackeddir(d): + break + entry = self._map.get(d) + if entry is not None and not entry.removed: + msg = _(b'file %r in dirstate clashes with %r') + msg %= (pycompat.bytestr(d), pycompat.bytestr(filename)) raise error.Abort(msg) - # shadows - for d in pathutil.finddirs(f): - if self._map.hastrackeddir(d): - break - entry = self._map.get(d) - if entry is not None and not entry.removed: - msg = _(b'file %r in dirstate clashes with %r') - msg %= (pycompat.bytestr(d), pycompat.bytestr(f)) - raise error.Abort(msg) - self._dirty = True - self._updatedfiles.add(f) - self._map.addfile( - f, - mode=mode, - size=size, - mtime=mtime, - added=added, - merged=merged, - from_p2=from_p2, - possibly_dirty=possibly_dirty, - ) def _get_filedata(self, filename): """returns""" @@ -703,215 +689,6 @@ mtime = s[stat.ST_MTIME] return (mode, size, mtime) - def normal(self, f, parentfiledata=None): - """Mark a file normal and clean. 
- - parentfiledata: (mode, size, mtime) of the clean file - - parentfiledata should be computed from memory (for mode, - size), as or close as possible from the point where we - determined the file was clean, to limit the risk of the - file having been changed by an external process between the - moment where the file was determined to be clean and now.""" - if self.pendingparentchange(): - util.nouideprecwarn( - b"do not use `normal` inside of update/merge context." - b" Use `update_file` or `update_file_p1`", - b'6.0', - stacklevel=2, - ) - else: - util.nouideprecwarn( - b"do not use `normal` outside of update/merge context." - b" Use `set_tracked`", - b'6.0', - stacklevel=2, - ) - self._normal(f, parentfiledata=parentfiledata) - - def _normal(self, f, parentfiledata=None): - if parentfiledata: - (mode, size, mtime) = parentfiledata - else: - (mode, size, mtime) = self._get_filedata(f) - self._addpath(f, mode=mode, size=size, mtime=mtime) - self._map.copymap.pop(f, None) - if f in self._map.nonnormalset: - self._map.nonnormalset.remove(f) - if mtime > self._lastnormaltime: - # Remember the most recent modification timeslot for status(), - # to make sure we won't miss future size-preserving file content - # modifications that happen within the same timeslot. - self._lastnormaltime = mtime - - def normallookup(self, f): - '''Mark a file normal, but possibly dirty.''' - if self.pendingparentchange(): - util.nouideprecwarn( - b"do not use `normallookup` inside of update/merge context." - b" Use `update_file` or `update_file_p1`", - b'6.0', - stacklevel=2, - ) - else: - util.nouideprecwarn( - b"do not use `normallookup` outside of update/merge context." 
- b" Use `set_possibly_dirty` or `set_tracked`", - b'6.0', - stacklevel=2, - ) - self._normallookup(f) - - def _normallookup(self, f): - '''Mark a file normal, but possibly dirty.''' - if self.in_merge: - # if there is a merge going on and the file was either - # "merged" or coming from other parent (-2) before - # being removed, restore that state. - entry = self._map.get(f) - if entry is not None: - # XXX this should probably be dealt with a a lower level - # (see `merged_removed` and `from_p2_removed`) - if entry.merged_removed or entry.from_p2_removed: - source = self._map.copymap.get(f) - if entry.merged_removed: - self._merge(f) - elif entry.from_p2_removed: - self._otherparent(f) - if source is not None: - self.copy(source, f) - return - elif entry.merged or entry.from_p2: - return - self._addpath(f, possibly_dirty=True) - self._map.copymap.pop(f, None) - - def otherparent(self, f): - '''Mark as coming from the other parent, always dirty.''' - if self.pendingparentchange(): - util.nouideprecwarn( - b"do not use `otherparent` inside of update/merge context." - b" Use `update_file` or `update_file_p1`", - b'6.0', - stacklevel=2, - ) - else: - util.nouideprecwarn( - b"do not use `otherparent` outside of update/merge context." - b"It should have been set by the update/merge code", - b'6.0', - stacklevel=2, - ) - self._otherparent(f) - - def _otherparent(self, f): - if not self.in_merge: - msg = _(b"setting %r to other parent only allowed in merges") % f - raise error.Abort(msg) - entry = self._map.get(f) - if entry is not None and entry.tracked: - # merge-like - self._addpath(f, merged=True) - else: - # add-like - self._addpath(f, from_p2=True) - self._map.copymap.pop(f, None) - - def add(self, f): - '''Mark a file added.''' - if self.pendingparentchange(): - util.nouideprecwarn( - b"do not use `add` inside of update/merge context." 
- b" Use `update_file`", - b'6.0', - stacklevel=2, - ) - else: - util.nouideprecwarn( - b"do not use `add` outside of update/merge context." - b" Use `set_tracked`", - b'6.0', - stacklevel=2, - ) - self._add(f) - - def _add(self, filename): - """internal function to mark a file as added""" - self._addpath(filename, added=True) - self._map.copymap.pop(filename, None) - - def remove(self, f): - '''Mark a file removed''' - if self.pendingparentchange(): - util.nouideprecwarn( - b"do not use `remove` insde of update/merge context." - b" Use `update_file` or `update_file_p1`", - b'6.0', - stacklevel=2, - ) - else: - util.nouideprecwarn( - b"do not use `remove` outside of update/merge context." - b" Use `set_untracked`", - b'6.0', - stacklevel=2, - ) - self._remove(f) - - def _remove(self, filename): - """internal function to mark a file removed""" - self._dirty = True - self._updatedfiles.add(filename) - self._map.removefile(filename, in_merge=self.in_merge) - - def merge(self, f): - '''Mark a file merged.''' - if self.pendingparentchange(): - util.nouideprecwarn( - b"do not use `merge` inside of update/merge context." - b" Use `update_file`", - b'6.0', - stacklevel=2, - ) - else: - util.nouideprecwarn( - b"do not use `merge` outside of update/merge context." - b"It should have been set by the update/merge code", - b'6.0', - stacklevel=2, - ) - self._merge(f) - - def _merge(self, f): - if not self.in_merge: - return self._normallookup(f) - return self._otherparent(f) - - def drop(self, f): - '''Drop a file from the dirstate''' - if self.pendingparentchange(): - util.nouideprecwarn( - b"do not use `drop` inside of update/merge context." - b" Use `update_file`", - b'6.0', - stacklevel=2, - ) - else: - util.nouideprecwarn( - b"do not use `drop` outside of update/merge context." 
- b" Use `set_untracked`", - b'6.0', - stacklevel=2, - ) - self._drop(f) - - def _drop(self, filename): - """internal function to drop a file from the dirstate""" - if self._map.dropfile(filename): - self._dirty = True - self._updatedfiles.add(filename) - self._map.copymap.pop(filename, None) - def _discoverpath(self, path, normed, ignoremissing, exists, storemap): if exists is None: exists = os.path.lexists(os.path.join(self._root, path)) @@ -1022,9 +799,20 @@ self._map.setparents(parent, self._nodeconstants.nullid) for f in to_lookup: - self._normallookup(f) + + if self.in_merge: + self.set_tracked(f) + else: + self._map.reset_state( + f, + wc_tracked=True, + p1_tracked=True, + possibly_dirty=True, + ) + self._updatedfiles.add(f) for f in to_drop: - self._drop(f) + self._map.reset_state(f) + self._updatedfiles.add(f) self._dirty = True
--- a/mercurial/dirstatemap.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/dirstatemap.py Tue Sep 28 09:40:57 2021 +0200 @@ -29,16 +29,6 @@ DirstateItem = parsers.DirstateItem - -# a special value used internally for `size` if the file come from the other parent -FROM_P2 = -2 - -# a special value used internally for `size` if the file is modified/merged/added -NONNORMAL = -1 - -# a special value used internally for `time` if the time is ambigeous -AMBIGUOUS_TIME = -1 - rangemask = 0x7FFFFFFF @@ -56,8 +46,14 @@ - the state map maps filenames to tuples of (state, mode, size, mtime), where state is a single character representing 'normal', 'added', 'removed', or 'merged'. It is read by treating the dirstate as a - dict. File state is updated by calling the `addfile`, `removefile` and - `dropfile` methods. + dict. File state is updated by calling various methods (see each + documentation for details): + + - `reset_state`, + - `set_tracked` + - `set_untracked` + - `set_clean` + - `set_possibly_dirty` - `copymap` maps destination filenames to their source filename. 
@@ -122,7 +118,14 @@ # forward for python2,3 compat iteritems = items - debug_iter = items + def debug_iter(self, all): + """ + Return an iterator of (filename, state, mode, size, mtime) tuples + + `all` is unused when Rust is not enabled + """ + for (filename, item) in self.items(): + yield (filename, item.state, item.mode, item.size, item.mtime) def __len__(self): return len(self._map) @@ -172,65 +175,20 @@ """record that the current state of the file on disk is unknown""" self[filename].set_possibly_dirty() - def addfile( - self, - f, - mode=0, - size=None, - mtime=None, - added=False, - merged=False, - from_p2=False, - possibly_dirty=False, - ): - """Add a tracked file to the dirstate.""" - if added: - assert not merged - assert not possibly_dirty - assert not from_p2 - state = b'a' - size = NONNORMAL - mtime = AMBIGUOUS_TIME - elif merged: - assert not possibly_dirty - assert not from_p2 - state = b'm' - size = FROM_P2 - mtime = AMBIGUOUS_TIME - elif from_p2: - assert not possibly_dirty - state = b'n' - size = FROM_P2 - mtime = AMBIGUOUS_TIME - elif possibly_dirty: - state = b'n' - size = NONNORMAL - mtime = AMBIGUOUS_TIME - else: - assert size != FROM_P2 - assert size != NONNORMAL - assert size is not None - assert mtime is not None - - state = b'n' - size = size & rangemask - mtime = mtime & rangemask - assert state is not None - assert size is not None - assert mtime is not None - old_entry = self.get(f) - self._dirs_incr(f, old_entry) - e = self._map[f] = DirstateItem(state, mode, size, mtime) - if e.dm_nonnormal: - self.nonnormalset.add(f) - if e.dm_otherparent: - self.otherparentset.add(f) + def set_clean(self, filename, mode, size, mtime): + """mark a file as back to a clean state""" + entry = self[filename] + mtime = mtime & rangemask + size = size & rangemask + entry.set_clean(mode, size, mtime) + self.copymap.pop(filename, None) + self.nonnormalset.discard(filename) def reset_state( self, filename, - wc_tracked, - p1_tracked, + wc_tracked=False, + 
p1_tracked=False, p2_tracked=False, merged=False, clean_p1=False, @@ -255,26 +213,25 @@ self.copymap.pop(filename, None) if not (p1_tracked or p2_tracked or wc_tracked): - self.dropfile(filename) + old_entry = self._map.pop(filename, None) + self._dirs_decr(filename, old_entry=old_entry) + self.nonnormalset.discard(filename) + self.copymap.pop(filename, None) + return elif merged: # XXX might be merged and removed ? entry = self.get(filename) - if entry is not None and entry.tracked: + if entry is None or not entry.tracked: # XXX mostly replicate dirstate.other parent. We should get # the higher layer to pass us more reliable data where `merged` - # actually mean merged. Dropping the else clause will show - # failure in `test-graft.t` - self.addfile(filename, merged=True) - else: - self.addfile(filename, from_p2=True) + # actually mean merged. Dropping this clause will show failure + # in `test-graft.t` + merged = False + clean_p2 = True elif not (p1_tracked or p2_tracked) and wc_tracked: - self.addfile(filename, added=True, possibly_dirty=possibly_dirty) + pass # file is added, nothing special to adjust elif (p1_tracked or p2_tracked) and not wc_tracked: - # XXX might be merged and removed ? - old_entry = self._map.get(filename) - self._dirs_decr(filename, old_entry=old_entry, remove_variant=True) - self._map[filename] = DirstateItem(b'r', 0, 0, 0) - self.nonnormalset.add(filename) + pass elif clean_p2 and wc_tracked: if p1_tracked or self.get(filename) is not None: # XXX the `self.get` call is catching some case in @@ -284,62 +241,91 @@ # In addition, this seems to be a case where the file is marked # as merged without actually being the result of a merge # action. So thing are not ideal here. 
- self.addfile(filename, merged=True) - else: - self.addfile(filename, from_p2=True) + merged = True + clean_p2 = False elif not p1_tracked and p2_tracked and wc_tracked: - self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty) + clean_p2 = True elif possibly_dirty: - self.addfile(filename, possibly_dirty=possibly_dirty) + pass elif wc_tracked: # this is a "normal" file if parentfiledata is None: msg = b'failed to pass parentfiledata for a normal file: %s' msg %= filename raise error.ProgrammingError(msg) - mode, size, mtime = parentfiledata - self.addfile(filename, mode=mode, size=size, mtime=mtime) - self.nonnormalset.discard(filename) else: assert False, 'unreachable' - def removefile(self, f, in_merge=False): - """ - Mark a file as removed in the dirstate. + old_entry = self._map.get(filename) + self._dirs_incr(filename, old_entry) + entry = DirstateItem( + wc_tracked=wc_tracked, + p1_tracked=p1_tracked, + p2_tracked=p2_tracked, + merged=merged, + clean_p1=clean_p1, + clean_p2=clean_p2, + possibly_dirty=possibly_dirty, + parentfiledata=parentfiledata, + ) + if entry.dm_nonnormal: + self.nonnormalset.add(filename) + else: + self.nonnormalset.discard(filename) + if entry.dm_otherparent: + self.otherparentset.add(filename) + else: + self.otherparentset.discard(filename) + self._map[filename] = entry - The `size` parameter is used to store sentinel values that indicate - the file's previous state. In the future, we should refactor this - to be more explicit about what that state is. 
- """ + def set_tracked(self, filename): + new = False + entry = self.get(filename) + if entry is None: + self._dirs_incr(filename) + entry = DirstateItem( + p1_tracked=False, + p2_tracked=False, + wc_tracked=True, + merged=False, + clean_p1=False, + clean_p2=False, + possibly_dirty=False, + parentfiledata=None, + ) + self._map[filename] = entry + if entry.dm_nonnormal: + self.nonnormalset.add(filename) + new = True + elif not entry.tracked: + self._dirs_incr(filename, entry) + entry.set_tracked() + new = True + else: + # XXX This is probably overkill for more case, but we need this to + # fully replace the `normallookup` call with `set_tracked` one. + # Consider smoothing this in the future. + self.set_possibly_dirty(filename) + return new + + def set_untracked(self, f): + """Mark a file as no longer tracked in the dirstate map""" entry = self.get(f) - size = 0 - if in_merge: - # XXX we should not be able to have 'm' state and 'FROM_P2' if not - # during a merge. So I (marmoute) am not sure we need the - # conditionnal at all. Adding double checking this with assert - # would be nice. - if entry is not None: - # backup the previous state - if entry.merged: # merge - size = NONNORMAL - elif entry.from_p2: - size = FROM_P2 + if entry is None: + return False + else: + self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added) + if not entry.merged: + self.copymap.pop(f, None) + if entry.added: + self.nonnormalset.discard(f) + self._map.pop(f, None) + else: + self.nonnormalset.add(f) + if entry.from_p2: self.otherparentset.add(f) - if entry is not None and not (entry.merged or entry.from_p2): - self.copymap.pop(f, None) - self._dirs_decr(f, old_entry=entry, remove_variant=True) - self._map[f] = DirstateItem(b'r', 0, size, 0) - self.nonnormalset.add(f) - - def dropfile(self, f): - """ - Remove a file from the dirstate. Returns True if the file was - previously recorded. 
- """ - old_entry = self._map.pop(f, None) - self._dirs_decr(f, old_entry=old_entry) - self.nonnormalset.discard(f) - return old_entry is not None + entry.set_untracked() + return True def clearambiguoustimes(self, files, now): for f in files: @@ -400,7 +386,7 @@ @propertycache def _dirs(self): - return pathutil.dirs(self._map, b'r') + return pathutil.dirs(self._map, only_tracked=True) @propertycache def _alldirs(self): @@ -572,7 +558,7 @@ from_p2=False, possibly_dirty=False, ): - return self._rustmap.addfile( + ret = self._rustmap.addfile( f, mode, size, @@ -582,12 +568,15 @@ from_p2, possibly_dirty, ) + if added: + self.copymap.pop(f, None) + return ret def reset_state( self, filename, - wc_tracked, - p1_tracked, + wc_tracked=False, + p1_tracked=False, p2_tracked=False, merged=False, clean_p1=False, @@ -632,7 +621,7 @@ ) elif (p1_tracked or p2_tracked) and not wc_tracked: # XXX might be merged and removed ? - self[filename] = DirstateItem(b'r', 0, 0, 0) + self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0) self.nonnormalset.add(filename) elif clean_p2 and wc_tracked: if p1_tracked or self.get(filename) is not None: @@ -664,11 +653,46 @@ else: assert False, 'unreachable' + def set_tracked(self, filename): + new = False + entry = self.get(filename) + if entry is None: + self.addfile(filename, added=True) + new = True + elif not entry.tracked: + entry.set_tracked() + self._rustmap.set_v1(filename, entry) + new = True + else: + # XXX This is probably overkill for more case, but we need this to + # fully replace the `normallookup` call with `set_tracked` one. + # Consider smoothing this in the future. + self.set_possibly_dirty(filename) + return new + + def set_untracked(self, f): + """Mark a file as no longer tracked in the dirstate map""" + # in merge is only trigger more logic, so it "fine" to pass it. 
+ # + # the inner rust dirstate map code need to be adjusted once the API + # for dirstate/dirstatemap/DirstateItem is a bit more settled + entry = self.get(f) + if entry is None: + return False + else: + if entry.added: + self._rustmap.copymap().pop(f, None) + self._rustmap.dropfile(f) + else: + self._rustmap.removefile(f, in_merge=True) + return True + def removefile(self, *args, **kwargs): return self._rustmap.removefile(*args, **kwargs) - def dropfile(self, *args, **kwargs): - return self._rustmap.dropfile(*args, **kwargs) + def dropfile(self, f, *args, **kwargs): + self._rustmap.copymap().pop(f, None) + return self._rustmap.dropfile(f, *args, **kwargs) def clearambiguoustimes(self, *args, **kwargs): return self._rustmap.clearambiguoustimes(*args, **kwargs) @@ -683,11 +707,15 @@ def copymap(self): return self._rustmap.copymap() - def directories(self): - return self._rustmap.directories() + def debug_iter(self, all): + """ + Return an iterator of (filename, state, mode, size, mtime) tuples - def debug_iter(self): - return self._rustmap.debug_iter() + `all`: also include with `state == b' '` dirstate tree nodes that + don't have an associated `DirstateItem`. + + """ + return self._rustmap.debug_iter(all) def preload(self): self._rustmap @@ -920,6 +948,15 @@ entry.set_possibly_dirty() self._rustmap.set_v1(filename, entry) + def set_clean(self, filename, mode, size, mtime): + """mark a file as back to a clean state""" + entry = self[filename] + mtime = mtime & rangemask + size = size & rangemask + entry.set_clean(mode, size, mtime) + self._rustmap.set_v1(filename, entry) + self._rustmap.copymap().pop(filename, None) + def __setitem__(self, key, value): assert isinstance(value, DirstateItem) self._rustmap.set_v1(key, value)
--- a/mercurial/dirstateutils/docket.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/dirstateutils/docket.py Tue Sep 28 09:40:57 2021 +0200 @@ -34,7 +34,7 @@ class DirstateDocket(object): - data_filename_pattern = b'dirstate.%s.d' + data_filename_pattern = b'dirstate.%s' def __init__(self, parents, data_size, tree_metadata, uuid): self.parents = parents
--- a/mercurial/encoding.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/encoding.py Tue Sep 28 09:40:57 2021 +0200 @@ -240,7 +240,9 @@ b"decoding near '%s': %s!" % (sub, pycompat.bytestr(inst)) ) except LookupError as k: - raise error.Abort(k, hint=b"please check your locale settings") + raise error.Abort( + pycompat.bytestr(k), hint=b"please check your locale settings" + ) def unitolocal(u): @@ -306,7 +308,9 @@ except UnicodeError: return s.lower() # we don't know how to fold this except in ASCII except LookupError as k: - raise error.Abort(k, hint=b"please check your locale settings") + raise error.Abort( + pycompat.bytestr(k), hint=b"please check your locale settings" + ) def upper(s): @@ -333,7 +337,9 @@ except UnicodeError: return s.upper() # we don't know how to fold this except in ASCII except LookupError as k: - raise error.Abort(k, hint=b"please check your locale settings") + raise error.Abort( + pycompat.bytestr(k), hint=b"please check your locale settings" + ) if not _nativeenviron:
--- a/mercurial/extensions.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/extensions.py Tue Sep 28 09:40:57 2021 +0200 @@ -224,8 +224,12 @@ minver = getattr(mod, 'minimumhgversion', None) if minver: curver = util.versiontuple(n=2) + extmin = util.versiontuple(stringutil.forcebytestr(minver), 2) - if None in curver or util.versiontuple(minver, 2) > curver: + if None in extmin: + extmin = (extmin[0] or 0, extmin[1] or 0) + + if None in curver or extmin > curver: msg = _( b'(third party extension %s requires version %s or newer ' b'of Mercurial (current: %s); disabling)\n'
--- a/mercurial/helptext/config.txt Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/helptext/config.txt Tue Sep 28 09:40:57 2021 +0200 @@ -418,6 +418,16 @@ If no suitable authentication entry is found, the user is prompted for credentials as usual if required by the remote. +``bookmarks`` +------------- + +Controls some aspect of bookmarks. + +``mirror`` + When pulling, instead of merging local bookmarks and remote bookmarks, + replace local bookmarks by remote bookmarks. This is useful to replicate + a repository, or as an optimization. (default: False) + ``cmdserver`` -------------
--- a/mercurial/hgweb/hgwebdir_mod.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/hgweb/hgwebdir_mod.py Tue Sep 28 09:40:57 2021 +0200 @@ -285,6 +285,7 @@ self.lastrefresh = 0 self.motd = None self.refresh() + self.requests_count = 0 if not baseui: # set up environment for new ui extensions.loadall(self.ui) @@ -341,6 +342,10 @@ self.repos = repos self.ui = u + self.gc_full_collect_rate = self.ui.configint( + b'experimental', b'web.full-garbage-collection-rate' + ) + self.gc_full_collections_done = 0 encoding.encoding = self.ui.config(b'web', b'encoding') self.style = self.ui.config(b'web', b'style') self.templatepath = self.ui.config( @@ -383,12 +388,27 @@ finally: # There are known cycles in localrepository that prevent # those objects (and tons of held references) from being - # collected through normal refcounting. We mitigate those - # leaks by performing an explicit GC on every request. - # TODO remove this once leaks are fixed. - # TODO only run this on requests that create localrepository - # instances instead of every request. - gc.collect() + # collected through normal refcounting. + # In some cases, the resulting memory consumption can + # be tamed by performing explicit garbage collections. + # In presence of actual leaks or big long-lived caches, the + # impact on performance of such collections can become a + # problem, hence the rate shouldn't be set too low. + # See "Collecting the oldest generation" in + # https://devguide.python.org/garbage_collector + # for more about such trade-offs. + rate = self.gc_full_collect_rate + + # this is not thread safe, but the consequence (skipping + # a garbage collection) is arguably better than risking + # to have several threads perform a collection in parallel + # (long useless wait on all threads). + self.requests_count += 1 + if rate > 0 and self.requests_count % rate == 0: + gc.collect() + self.gc_full_collections_done += 1 + else: + gc.collect(generation=1) def _runwsgi(self, req, res): try:
--- a/mercurial/interfaces/dirstate.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/interfaces/dirstate.py Tue Sep 28 09:40:57 2021 +0200 @@ -132,36 +132,6 @@ def copies(): pass - def normal(f, parentfiledata=None): - """Mark a file normal and clean. - - parentfiledata: (mode, size, mtime) of the clean file - - parentfiledata should be computed from memory (for mode, - size), as or close as possible from the point where we - determined the file was clean, to limit the risk of the - file having been changed by an external process between the - moment where the file was determined to be clean and now.""" - pass - - def normallookup(f): - '''Mark a file normal, but possibly dirty.''' - - def otherparent(f): - '''Mark as coming from the other parent, always dirty.''' - - def add(f): - '''Mark a file added.''' - - def remove(f): - '''Mark a file removed.''' - - def merge(f): - '''Mark a file merged.''' - - def drop(f): - '''Drop a file from the dirstate''' - def normalize(path, isknown=False, ignoremissing=False): """ normalize the case of a pathname when on a casefolding filesystem
--- a/mercurial/logcmdutil.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/logcmdutil.py Tue Sep 28 09:40:57 2021 +0200 @@ -46,13 +46,12 @@ Any, Callable, Dict, - List, Optional, Sequence, Tuple, ) - for t in (Any, Callable, Dict, List, Optional, Tuple): + for t in (Any, Callable, Dict, Optional, Tuple): assert t @@ -714,43 +713,43 @@ """ # raw command-line parameters, which a matcher will be built from - pats = attr.ib() # type: List[bytes] - opts = attr.ib() # type: Dict[bytes, Any] + pats = attr.ib() + opts = attr.ib() # a list of revset expressions to be traversed; if follow, it specifies # the start revisions - revspec = attr.ib() # type: List[bytes] + revspec = attr.ib() # miscellaneous queries to filter revisions (see "hg help log" for details) - bookmarks = attr.ib(default=attr.Factory(list)) # type: List[bytes] - branches = attr.ib(default=attr.Factory(list)) # type: List[bytes] - date = attr.ib(default=None) # type: Optional[bytes] - keywords = attr.ib(default=attr.Factory(list)) # type: List[bytes] - no_merges = attr.ib(default=False) # type: bool - only_merges = attr.ib(default=False) # type: bool - prune_ancestors = attr.ib(default=attr.Factory(list)) # type: List[bytes] - users = attr.ib(default=attr.Factory(list)) # type: List[bytes] + bookmarks = attr.ib(default=attr.Factory(list)) + branches = attr.ib(default=attr.Factory(list)) + date = attr.ib(default=None) + keywords = attr.ib(default=attr.Factory(list)) + no_merges = attr.ib(default=False) + only_merges = attr.ib(default=False) + prune_ancestors = attr.ib(default=attr.Factory(list)) + users = attr.ib(default=attr.Factory(list)) # miscellaneous matcher arguments - include_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes] - exclude_pats = attr.ib(default=attr.Factory(list)) # type: List[bytes] + include_pats = attr.ib(default=attr.Factory(list)) + exclude_pats = attr.ib(default=attr.Factory(list)) # 0: no follow, 1: follow first, 2: follow both parents - follow = attr.ib(default=0) 
# type: int + follow = attr.ib(default=0) # do not attempt filelog-based traversal, which may be fast but cannot # include revisions where files were removed - force_changelog_traversal = attr.ib(default=False) # type: bool + force_changelog_traversal = attr.ib(default=False) # filter revisions by file patterns, which should be disabled only if # you want to include revisions where files were unmodified - filter_revisions_by_pats = attr.ib(default=True) # type: bool + filter_revisions_by_pats = attr.ib(default=True) # sort revisions prior to traversal: 'desc', 'topo', or None - sort_revisions = attr.ib(default=None) # type: Optional[bytes] + sort_revisions = attr.ib(default=None) # limit number of changes displayed; None means unlimited - limit = attr.ib(default=None) # type: Optional[int] + limit = attr.ib(default=None) def parseopts(ui, pats, opts):
--- a/mercurial/parser.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/parser.py Tue Sep 28 09:40:57 2021 +0200 @@ -21,7 +21,6 @@ from .i18n import _ from . import ( error, - pycompat, util, ) from .utils import stringutil @@ -216,7 +215,11 @@ return stringutil.unescapestr(s) except ValueError as e: # mangle Python's exception into our format - raise error.ParseError(pycompat.bytestr(e).lower()) + # TODO: remove this suppression. For some reason, pytype 2021.09.09 + # thinks .lower() is being called on Union[ValueError, bytes]. + # pytype: disable=attribute-error + raise error.ParseError(stringutil.forcebytestr(e).lower()) + # pytype: enable=attribute-error def _prettyformat(tree, leafnodes, level, lines):
--- a/mercurial/pathutil.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/pathutil.py Tue Sep 28 09:40:57 2021 +0200 @@ -315,20 +315,19 @@ class dirs(object): '''a multiset of directory names from a set of file paths''' - def __init__(self, map, skip=None): + def __init__(self, map, only_tracked=False): """ a dict map indicates a dirstate while a list indicates a manifest """ self._dirs = {} addpath = self.addpath - if isinstance(map, dict) and skip is not None: + if isinstance(map, dict) and only_tracked: for f, s in pycompat.iteritems(map): - if s.state != skip: + if s.state != b'r': addpath(f) - elif skip is not None: - raise error.ProgrammingError( - b"skip character is only supported with a dict source" - ) + elif only_tracked: + msg = b"`only_tracked` is only supported with a dict source" + raise error.ProgrammingError(msg) else: for f in map: addpath(f)
--- a/mercurial/pure/parsers.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/pure/parsers.py Tue Sep 28 09:40:57 2021 +0200 @@ -56,16 +56,117 @@ - mtime, """ - _state = attr.ib() + _wc_tracked = attr.ib() + _p1_tracked = attr.ib() + _p2_tracked = attr.ib() + # the three item above should probably be combined + # + # However it is unclear if they properly cover some of the most advanced + # merge case. So we should probably wait on this to be settled. + _merged = attr.ib() + _clean_p1 = attr.ib() + _clean_p2 = attr.ib() + _possibly_dirty = attr.ib() _mode = attr.ib() _size = attr.ib() _mtime = attr.ib() - def __init__(self, state, mode, size, mtime): - self._state = state - self._mode = mode - self._size = size - self._mtime = mtime + def __init__( + self, + wc_tracked=False, + p1_tracked=False, + p2_tracked=False, + merged=False, + clean_p1=False, + clean_p2=False, + possibly_dirty=False, + parentfiledata=None, + ): + if merged and (clean_p1 or clean_p2): + msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`' + raise error.ProgrammingError(msg) + + self._wc_tracked = wc_tracked + self._p1_tracked = p1_tracked + self._p2_tracked = p2_tracked + self._merged = merged + self._clean_p1 = clean_p1 + self._clean_p2 = clean_p2 + self._possibly_dirty = possibly_dirty + if parentfiledata is None: + self._mode = None + self._size = None + self._mtime = None + else: + self._mode = parentfiledata[0] + self._size = parentfiledata[1] + self._mtime = parentfiledata[2] + + @classmethod + def new_added(cls): + """constructor to help legacy API to build a new "added" item + + Should eventually be removed + """ + instance = cls() + instance._wc_tracked = True + instance._p1_tracked = False + instance._p2_tracked = False + return instance + + @classmethod + def new_merged(cls): + """constructor to help legacy API to build a new "merged" item + + Should eventually be removed + """ + instance = cls() + instance._wc_tracked = True + instance._p1_tracked = True # might not 
be True because of rename ? + instance._p2_tracked = True # might not be True because of rename ? + instance._merged = True + return instance + + @classmethod + def new_from_p2(cls): + """constructor to help legacy API to build a new "from_p2" item + + Should eventually be removed + """ + instance = cls() + instance._wc_tracked = True + instance._p1_tracked = False # might actually be True + instance._p2_tracked = True + instance._clean_p2 = True + return instance + + @classmethod + def new_possibly_dirty(cls): + """constructor to help legacy API to build a new "possibly_dirty" item + + Should eventually be removed + """ + instance = cls() + instance._wc_tracked = True + instance._p1_tracked = True + instance._possibly_dirty = True + return instance + + @classmethod + def new_normal(cls, mode, size, mtime): + """constructor to help legacy API to build a new "normal" item + + Should eventually be removed + """ + assert size != FROM_P2 + assert size != NONNORMAL + instance = cls() + instance._wc_tracked = True + instance._p1_tracked = True + instance._mode = mode + instance._size = size + instance._mtime = mtime + return instance @classmethod def from_v1_data(cls, state, mode, size, mtime): @@ -74,12 +175,44 @@ Since the dirstate-v1 format is frozen, the signature of this function is not expected to change, unlike the __init__ one. """ - return cls( - state=state, - mode=mode, - size=size, - mtime=mtime, - ) + if state == b'm': + return cls.new_merged() + elif state == b'a': + return cls.new_added() + elif state == b'r': + instance = cls() + instance._wc_tracked = False + if size == NONNORMAL: + instance._merged = True + instance._p1_tracked = ( + True # might not be True because of rename ? + ) + instance._p2_tracked = ( + True # might not be True because of rename ? 
+ ) + elif size == FROM_P2: + instance._clean_p2 = True + instance._p1_tracked = ( + False # We actually don't know (file history) + ) + instance._p2_tracked = True + else: + instance._p1_tracked = True + return instance + elif state == b'n': + if size == FROM_P2: + return cls.new_from_p2() + elif size == NONNORMAL: + return cls.new_possibly_dirty() + elif mtime == AMBIGUOUS_TIME: + instance = cls.new_normal(mode, size, 42) + instance._mtime = None + instance._possibly_dirty = True + return instance + else: + return cls.new_normal(mode, size, mtime) + else: + raise RuntimeError(b'unknown state: %s' % state) def set_possibly_dirty(self): """Mark a file as "possibly dirty" @@ -87,39 +220,60 @@ This means the next status call will have to actually check its content to make sure it is correct. """ - self._mtime = AMBIGUOUS_TIME + self._possibly_dirty = True + + def set_clean(self, mode, size, mtime): + """mark a file as "clean" cancelling potential "possibly dirty call" + + Note: this function is a descendant of `dirstate.normal` and is + currently expected to be call on "normal" entry only. There are not + reason for this to not change in the future as long as the ccode is + updated to preserve the proper state of the non-normal files. 
+ """ + self._wc_tracked = True + self._p1_tracked = True + self._p2_tracked = False # this might be wrong + self._merged = False + self._clean_p2 = False + self._possibly_dirty = False + self._mode = mode + self._size = size + self._mtime = mtime - def __getitem__(self, idx): - if idx == 0 or idx == -4: - msg = b"do not use item[x], use item.state" - util.nouideprecwarn(msg, b'6.0', stacklevel=2) - return self._state - elif idx == 1 or idx == -3: - msg = b"do not use item[x], use item.mode" - util.nouideprecwarn(msg, b'6.0', stacklevel=2) - return self._mode - elif idx == 2 or idx == -2: - msg = b"do not use item[x], use item.size" - util.nouideprecwarn(msg, b'6.0', stacklevel=2) - return self._size - elif idx == 3 or idx == -1: - msg = b"do not use item[x], use item.mtime" - util.nouideprecwarn(msg, b'6.0', stacklevel=2) - return self._mtime - else: - raise IndexError(idx) + def set_tracked(self): + """mark a file as tracked in the working copy + + This will ultimately be called by command like `hg add`. + """ + self._wc_tracked = True + # `set_tracked` is replacing various `normallookup` call. So we set + # "possibly dirty" to stay on the safe side. + # + # Consider dropping this in the future in favor of something less broad. + self._possibly_dirty = True + + def set_untracked(self): + """mark a file as untracked in the working copy + + This will ultimately be called by command like `hg remove`. + """ + # backup the previous state (useful for merge) + self._wc_tracked = False + self._mode = None + self._size = None + self._mtime = None @property def mode(self): - return self._mode + return self.v1_mode() @property def size(self): - return self._size + return self.v1_size() @property def mtime(self): - return self._mtime + return self.v1_mtime() @property def state(self): @@ -134,17 +288,17 @@ dirstatev1 format. It would make sense to ultimately deprecate it in favor of the more "semantic" attributes. 
""" - return self._state + return self.v1_state() @property def tracked(self): """True is the file is tracked in the working copy""" - return self._state in b"nma" + return self._wc_tracked @property def added(self): """True if the file has been added""" - return self._state == b'a' + return self._wc_tracked and not (self._p1_tracked or self._p2_tracked) @property def merged(self): @@ -152,7 +306,7 @@ Should only be set if a merge is in progress in the dirstate """ - return self._state == b'm' + return self._wc_tracked and self._merged @property def from_p2(self): @@ -162,7 +316,9 @@ Should only be set if a merge is in progress in the dirstate """ - return self._state == b'n' and self._size == FROM_P2 + if not self._wc_tracked: + return False + return self._clean_p2 or (not self._p1_tracked and self._p2_tracked) @property def from_p2_removed(self): @@ -171,12 +327,12 @@ This property seems like an abstraction leakage and should probably be dealt in this class (or maybe the dirstatemap) directly. """ - return self._state == b'r' and self._size == FROM_P2 + return self.removed and self._clean_p2 @property def removed(self): """True if the file has been removed""" - return self._state == b'r' + return not self._wc_tracked and (self._p1_tracked or self._p2_tracked) @property def merged_removed(self): @@ -185,7 +341,7 @@ This property seems like an abstraction leakage and should probably be dealt in this class (or maybe the dirstatemap) directly. """ - return self._state == b'r' and self._size == NONNORMAL + return self.removed and self._merged @property def dm_nonnormal(self): @@ -193,7 +349,7 @@ There is no reason for any code, but the dirstatemap one to use this. """ - return self.state != b'n' or self.mtime == AMBIGUOUS_TIME + return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME @property def dm_otherparent(self): @@ -201,27 +357,72 @@ There is no reason for any code, but the dirstatemap one to use this. 
""" - return self._size == FROM_P2 + return self.v1_size() == FROM_P2 def v1_state(self): """return a "state" suitable for v1 serialization""" - return self._state + if not (self._p1_tracked or self._p2_tracked or self._wc_tracked): + # the object has no state to record, this is -currently- + # unsupported + raise RuntimeError('untracked item') + elif self.removed: + return b'r' + elif self.merged: + return b'm' + elif self.added: + return b'a' + else: + return b'n' def v1_mode(self): """return a "mode" suitable for v1 serialization""" - return self._mode + return self._mode if self._mode is not None else 0 def v1_size(self): """return a "size" suitable for v1 serialization""" - return self._size + if not (self._p1_tracked or self._p2_tracked or self._wc_tracked): + # the object has no state to record, this is -currently- + # unsupported + raise RuntimeError('untracked item') + elif self.merged_removed: + return NONNORMAL + elif self.from_p2_removed: + return FROM_P2 + elif self.removed: + return 0 + elif self.merged: + return FROM_P2 + elif self.added: + return NONNORMAL + elif self.from_p2: + return FROM_P2 + elif self._possibly_dirty: + return self._size if self._size is not None else NONNORMAL + else: + return self._size def v1_mtime(self): """return a "mtime" suitable for v1 serialization""" - return self._mtime + if not (self._p1_tracked or self._p2_tracked or self._wc_tracked): + # the object has no state to record, this is -currently- + # unsupported + raise RuntimeError('untracked item') + elif self.removed: + return 0 + elif self._possibly_dirty: + return AMBIGUOUS_TIME + elif self.merged: + return AMBIGUOUS_TIME + elif self.added: + return AMBIGUOUS_TIME + elif self.from_p2: + return AMBIGUOUS_TIME + else: + return self._mtime if self._mtime is not None else 0 def need_delay(self, now): """True if the stored mtime would be ambiguous with the current time""" - return self._state == b'n' and self._mtime == now + return self.v1_state() == b'n' and 
self.v1_mtime() == now def gettype(q):
--- a/mercurial/pycompat.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/pycompat.py Tue Sep 28 09:40:57 2021 +0200 @@ -222,6 +222,15 @@ >>> assert type(t) is bytes """ + # Trick pytype into not demanding Iterable[int] be passed to __new__(), + # since the appropriate bytes format is done internally. + # + # https://github.com/google/pytype/issues/500 + if TYPE_CHECKING: + + def __init__(self, s=b''): + pass + def __new__(cls, s=b''): if isinstance(s, bytestr): return s
--- a/mercurial/repair.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/repair.py Tue Sep 28 09:40:57 2021 +0200 @@ -433,7 +433,7 @@ if scmutil.istreemanifest(repo): # This logic is safe if treemanifest isn't enabled, but also # pointless, so we skip it if treemanifest isn't enabled. - for t, unencoded, encoded, size in repo.store.datafiles(): + for t, unencoded, size in repo.store.datafiles(): if unencoded.startswith(b'meta/') and unencoded.endswith( b'00manifest.i' ): @@ -441,7 +441,7 @@ yield repo.manifestlog.getstorage(dir) -def rebuildfncache(ui, repo): +def rebuildfncache(ui, repo, only_data=False): """Rebuilds the fncache file from repo history. Missing entries will be added. Extra entries will be removed. @@ -465,28 +465,40 @@ newentries = set() seenfiles = set() - progress = ui.makeprogress( - _(b'rebuilding'), unit=_(b'changesets'), total=len(repo) - ) - for rev in repo: - progress.update(rev) + if only_data: + # Trust the listing of .i from the fncache, but not the .d. This is + # much faster, because we only need to stat every possible .d files, + # instead of reading the full changelog + for f in fnc: + if f[:5] == b'data/' and f[-2:] == b'.i': + seenfiles.add(f[5:-2]) + newentries.add(f) + dataf = f[:-2] + b'.d' + if repo.store._exists(dataf): + newentries.add(dataf) + else: + progress = ui.makeprogress( + _(b'rebuilding'), unit=_(b'changesets'), total=len(repo) + ) + for rev in repo: + progress.update(rev) - ctx = repo[rev] - for f in ctx.files(): - # This is to minimize I/O. - if f in seenfiles: - continue - seenfiles.add(f) + ctx = repo[rev] + for f in ctx.files(): + # This is to minimize I/O. 
+ if f in seenfiles: + continue + seenfiles.add(f) - i = b'data/%s.i' % f - d = b'data/%s.d' % f + i = b'data/%s.i' % f + d = b'data/%s.d' % f - if repo.store._exists(i): - newentries.add(i) - if repo.store._exists(d): - newentries.add(d) + if repo.store._exists(i): + newentries.add(i) + if repo.store._exists(d): + newentries.add(d) - progress.complete() + progress.complete() if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: # This logic is safe if treemanifest isn't enabled, but also
--- a/mercurial/revlogutils/rewrite.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/revlogutils/rewrite.py Tue Sep 28 09:40:57 2021 +0200 @@ -824,7 +824,7 @@ with context(): files = list( (file_type, path) - for (file_type, path, _e, _s) in repo.store.datafiles() + for (file_type, path, _s) in repo.store.datafiles() if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG )
--- a/mercurial/scmutil.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/scmutil.py Tue Sep 28 09:40:57 2021 +0200 @@ -689,7 +689,7 @@ l = revrange(repo, [revspec], localalias=localalias) if not l: - raise error.Abort(_(b'empty revision set')) + raise error.InputError(_(b'empty revision set')) return repo[l.last()] @@ -710,7 +710,7 @@ l = revrange(repo, revs) if not l: - raise error.Abort(_(b'empty revision range')) + raise error.InputError(_(b'empty revision range')) first = l.first() second = l.last() @@ -720,7 +720,7 @@ and len(revs) >= 2 and not all(revrange(repo, [r]) for r in revs) ): - raise error.Abort(_(b'empty revision on one side of range')) + raise error.InputError(_(b'empty revision on one side of range')) # if top-level is range expression, the result must always be a pair if first == second and len(revs) == 1 and not _pairspec(revs[0]): @@ -1211,9 +1211,9 @@ try: similarity = float(opts.get(b'similarity') or 0) except ValueError: - raise error.Abort(_(b'similarity must be a number')) + raise error.InputError(_(b'similarity must be a number')) if similarity < 0 or similarity > 100: - raise error.Abort(_(b'similarity must be between 0 and 100')) + raise error.InputError(_(b'similarity must be between 0 and 100')) similarity /= 100.0 ret = 0
--- a/mercurial/store.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/store.py Tue Sep 28 09:40:57 2021 +0200 @@ -472,7 +472,7 @@ return self.path + b'/' + encodedir(f) def _walk(self, relpath, recurse): - '''yields (unencoded, encoded, size)''' + '''yields (revlog_type, unencoded, size)''' path = self.path if relpath: path += b'/' + relpath @@ -488,7 +488,7 @@ rl_type = is_revlog(f, kind, st) if rl_type is not None: n = util.pconvert(fp[striplen:]) - l.append((rl_type, decodedir(n), n, st.st_size)) + l.append((rl_type, decodedir(n), st.st_size)) elif kind == stat.S_IFDIR and recurse: visit.append(fp) l.sort() @@ -505,26 +505,32 @@ rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs) return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch) - def datafiles(self, matcher=None): + def datafiles(self, matcher=None, undecodable=None): + """Like walk, but excluding the changelog and root manifest. + + When [undecodable] is None, revlogs names that can't be + decoded cause an exception. When it is provided, it should + be a list and the filenames that can't be decoded are added + to it instead. 
This is very rarely needed.""" files = self._walk(b'data', True) + self._walk(b'meta', True) - for (t, u, e, s) in files: - yield (FILEFLAGS_FILELOG | t, u, e, s) + for (t, u, s) in files: + yield (FILEFLAGS_FILELOG | t, u, s) def topfiles(self): # yield manifest before changelog files = reversed(self._walk(b'', False)) - for (t, u, e, s) in files: + for (t, u, s) in files: if u.startswith(b'00changelog'): - yield (FILEFLAGS_CHANGELOG | t, u, e, s) + yield (FILEFLAGS_CHANGELOG | t, u, s) elif u.startswith(b'00manifest'): - yield (FILEFLAGS_MANIFESTLOG | t, u, e, s) + yield (FILEFLAGS_MANIFESTLOG | t, u, s) else: - yield (FILETYPE_OTHER | t, u, e, s) + yield (FILETYPE_OTHER | t, u, s) def walk(self, matcher=None): """return file related to data storage (ie: revlogs) - yields (file_type, unencoded, encoded, size) + yields (file_type, unencoded, size) if a matcher is passed, storage files of only those tracked paths are passed with matches the matcher @@ -574,15 +580,20 @@ # However that might change so we should probably add a test and encoding # decoding for it too. 
see issue6548 - def datafiles(self, matcher=None): - for t, a, b, size in super(encodedstore, self).datafiles(): + def datafiles(self, matcher=None, undecodable=None): + for t, f1, size in super(encodedstore, self).datafiles(): try: - a = decodefilename(a) + f2 = decodefilename(f1) except KeyError: - a = None - if a is not None and not _matchtrackedpath(a, matcher): + if undecodable is None: + msg = _(b'undecodable revlog name %s') % f1 + raise error.StorageError(msg) + else: + undecodable.append(f1) + continue + if not _matchtrackedpath(f2, matcher): continue - yield t, a, b, size + yield t, f2, size def join(self, f): return self.path + b'/' + encodefilename(f) @@ -770,7 +781,7 @@ def getsize(self, path): return self.rawvfs.stat(path).st_size - def datafiles(self, matcher=None): + def datafiles(self, matcher=None, undecodable=None): for f in sorted(self.fncache): if not _matchtrackedpath(f, matcher): continue @@ -779,7 +790,7 @@ t = revlog_type(f) assert t is not None, f t |= FILEFLAGS_FILELOG - yield t, f, ef, self.getsize(ef) + yield t, f, self.getsize(ef) except OSError as err: if err.errno != errno.ENOENT: raise
--- a/mercurial/streamclone.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/streamclone.py Tue Sep 28 09:40:57 2021 +0200 @@ -248,7 +248,7 @@ # Get consistent snapshot of repo, lock during scan. with repo.lock(): repo.ui.debug(b'scanning\n') - for file_type, name, ename, size in _walkstreamfiles(repo): + for file_type, name, size in _walkstreamfiles(repo): if size: entries.append((name, size)) total_bytes += size @@ -650,7 +650,7 @@ if includes or excludes: matcher = narrowspec.match(repo.root, includes, excludes) - for rl_type, name, ename, size in _walkstreamfiles(repo, matcher): + for rl_type, name, size in _walkstreamfiles(repo, matcher): if size: ft = _fileappend if rl_type & store.FILEFLAGS_VOLATILE:
--- a/mercurial/upgrade_utils/engine.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/upgrade_utils/engine.py Tue Sep 28 09:40:57 2021 +0200 @@ -201,7 +201,7 @@ # Perform a pass to collect metadata. This validates we can open all # source files and allows a unified progress bar to be displayed. - for rl_type, unencoded, encoded, size in alldatafiles: + for rl_type, unencoded, size in alldatafiles: if not rl_type & store.FILEFLAGS_REVLOG_MAIN: continue
--- a/mercurial/util.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/util.py Tue Sep 28 09:40:57 2021 +0200 @@ -449,8 +449,8 @@ return b'' elif size is None: size = 0 + fd = getattr(fp, 'fileno', lambda: fp)() try: - fd = getattr(fp, 'fileno', lambda: fp)() return mmap.mmap(fd, size, access=mmap.ACCESS_READ) except ValueError: # Empty files cannot be mmapped, but mmapread should still work. Check @@ -1225,6 +1225,8 @@ if n == 4: return (vints[0], vints[1], vints[2], extra) + raise error.ProgrammingError(b"invalid version part request: %d" % n) + def cachefunc(func): '''cache the result of function calls'''
--- a/mercurial/utils/resourceutil.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/utils/resourceutil.py Tue Sep 28 09:40:57 2021 +0200 @@ -57,30 +57,11 @@ try: # importlib.resources exists from Python 3.7; see fallback in except clause # further down - from importlib import resources - - from .. import encoding + from importlib import resources # pytype: disable=import-error # Force loading of the resources module resources.open_binary # pytype: disable=module-attr - def open_resource(package, name): - return resources.open_binary( # pytype: disable=module-attr - pycompat.sysstr(package), pycompat.sysstr(name) - ) - - def is_resource(package, name): - return resources.is_resource( # pytype: disable=module-attr - pycompat.sysstr(package), encoding.strfromlocal(name) - ) - - def contents(package): - # pytype: disable=module-attr - for r in resources.contents(pycompat.sysstr(package)): - # pytype: enable=module-attr - yield encoding.strtolocal(r) - - except (ImportError, AttributeError): # importlib.resources was not found (almost definitely because we're on a # Python version before 3.7) @@ -102,3 +83,23 @@ for p in os.listdir(path): yield pycompat.fsencode(p) + + +else: + from .. import encoding + + def open_resource(package, name): + return resources.open_binary( # pytype: disable=module-attr + pycompat.sysstr(package), pycompat.sysstr(name) + ) + + def is_resource(package, name): + return resources.is_resource( # pytype: disable=module-attr + pycompat.sysstr(package), encoding.strfromlocal(name) + ) + + def contents(package): + # pytype: disable=module-attr + for r in resources.contents(pycompat.sysstr(package)): + # pytype: enable=module-attr + yield encoding.strtolocal(r)
--- a/mercurial/verify.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/verify.py Tue Sep 28 09:40:57 2021 +0200 @@ -395,12 +395,13 @@ storefiles = set() subdirs = set() revlogv1 = self.revlogv1 - for t, f, f2, size in repo.store.datafiles(): - if not f: - self._err(None, _(b"cannot decode filename '%s'") % f2) - elif (size > 0 or not revlogv1) and f.startswith(b'meta/'): + undecodable = [] + for t, f, size in repo.store.datafiles(undecodable=undecodable): + if (size > 0 or not revlogv1) and f.startswith(b'meta/'): storefiles.add(_normpath(f)) subdirs.add(os.path.dirname(f)) + for f in undecodable: + self._err(None, _(b"cannot decode filename '%s'") % f) subdirprogress = ui.makeprogress( _(b'checking'), unit=_(b'manifests'), total=len(subdirs) ) @@ -459,11 +460,12 @@ ui.status(_(b"checking files\n")) storefiles = set() - for rl_type, f, f2, size in repo.store.datafiles(): - if not f: - self._err(None, _(b"cannot decode filename '%s'") % f2) - elif (size > 0 or not revlogv1) and f.startswith(b'data/'): + undecodable = [] + for t, f, size in repo.store.datafiles(undecodable=undecodable): + if (size > 0 or not revlogv1) and f.startswith(b'data/'): storefiles.add(_normpath(f)) + for f in undecodable: + self._err(None, _(b"cannot decode filename '%s'") % f) state = { # TODO this assumes revlog storage for changelog.
--- a/mercurial/windows.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/windows.py Tue Sep 28 09:40:57 2021 +0200 @@ -175,7 +175,7 @@ return mixedfilemodewrapper(fp) return fp - except WindowsError as err: + except WindowsError as err: # pytype: disable=name-error # convert to a friendlier exception raise IOError( err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
--- a/mercurial/wireprotov1peer.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/wireprotov1peer.py Tue Sep 28 09:40:57 2021 +0200 @@ -44,13 +44,9 @@ def sample(self, one, two=None): # Build list of encoded arguments suitable for your wire protocol: encoded_args = [('one', encode(one),), ('two', encode(two),)] - # Create future for injection of encoded result: - encoded_res_future = future() - # Return encoded arguments and future: - yield encoded_args, encoded_res_future - # Assuming the future to be filled with the result from the batched - # request now. Decode it: - yield decode(encoded_res_future.value) + # Return it, along with a function that will receive the result + # from the batched request. + return encoded_args, decode The decorator returns a function which wraps this coroutine as a plain method, but adds the original method as an attribute called "batchable", @@ -59,29 +55,19 @@ """ def plain(*args, **opts): - batchable = f(*args, **opts) - encoded_args_or_res, encoded_res_future = next(batchable) - if not encoded_res_future: + encoded_args_or_res, decode = f(*args, **opts) + if not decode: return encoded_args_or_res # a local result in this case self = args[0] cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr - encoded_res_future.set(self._submitone(cmd, encoded_args_or_res)) - return next(batchable) + encoded_res = self._submitone(cmd, encoded_args_or_res) + return decode(encoded_res) setattr(plain, 'batchable', f) setattr(plain, '__name__', f.__name__) return plain -class future(object): - '''placeholder for a value to be set later''' - - def set(self, value): - if util.safehasattr(self, b'value'): - raise error.RepoError(b"future is already set") - self.value = value - - def encodebatchcmds(req): """Return a ``cmds`` argument value for the ``batch`` command.""" escapearg = wireprototypes.escapebatcharg @@ -248,25 +234,18 @@ continue try: - batchable = fn.batchable( + encoded_args_or_res, decode = fn.batchable( fn.__self__, 
**pycompat.strkwargs(args) ) except Exception: pycompat.future_set_exception_info(f, sys.exc_info()[1:]) return - # Encoded arguments and future holding remote result. - try: - encoded_args_or_res, fremote = next(batchable) - except Exception: - pycompat.future_set_exception_info(f, sys.exc_info()[1:]) - return - - if not fremote: + if not decode: f.set_result(encoded_args_or_res) else: requests.append((command, encoded_args_or_res)) - states.append((command, f, batchable, fremote)) + states.append((command, f, batchable, decode)) if not requests: return @@ -319,7 +298,7 @@ def _readbatchresponse(self, states, wireresults): # Executes in a thread to read data off the wire. - for command, f, batchable, fremote in states: + for command, f, batchable, decode in states: # Grab raw result off the wire and teach the internal future # about it. try: @@ -334,11 +313,8 @@ ) ) else: - fremote.set(remoteresult) - - # And ask the coroutine to decode that value. try: - result = next(batchable) + result = decode(remoteresult) except Exception: pycompat.future_set_exception_info(f, sys.exc_info()[1:]) else: @@ -369,87 +345,90 @@ @batchable def lookup(self, key): self.requirecap(b'lookup', _(b'look up remote revision')) - f = future() - yield {b'key': encoding.fromlocal(key)}, f - d = f.value - success, data = d[:-1].split(b" ", 1) - if int(success): - yield bin(data) - else: - self._abort(error.RepoError(data)) + + def decode(d): + success, data = d[:-1].split(b" ", 1) + if int(success): + return bin(data) + else: + self._abort(error.RepoError(data)) + + return {b'key': encoding.fromlocal(key)}, decode @batchable def heads(self): - f = future() - yield {}, f - d = f.value - try: - yield wireprototypes.decodelist(d[:-1]) - except ValueError: - self._abort(error.ResponseError(_(b"unexpected response:"), d)) + def decode(d): + try: + return wireprototypes.decodelist(d[:-1]) + except ValueError: + self._abort(error.ResponseError(_(b"unexpected response:"), d)) + + return {}, decode 
@batchable def known(self, nodes): - f = future() - yield {b'nodes': wireprototypes.encodelist(nodes)}, f - d = f.value - try: - yield [bool(int(b)) for b in pycompat.iterbytestr(d)] - except ValueError: - self._abort(error.ResponseError(_(b"unexpected response:"), d)) + def decode(d): + try: + return [bool(int(b)) for b in pycompat.iterbytestr(d)] + except ValueError: + self._abort(error.ResponseError(_(b"unexpected response:"), d)) + + return {b'nodes': wireprototypes.encodelist(nodes)}, decode @batchable def branchmap(self): - f = future() - yield {}, f - d = f.value - try: - branchmap = {} - for branchpart in d.splitlines(): - branchname, branchheads = branchpart.split(b' ', 1) - branchname = encoding.tolocal(urlreq.unquote(branchname)) - branchheads = wireprototypes.decodelist(branchheads) - branchmap[branchname] = branchheads - yield branchmap - except TypeError: - self._abort(error.ResponseError(_(b"unexpected response:"), d)) + def decode(d): + try: + branchmap = {} + for branchpart in d.splitlines(): + branchname, branchheads = branchpart.split(b' ', 1) + branchname = encoding.tolocal(urlreq.unquote(branchname)) + branchheads = wireprototypes.decodelist(branchheads) + branchmap[branchname] = branchheads + return branchmap + except TypeError: + self._abort(error.ResponseError(_(b"unexpected response:"), d)) + + return {}, decode @batchable def listkeys(self, namespace): if not self.capable(b'pushkey'): - yield {}, None - f = future() + return {}, None self.ui.debug(b'preparing listkeys for "%s"\n' % namespace) - yield {b'namespace': encoding.fromlocal(namespace)}, f - d = f.value - self.ui.debug( - b'received listkey for "%s": %i bytes\n' % (namespace, len(d)) - ) - yield pushkeymod.decodekeys(d) + + def decode(d): + self.ui.debug( + b'received listkey for "%s": %i bytes\n' % (namespace, len(d)) + ) + return pushkeymod.decodekeys(d) + + return {b'namespace': encoding.fromlocal(namespace)}, decode @batchable def pushkey(self, namespace, key, old, new): if 
not self.capable(b'pushkey'): - yield False, None - f = future() + return False, None self.ui.debug(b'preparing pushkey for "%s:%s"\n' % (namespace, key)) - yield { + + def decode(d): + d, output = d.split(b'\n', 1) + try: + d = bool(int(d)) + except ValueError: + raise error.ResponseError( + _(b'push failed (unexpected response):'), d + ) + for l in output.splitlines(True): + self.ui.status(_(b'remote: '), l) + return d + + return { b'namespace': encoding.fromlocal(namespace), b'key': encoding.fromlocal(key), b'old': encoding.fromlocal(old), b'new': encoding.fromlocal(new), - }, f - d = f.value - d, output = d.split(b'\n', 1) - try: - d = bool(int(d)) - except ValueError: - raise error.ResponseError( - _(b'push failed (unexpected response):'), d - ) - for l in output.splitlines(True): - self.ui.status(_(b'remote: '), l) - yield d + }, decode def stream_out(self): return self._callstream(b'stream_out')
--- a/mercurial/wireprotov2server.py Tue Sep 21 18:18:56 2021 +0200 +++ b/mercurial/wireprotov2server.py Tue Sep 28 09:40:57 2021 +0200 @@ -1579,7 +1579,7 @@ # TODO this is a bunch of storage layer interface abstractions because # it assumes revlogs. - for rl_type, name, encodedname, size in topfiles: + for rl_type, name, size in topfiles: # XXX use the `rl_type` for that if b'changelog' in files and name.startswith(b'00changelog'): pass
--- a/rust/Cargo.lock Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/Cargo.lock Tue Sep 28 09:40:57 2021 +0200 @@ -1,5 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. + [[package]] name = "adler" version = "0.2.3" @@ -386,7 +387,7 @@ "itertools", "lazy_static", "log", - "memmap", + "memmap2", "micro-timer", "pretty_assertions", "rand", @@ -396,6 +397,7 @@ "regex", "same-file", "sha-1", + "stable_deref_trait", "tempfile", "twox-hash", "zstd", @@ -411,6 +413,7 @@ "hg-core", "libc", "log", + "stable_deref_trait", ] [[package]] @@ -508,13 +511,13 @@ checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] -name = "memmap" -version = "0.7.0" +name = "memmap2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +checksum = "de5d3112c080d58ce560081baeaab7e1e864ca21795ddbf533d5b1842bb1ecf8" dependencies = [ "libc", - "winapi", + "stable_deref_trait", ] [[package]] @@ -865,6 +868,12 @@ ] [[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index"
--- a/rust/hg-core/Cargo.toml Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/Cargo.toml Tue Sep 28 09:40:57 2021 +0200 @@ -24,11 +24,12 @@ sha-1 = "0.9.6" twox-hash = "1.5.0" same-file = "1.0.6" +stable_deref_trait = "1.2.0" tempfile = "3.1.0" crossbeam-channel = "0.4" micro-timer = "0.3.0" log = "0.4.8" -memmap = "0.7.0" +memmap2 = {version = "0.4", features = ["stable_deref_trait"]} zstd = "0.5.3" format-bytes = "0.2.2"
--- a/rust/hg-core/examples/nodemap/index.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/examples/nodemap/index.rs Tue Sep 28 09:40:57 2021 +0200 @@ -5,7 +5,7 @@ //! Minimal `RevlogIndex`, readable from standard Mercurial file format use hg::*; -use memmap::*; +use memmap2::*; use std::fs::File; use std::ops::Deref; use std::path::Path;
--- a/rust/hg-core/examples/nodemap/main.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/examples/nodemap/main.rs Tue Sep 28 09:40:57 2021 +0200 @@ -7,7 +7,7 @@ use hg::revlog::node::*; use hg::revlog::nodemap::*; use hg::revlog::*; -use memmap::MmapOptions; +use memmap2::MmapOptions; use rand::Rng; use std::fs::File; use std::io;
--- a/rust/hg-core/src/config/config.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/config/config.rs Tue Sep 28 09:40:57 2021 +0200 @@ -13,7 +13,6 @@ ConfigError, ConfigLayer, ConfigOrigin, ConfigValue, }; use crate::utils::files::get_bytes_from_os_str; -use crate::utils::SliceExt; use format_bytes::{write_bytes, DisplayBytes}; use std::collections::HashSet; use std::env; @@ -362,30 +361,14 @@ Ok(self.get_option(section, item)?.unwrap_or(false)) } - /// Returns the corresponding list-value in the config if found, or `None`. - /// - /// This is appropriate for new configuration keys. The value syntax is - /// **not** the same as most existing list-valued config, which has Python - /// parsing implemented in `parselist()` in - /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing - /// algorithm to Rust (including behavior that are arguably bugs) - /// turned out to be non-trivial and hasn’t been completed as of this - /// writing. - /// - /// Instead, the "simple" syntax is: split on comma, then trim leading and - /// trailing whitespace of each component. Quotes or backslashes are not - /// interpreted in any way. Commas are mandatory between values. Values - /// that contain a comma are not supported. - pub fn get_simple_list( + /// If there is an `item` value in `section`, parse and return a list of + /// byte strings. + pub fn get_list( &self, section: &[u8], item: &[u8], - ) -> Option<impl Iterator<Item = &[u8]>> { - self.get(section, item).map(|value| { - value - .split(|&byte| byte == b',') - .map(|component| component.trim()) - }) + ) -> Option<Vec<Vec<u8>>> { + self.get(section, item).map(values::parse_list) } /// Returns the raw value bytes of the first one found, or `None`.
--- a/rust/hg-core/src/config/values.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/config/values.rs Tue Sep 28 09:40:57 2021 +0200 @@ -8,6 +8,8 @@ //! details about where the value came from (but omits details of what’s //! invalid inside the value). +use crate::utils::SliceExt; + pub(super) fn parse_bool(v: &[u8]) -> Option<bool> { match v.to_ascii_lowercase().as_slice() { b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true), @@ -42,6 +44,216 @@ value.parse().ok() } +/// Parse a config value as a list of sub-values. +/// +/// Ported from `parselist` in `mercurial/utils/stringutil.py` + +// Note: keep behavior in sync with the Python one. + +// Note: this could return `Vec<Cow<[u8]>>` instead and borrow `input` when +// possible (when there’s no backslash-escapes) but this is probably not worth +// the complexity as config is presumably not accessed inside +// preformance-sensitive loops. +pub(super) fn parse_list(input: &[u8]) -> Vec<Vec<u8>> { + // Port of Python’s `value.lstrip(b' ,\n')` + // TODO: is this really what we want? + let input = + input.trim_start_matches(|b| b == b' ' || b == b',' || b == b'\n'); + parse_list_without_trim_start(input) +} + +fn parse_list_without_trim_start(input: &[u8]) -> Vec<Vec<u8>> { + // Start of port of Python’s `_configlist` + let input = input.trim_end_matches(|b| b == b' ' || b == b','); + if input.is_empty() { + return Vec::new(); + } + + // Just to make “a string” less confusable with “a list of strings”. 
+ type ByteString = Vec<u8>; + + // These correspond to Python’s… + let mut mode = ParserMode::Plain; // `parser` + let mut values = Vec::new(); // `parts[:-1]` + let mut next_value = ByteString::new(); // `parts[-1]` + let mut offset = 0; // `offset` + + // Setting `parser` to `None` is instead handled by returning immediately + enum ParserMode { + Plain, + Quoted, + } + + loop { + match mode { + ParserMode::Plain => { + // Start of port of Python’s `_parse_plain` + let mut whitespace = false; + while let Some(&byte) = input.get(offset) { + if is_space(byte) || byte == b',' { + whitespace = true; + offset += 1; + } else { + break; + } + } + if let Some(&byte) = input.get(offset) { + if whitespace { + values.push(std::mem::take(&mut next_value)) + } + if byte == b'"' && next_value.is_empty() { + mode = ParserMode::Quoted; + } else { + if byte == b'"' && next_value.ends_with(b"\\") { + next_value.pop(); + } + next_value.push(byte); + } + offset += 1; + } else { + values.push(next_value); + return values; + } + } + ParserMode::Quoted => { + // Start of port of Python’s `_parse_quote` + if let Some(&byte) = input.get(offset) { + if byte == b'"' { + // The input contains a quoted zero-length value `""` + debug_assert_eq!(next_value, b""); + values.push(std::mem::take(&mut next_value)); + offset += 1; + while let Some(&byte) = input.get(offset) { + if is_space(byte) || byte == b',' { + offset += 1; + } else { + break; + } + } + mode = ParserMode::Plain; + continue; + } + } + + while let Some(&byte) = input.get(offset) { + if byte == b'"' { + break; + } + if byte == b'\\' && input.get(offset + 1) == Some(&b'"') { + next_value.push(b'"'); + offset += 2; + } else { + next_value.push(byte); + offset += 1; + } + } + + if offset >= input.len() { + // We didn’t find a closing double-quote, + // so treat the opening one as part of an unquoted value + // instead of delimiting the start of a quoted value. + + // `next_value` may have had some backslash-escapes + // unescaped. 
TODO: shouldn’t we use a slice of `input` + // instead? + let mut real_values = + parse_list_without_trim_start(&next_value); + + if let Some(first) = real_values.first_mut() { + first.insert(0, b'"'); + // Drop `next_value` + values.extend(real_values) + } else { + next_value.push(b'"'); + values.push(next_value); + } + return values; + } + + // We’re not at the end of the input, which means the `while` + // loop above ended at at double quote. Skip + // over that. + offset += 1; + + while let Some(&byte) = input.get(offset) { + if byte == b' ' || byte == b',' { + offset += 1; + } else { + break; + } + } + + if offset >= input.len() { + values.push(next_value); + return values; + } + + if offset + 1 == input.len() && input[offset] == b'"' { + next_value.push(b'"'); + offset += 1; + } else { + values.push(std::mem::take(&mut next_value)); + } + + mode = ParserMode::Plain; + } + } + } + + // https://docs.python.org/3/library/stdtypes.html?#bytes.isspace + fn is_space(byte: u8) -> bool { + if let b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c' = byte { + true + } else { + false + } + } +} + +#[test] +fn test_parse_list() { + // Make `assert_eq` error messages nicer + fn as_strings(values: &[Vec<u8>]) -> Vec<String> { + values + .iter() + .map(|v| std::str::from_utf8(v.as_ref()).unwrap().to_owned()) + .collect() + } + macro_rules! 
assert_parse_list { + ( $input: expr => [ $( $output: expr ),* ] ) => { + assert_eq!( + as_strings(&parse_list($input)), + as_strings(&[ $( Vec::from(&$output[..]) ),* ]), + ); + } + } + + // Keep these Rust tests in sync with the Python ones in + // `tests/test-config-parselist.py` + assert_parse_list!(b"" => []); + assert_parse_list!(b"," => []); + assert_parse_list!(b"A" => [b"A"]); + assert_parse_list!(b"B,B" => [b"B", b"B"]); + assert_parse_list!(b", C, ,C," => [b"C", b"C"]); + assert_parse_list!(b"\"" => [b"\""]); + assert_parse_list!(b"\"\"" => [b"", b""]); + assert_parse_list!(b"D,\"" => [b"D", b"\""]); + assert_parse_list!(b"E,\"\"" => [b"E", b"", b""]); + assert_parse_list!(b"\"F,F\"" => [b"F,F"]); + assert_parse_list!(b"\"G,G" => [b"\"G", b"G"]); + assert_parse_list!(b"\"H \\\",\\\"H" => [b"\"H", b",", b"H"]); + assert_parse_list!(b"I,I\"" => [b"I", b"I\""]); + assert_parse_list!(b"J,\"J" => [b"J", b"\"J"]); + assert_parse_list!(b"K K" => [b"K", b"K"]); + assert_parse_list!(b"\"K\" K" => [b"K", b"K"]); + assert_parse_list!(b"L\tL" => [b"L", b"L"]); + assert_parse_list!(b"\"L\"\tL" => [b"L", b"", b"L"]); + assert_parse_list!(b"M\x0bM" => [b"M", b"M"]); + assert_parse_list!(b"\"M\"\x0bM" => [b"M", b"", b"M"]); + assert_parse_list!(b"\"N\" , ,\"" => [b"N\""]); + assert_parse_list!(b"\" ,O, " => [b"\"", b"O"]); +} + #[test] fn test_parse_byte_size() { assert_eq!(parse_byte_size(b""), None);
--- a/rust/hg-core/src/dirstate.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate.rs Tue Sep 28 09:40:57 2021 +0200 @@ -6,20 +6,21 @@ // GNU General Public License version 2 or any later version. use crate::dirstate_tree::on_disk::DirstateV2ParseError; -use crate::errors::HgError; use crate::revlog::node::NULL_NODE; use crate::revlog::Node; use crate::utils::hg_path::{HgPath, HgPathBuf}; use crate::FastHashMap; -use bytes_cast::{unaligned, BytesCast}; -use std::convert::TryFrom; +use bytes_cast::BytesCast; pub mod dirs_multiset; pub mod dirstate_map; +pub mod entry; pub mod parsers; pub mod status; -#[derive(Debug, PartialEq, Clone, BytesCast)] +pub use self::entry::*; + +#[derive(Debug, PartialEq, Copy, Clone, BytesCast)] #[repr(C)] pub struct DirstateParents { pub p1: Node, @@ -33,68 +34,6 @@ }; } -/// The C implementation uses all signed types. This will be an issue -/// either when 4GB+ source files are commonplace or in 2038, whichever -/// comes first. -#[derive(Debug, PartialEq, Copy, Clone)] -pub struct DirstateEntry { - pub state: EntryState, - pub mode: i32, - pub mtime: i32, - pub size: i32, -} - -impl DirstateEntry { - pub fn is_non_normal(&self) -> bool { - self.state != EntryState::Normal || self.mtime == MTIME_UNSET - } - - pub fn is_from_other_parent(&self) -> bool { - self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT - } - - // TODO: other platforms - #[cfg(unix)] - pub fn mode_changed( - &self, - filesystem_metadata: &std::fs::Metadata, - ) -> bool { - use std::os::unix::fs::MetadataExt; - const EXEC_BIT_MASK: u32 = 0o100; - let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK; - let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK; - dirstate_exec_bit != fs_exec_bit - } - - /// Returns a `(state, mode, size, mtime)` tuple as for - /// `DirstateMapMethods::debug_iter`. 
- pub fn debug_tuple(&self) -> (u8, i32, i32, i32) { - (self.state.into(), self.mode, self.size, self.mtime) - } -} - -#[derive(BytesCast)] -#[repr(C)] -struct RawEntry { - state: u8, - mode: unaligned::I32Be, - size: unaligned::I32Be, - mtime: unaligned::I32Be, - length: unaligned::I32Be, -} - -pub const V1_RANGEMASK: i32 = 0x7FFFFFFF; - -pub const MTIME_UNSET: i32 = -1; - -/// A `DirstateEntry` with a size of `-2` means that it was merged from the -/// other parent. This allows revert to pick the right status back during a -/// merge. -pub const SIZE_FROM_OTHER_PARENT: i32 = -2; -/// A special value used for internal representation of special case in -/// dirstate v1 format. -pub const SIZE_NON_NORMAL: i32 = -1; - pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>; pub type StateMapIter<'a> = Box< dyn Iterator< @@ -109,52 +48,3 @@ + Send + 'a, >; - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum EntryState { - Normal, - Added, - Removed, - Merged, - Unknown, -} - -impl EntryState { - pub fn is_tracked(self) -> bool { - use EntryState::*; - match self { - Normal | Added | Merged => true, - Removed | Unknown => false, - } - } -} - -impl TryFrom<u8> for EntryState { - type Error = HgError; - - fn try_from(value: u8) -> Result<Self, Self::Error> { - match value { - b'n' => Ok(EntryState::Normal), - b'a' => Ok(EntryState::Added), - b'r' => Ok(EntryState::Removed), - b'm' => Ok(EntryState::Merged), - b'?' => Ok(EntryState::Unknown), - _ => Err(HgError::CorruptedRepository(format!( - "Incorrect dirstate entry state {}", - value - ))), - } - } -} - -impl Into<u8> for EntryState { - fn into(self) -> u8 { - match self { - EntryState::Normal => b'n', - EntryState::Added => b'a', - EntryState::Removed => b'r', - EntryState::Merged => b'm', - EntryState::Unknown => b'?', - } - } -}
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate/dirs_multiset.rs Tue Sep 28 09:40:57 2021 +0200 @@ -33,7 +33,7 @@ /// If `skip_state` is provided, skips dirstate entries with equal state. pub fn from_dirstate<I, P>( dirstate: I, - skip_state: Option<EntryState>, + only_tracked: bool, ) -> Result<Self, DirstateError> where I: IntoIterator< @@ -48,8 +48,8 @@ let (filename, entry) = item?; let filename = filename.as_ref(); // This `if` is optimized out of the loop - if let Some(skip) = skip_state { - if skip != entry.state { + if only_tracked { + if entry.state() != EntryState::Removed { multiset.add_path(filename)?; } } else { @@ -343,7 +343,7 @@ let new = DirsMultiset::from_dirstate( StateMap::default().into_iter().map(Ok), - None, + false, ) .unwrap(); let expected = DirsMultiset { @@ -372,12 +372,7 @@ let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| { Ok(( HgPathBuf::from_bytes(f.as_bytes()), - DirstateEntry { - state: EntryState::Normal, - mode: 0, - mtime: 0, - size: 0, - }, + DirstateEntry::from_v1_data(EntryState::Normal, 0, 0, 0), )) }); let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)] @@ -385,7 +380,7 @@ .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v)) .collect(); - let new = DirsMultiset::from_dirstate(input_map, None).unwrap(); + let new = DirsMultiset::from_dirstate(input_map, false).unwrap(); let expected = DirsMultiset { inner: expected_inner, }; @@ -404,24 +399,17 @@ .map(|(f, state)| { Ok(( HgPathBuf::from_bytes(f.as_bytes()), - DirstateEntry { - state: *state, - mode: 0, - mtime: 0, - size: 0, - }, + DirstateEntry::from_v1_data(*state, 0, 0, 0), )) }); // "a" incremented with "a/c" and "a/d/" - let expected_inner = [("", 1), ("a", 2)] + let expected_inner = [("", 1), ("a", 3)] .iter() .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v)) .collect(); - let new = - DirsMultiset::from_dirstate(input_map, Some(EntryState::Normal)) - .unwrap(); + let new = 
DirsMultiset::from_dirstate(input_map, true).unwrap(); let expected = DirsMultiset { inner: expected_inner, };
--- a/rust/hg-core/src/dirstate/dirstate_map.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate/dirstate_map.rs Tue Sep 28 09:40:57 2021 +0200 @@ -79,45 +79,45 @@ from_p2: bool, possibly_dirty: bool, ) -> Result<(), DirstateError> { - let mut entry = entry; + let state; + let size; + let mtime; if added { - assert!(!merged); assert!(!possibly_dirty); assert!(!from_p2); - entry.state = EntryState::Added; - entry.size = SIZE_NON_NORMAL; - entry.mtime = MTIME_UNSET; + state = EntryState::Added; + size = SIZE_NON_NORMAL; + mtime = MTIME_UNSET; } else if merged { assert!(!possibly_dirty); assert!(!from_p2); - entry.state = EntryState::Merged; - entry.size = SIZE_FROM_OTHER_PARENT; - entry.mtime = MTIME_UNSET; + state = EntryState::Merged; + size = SIZE_FROM_OTHER_PARENT; + mtime = MTIME_UNSET; } else if from_p2 { assert!(!possibly_dirty); - entry.state = EntryState::Normal; - entry.size = SIZE_FROM_OTHER_PARENT; - entry.mtime = MTIME_UNSET; + state = EntryState::Normal; + size = SIZE_FROM_OTHER_PARENT; + mtime = MTIME_UNSET; } else if possibly_dirty { - entry.state = EntryState::Normal; - entry.size = SIZE_NON_NORMAL; - entry.mtime = MTIME_UNSET; + state = EntryState::Normal; + size = SIZE_NON_NORMAL; + mtime = MTIME_UNSET; } else { - entry.state = EntryState::Normal; - entry.size = entry.size & V1_RANGEMASK; - entry.mtime = entry.mtime & V1_RANGEMASK; + state = EntryState::Normal; + size = entry.size() & V1_RANGEMASK; + mtime = entry.mtime() & V1_RANGEMASK; } - let old_state = match self.get(filename) { - Some(e) => e.state, - None => EntryState::Unknown, - }; - if old_state == EntryState::Unknown || old_state == EntryState::Removed - { + let mode = entry.mode(); + let entry = DirstateEntry::from_v1_data(state, mode, size, mtime); + + let old_state = self.get(filename).map(|e| e.state()); + if old_state.is_none() || old_state == Some(EntryState::Removed) { if let Some(ref mut dirs) = self.dirs { dirs.add_path(filename)?; } } - if old_state == 
EntryState::Unknown { + if old_state.is_none() { if let Some(ref mut all_dirs) = self.all_dirs { all_dirs.add_path(filename)?; } @@ -149,10 +149,7 @@ in_merge: bool, ) -> Result<(), DirstateError> { let old_entry_opt = self.get(filename); - let old_state = match old_entry_opt { - Some(e) => e.state, - None => EntryState::Unknown, - }; + let old_state = old_entry_opt.map(|e| e.state()); let mut size = 0; if in_merge { // XXX we should not be able to have 'm' state and 'FROM_P2' if not @@ -161,10 +158,10 @@ // would be nice. if let Some(old_entry) = old_entry_opt { // backup the previous state - if old_entry.state == EntryState::Merged { + if old_entry.state() == EntryState::Merged { size = SIZE_NON_NORMAL; - } else if old_entry.state == EntryState::Normal - && old_entry.size == SIZE_FROM_OTHER_PARENT + } else if old_entry.state() == EntryState::Normal + && old_entry.size() == SIZE_FROM_OTHER_PARENT { // other parent size = SIZE_FROM_OTHER_PARENT; @@ -174,13 +171,12 @@ } } } - if old_state != EntryState::Unknown && old_state != EntryState::Removed - { + if old_state.is_some() && old_state != Some(EntryState::Removed) { if let Some(ref mut dirs) = self.dirs { dirs.delete_path(filename)?; } } - if old_state == EntryState::Unknown { + if old_state.is_none() { if let Some(ref mut all_dirs) = self.all_dirs { all_dirs.add_path(filename)?; } @@ -189,15 +185,8 @@ self.copy_map.remove(filename); } - self.state_map.insert( - filename.to_owned(), - DirstateEntry { - state: EntryState::Removed, - mode: 0, - size, - mtime: 0, - }, - ); + self.state_map + .insert(filename.to_owned(), DirstateEntry::new_removed(size)); self.get_non_normal_other_parent_entries() .0 .insert(filename.to_owned()); @@ -210,14 +199,11 @@ &mut self, filename: &HgPath, ) -> Result<bool, DirstateError> { - let old_state = match self.get(filename) { - Some(e) => e.state, - None => EntryState::Unknown, - }; + let old_state = self.get(filename).map(|e| e.state()); let exists = 
self.state_map.remove(filename).is_some(); if exists { - if old_state != EntryState::Removed { + if old_state != Some(EntryState::Removed) { if let Some(ref mut dirs) = self.dirs { dirs.delete_path(filename)?; } @@ -334,7 +320,7 @@ if self.all_dirs.is_none() { self.all_dirs = Some(DirsMultiset::from_dirstate( self.state_map.iter().map(|(k, v)| Ok((k, *v))), - None, + false, )?); } Ok(()) @@ -344,7 +330,7 @@ if self.dirs.is_none() { self.dirs = Some(DirsMultiset::from_dirstate( self.state_map.iter().map(|(k, v)| Ok((k, *v))), - Some(EntryState::Removed), + true, )?); } Ok(()) @@ -428,12 +414,7 @@ map.add_file( HgPath::new(b"meh"), - DirstateEntry { - state: EntryState::Normal, - mode: 1337, - mtime: 1337, - size: 1337, - }, + DirstateEntry::from_v1_data(EntryState::Normal, 1337, 1337, 1337), false, false, false, @@ -465,12 +446,7 @@ .map(|(fname, (state, mode, size, mtime))| { ( HgPathBuf::from_bytes(fname.as_ref()), - DirstateEntry { - state: *state, - mode: *mode, - size: *size, - mtime: *mtime, - }, + DirstateEntry::from_v1_data(*state, *mode, *size, *mtime), ) }) .collect();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/dirstate/entry.rs Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,190 @@ +use crate::errors::HgError; +use std::convert::TryFrom; + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum EntryState { + Normal, + Added, + Removed, + Merged, +} + +/// The C implementation uses all signed types. This will be an issue +/// either when 4GB+ source files are commonplace or in 2038, whichever +/// comes first. +#[derive(Debug, PartialEq, Copy, Clone)] +pub struct DirstateEntry { + state: EntryState, + mode: i32, + size: i32, + mtime: i32, +} + +pub const V1_RANGEMASK: i32 = 0x7FFFFFFF; + +pub const MTIME_UNSET: i32 = -1; + +/// A `DirstateEntry` with a size of `-2` means that it was merged from the +/// other parent. This allows revert to pick the right status back during a +/// merge. +pub const SIZE_FROM_OTHER_PARENT: i32 = -2; +/// A special value used for internal representation of special case in +/// dirstate v1 format. +pub const SIZE_NON_NORMAL: i32 = -1; + +impl DirstateEntry { + pub fn from_v1_data( + state: EntryState, + mode: i32, + size: i32, + mtime: i32, + ) -> Self { + Self { + state, + mode, + size, + mtime, + } + } + + /// Creates a new entry in "removed" state. 
+ /// + /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or + /// `SIZE_FROM_OTHER_PARENT` + pub fn new_removed(size: i32) -> Self { + Self { + state: EntryState::Removed, + mode: 0, + size, + mtime: 0, + } + } + + /// TODO: refactor `DirstateMap::add_file` to not take a `DirstateEntry` + /// parameter and remove this constructor + pub fn new_for_add_file(mode: i32, size: i32, mtime: i32) -> Self { + Self { + // XXX Arbitrary default value since the value is determined later + state: EntryState::Normal, + mode, + size, + mtime, + } + } + + pub fn state(&self) -> EntryState { + self.state + } + + pub fn mode(&self) -> i32 { + self.mode + } + + pub fn size(&self) -> i32 { + self.size + } + + pub fn mtime(&self) -> i32 { + self.mtime + } + + /// Returns `(state, mode, size, mtime)` for the puprose of serialization + /// in the dirstate-v1 format. + /// + /// This includes marker values such as `mtime == -1`. In the future we may + /// want to not represent these cases that way in memory, but serialization + /// will need to keep the same format. + pub fn v1_data(&self) -> (u8, i32, i32, i32) { + (self.state.into(), self.mode, self.size, self.mtime) + } + + pub fn is_non_normal(&self) -> bool { + self.state != EntryState::Normal || self.mtime == MTIME_UNSET + } + + pub fn is_from_other_parent(&self) -> bool { + self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT + } + + // TODO: other platforms + #[cfg(unix)] + pub fn mode_changed( + &self, + filesystem_metadata: &std::fs::Metadata, + ) -> bool { + use std::os::unix::fs::MetadataExt; + const EXEC_BIT_MASK: u32 = 0o100; + let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK; + let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK; + dirstate_exec_bit != fs_exec_bit + } + + /// Returns a `(state, mode, size, mtime)` tuple as for + /// `DirstateMapMethods::debug_iter`. 
+ pub fn debug_tuple(&self) -> (u8, i32, i32, i32) { + (self.state.into(), self.mode, self.size, self.mtime) + } + + pub fn mtime_is_ambiguous(&self, now: i32) -> bool { + self.state == EntryState::Normal && self.mtime == now + } + + pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool { + let ambiguous = self.mtime_is_ambiguous(now); + if ambiguous { + // The file was last modified "simultaneously" with the current + // write to dirstate (i.e. within the same second for file- + // systems with a granularity of 1 sec). This commonly happens + // for at least a couple of files on 'update'. + // The user could change the file without changing its size + // within the same second. Invalidate the file's mtime in + // dirstate, forcing future 'status' calls to compare the + // contents of the file if the size is the same. This prevents + // mistakenly treating such files as clean. + self.clear_mtime() + } + ambiguous + } + + pub fn clear_mtime(&mut self) { + self.mtime = -1; + } +} + +impl EntryState { + pub fn is_tracked(self) -> bool { + use EntryState::*; + match self { + Normal | Added | Merged => true, + Removed => false, + } + } +} + +impl TryFrom<u8> for EntryState { + type Error = HgError; + + fn try_from(value: u8) -> Result<Self, Self::Error> { + match value { + b'n' => Ok(EntryState::Normal), + b'a' => Ok(EntryState::Added), + b'r' => Ok(EntryState::Removed), + b'm' => Ok(EntryState::Merged), + _ => Err(HgError::CorruptedRepository(format!( + "Incorrect dirstate entry state {}", + value + ))), + } + } +} + +impl Into<u8> for EntryState { + fn into(self) -> u8 { + match self { + EntryState::Normal => b'n', + EntryState::Added => b'a', + EntryState::Removed => b'r', + EntryState::Merged => b'm', + } + } +}
--- a/rust/hg-core/src/dirstate/parsers.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate/parsers.rs Tue Sep 28 09:40:57 2021 +0200 @@ -6,11 +6,11 @@ use crate::errors::HgError; use crate::utils::hg_path::HgPath; use crate::{ - dirstate::{CopyMap, EntryState, RawEntry, StateMap}, + dirstate::{CopyMap, EntryState, StateMap}, DirstateEntry, DirstateParents, }; use byteorder::{BigEndian, WriteBytesExt}; -use bytes_cast::BytesCast; +use bytes_cast::{unaligned, BytesCast}; use micro_timer::timed; use std::convert::{TryFrom, TryInto}; @@ -48,6 +48,16 @@ Ok((parents, entries, copies)) } +#[derive(BytesCast)] +#[repr(C)] +struct RawEntry { + state: u8, + mode: unaligned::I32Be, + size: unaligned::I32Be, + mtime: unaligned::I32Be, + length: unaligned::I32Be, +} + pub fn parse_dirstate_entries<'a>( mut contents: &'a [u8], mut each_entry: impl FnMut( @@ -63,12 +73,12 @@ let (raw_entry, rest) = RawEntry::from_bytes(contents) .map_err(|_| HgError::corrupted("Overflow in dirstate."))?; - let entry = DirstateEntry { - state: EntryState::try_from(raw_entry.state)?, - mode: raw_entry.mode.get(), - mtime: raw_entry.mtime.get(), - size: raw_entry.size.get(), - }; + let entry = DirstateEntry::from_v1_data( + EntryState::try_from(raw_entry.state)?, + raw_entry.mode.get(), + raw_entry.size.get(), + raw_entry.mtime.get(), + ); let (paths, rest) = u8::slice_from_bytes(rest, raw_entry.length.get() as usize) .map_err(|_| HgError::corrupted("Overflow in dirstate."))?; @@ -114,12 +124,13 @@ packed: &mut Vec<u8>, ) { let length = packed_filename_and_copy_source_size(filename, copy_source); + let (state, mode, size, mtime) = entry.v1_data(); // Unwrapping because `impl std::io::Write for Vec<u8>` never errors - packed.write_u8(entry.state.into()).unwrap(); - packed.write_i32::<BigEndian>(entry.mode).unwrap(); - packed.write_i32::<BigEndian>(entry.size).unwrap(); - packed.write_i32::<BigEndian>(entry.mtime).unwrap(); + packed.write_u8(state).unwrap(); + 
packed.write_i32::<BigEndian>(mode).unwrap(); + packed.write_i32::<BigEndian>(size).unwrap(); + packed.write_i32::<BigEndian>(mtime).unwrap(); packed.write_i32::<BigEndian>(length as i32).unwrap(); packed.extend(filename.as_bytes()); if let Some(source) = copy_source { @@ -131,33 +142,6 @@ /// Seconds since the Unix epoch pub struct Timestamp(pub i64); -impl DirstateEntry { - pub fn mtime_is_ambiguous(&self, now: i32) -> bool { - self.state == EntryState::Normal && self.mtime == now - } - - pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool { - let ambiguous = self.mtime_is_ambiguous(now); - if ambiguous { - // The file was last modified "simultaneously" with the current - // write to dirstate (i.e. within the same second for file- - // systems with a granularity of 1 sec). This commonly happens - // for at least a couple of files on 'update'. - // The user could change the file without changing its size - // within the same second. Invalidate the file's mtime in - // dirstate, forcing future 'status' calls to compare the - // contents of the file if the size is the same. This prevents - // mistakenly treating such files as clean. 
- self.clear_mtime() - } - ambiguous - } - - pub fn clear_mtime(&mut self) { - self.mtime = -1; - } -} - pub fn pack_dirstate( state_map: &mut StateMap, copy_map: &CopyMap, @@ -229,12 +213,12 @@ fn test_pack_dirstate_one_entry() { let expected_state_map: StateMap = [( HgPathBuf::from_bytes(b"f1"), - DirstateEntry { - state: EntryState::Normal, - mode: 0o644, - size: 0, - mtime: 791231220, - }, + DirstateEntry::from_v1_data( + EntryState::Normal, + 0o644, + 0, + 791231220, + ), )] .iter() .cloned() @@ -266,12 +250,12 @@ fn test_pack_dirstate_one_entry_with_copy() { let expected_state_map: StateMap = [( HgPathBuf::from_bytes(b"f1"), - DirstateEntry { - state: EntryState::Normal, - mode: 0o644, - size: 0, - mtime: 791231220, - }, + DirstateEntry::from_v1_data( + EntryState::Normal, + 0o644, + 0, + 791231220, + ), )] .iter() .cloned() @@ -307,12 +291,12 @@ fn test_parse_pack_one_entry_with_copy() { let mut state_map: StateMap = [( HgPathBuf::from_bytes(b"f1"), - DirstateEntry { - state: EntryState::Normal, - mode: 0o644, - size: 0, - mtime: 791231220, - }, + DirstateEntry::from_v1_data( + EntryState::Normal, + 0o644, + 0, + 791231220, + ), )] .iter() .cloned() @@ -353,39 +337,34 @@ let mut state_map: StateMap = [ ( HgPathBuf::from_bytes(b"f1"), - DirstateEntry { - state: EntryState::Normal, - mode: 0o644, - size: 0, - mtime: 791231220, - }, + DirstateEntry::from_v1_data( + EntryState::Normal, + 0o644, + 0, + 791231220, + ), ), ( HgPathBuf::from_bytes(b"f2"), - DirstateEntry { - state: EntryState::Merged, - mode: 0o777, - size: 1000, - mtime: 791231220, - }, + DirstateEntry::from_v1_data( + EntryState::Merged, + 0o777, + 1000, + 791231220, + ), ), ( HgPathBuf::from_bytes(b"f3"), - DirstateEntry { - state: EntryState::Removed, - mode: 0o644, - size: 234553, - mtime: 791231220, - }, + DirstateEntry::from_v1_data( + EntryState::Removed, + 0o644, + 234553, + 791231220, + ), ), ( HgPathBuf::from_bytes(b"f4\xF6"), - DirstateEntry { - state: EntryState::Added, - mode: 0o644, - 
size: -1, - mtime: -1, - }, + DirstateEntry::from_v1_data(EntryState::Added, 0o644, -1, -1), ), ] .iter() @@ -431,12 +410,12 @@ fn test_parse_pack_one_entry_with_copy_and_time_conflict() { let mut state_map: StateMap = [( HgPathBuf::from_bytes(b"f1"), - DirstateEntry { - state: EntryState::Normal, - mode: 0o644, - size: 0, - mtime: 15000000, - }, + DirstateEntry::from_v1_data( + EntryState::Normal, + 0o644, + 0, + 15000000, + ), )] .iter() .cloned() @@ -471,12 +450,12 @@ &parents, [( HgPathBuf::from_bytes(b"f1"), - DirstateEntry { - state: EntryState::Normal, - mode: 0o644, - size: 0, - mtime: -1 - } + DirstateEntry::from_v1_data( + EntryState::Normal, + 0o644, + 0, + -1 + ) )] .iter() .cloned()
--- a/rust/hg-core/src/dirstate/status.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate/status.rs Tue Sep 28 09:40:57 2021 +0200 @@ -157,22 +157,19 @@ copy_map: &CopyMap, options: StatusOptions, ) -> Dispatch { - let DirstateEntry { - state, - mode, - mtime, - size, - } = entry; + match entry.state() { + EntryState::Normal => { + let mode = entry.mode(); + let size = entry.size(); + let mtime = entry.mtime(); - let HgMetadata { - st_mode, - st_size, - st_mtime, - .. - } = metadata; + let HgMetadata { + st_mode, + st_size, + st_mtime, + .. + } = metadata; - match state { - EntryState::Normal => { let size_changed = mod_compare(size, st_size as i32); let mode_changed = (mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec; @@ -208,7 +205,6 @@ EntryState::Merged => Dispatch::Modified, EntryState::Added => Dispatch::Added, EntryState::Removed => Dispatch::Removed, - EntryState::Unknown => Dispatch::Unknown, } } @@ -221,8 +217,6 @@ } // File was removed, everything is normal EntryState::Removed => Dispatch::Removed, - // File is unknown to Mercurial, everything is normal - EntryState::Unknown => Dispatch::Unknown, } } @@ -473,7 +467,7 @@ if let Some(entry) = in_dmap { return Some(( Cow::Borrowed(normalized), - dispatch_missing(entry.state), + dispatch_missing(entry.state()), )); } } @@ -605,7 +599,10 @@ || self.matcher.matches(&filename) { files_sender - .send((filename.to_owned(), dispatch_missing(entry.state))) + .send(( + filename.to_owned(), + dispatch_missing(entry.state()), + )) .unwrap(); } } @@ -635,7 +632,7 @@ files_sender .send(( directory.to_owned(), - dispatch_missing(entry.state), + dispatch_missing(entry.state()), )) .unwrap(); } @@ -767,7 +764,7 @@ { ( Cow::Borrowed(filename), - dispatch_missing(entry.state), + dispatch_missing(entry.state()), ) } Ok(m) => ( @@ -791,7 +788,7 @@ // directory ( Cow::Borrowed(filename), - dispatch_missing(entry.state), + dispatch_missing(entry.state()), ) } Err(e) => { @@ -863,7 +860,7 @@ ) } // 
File doesn't exist - Err(_) => dispatch_missing(entry.state), + Err(_) => dispatch_missing(entry.state()), }, )) } else { @@ -871,7 +868,7 @@ // we, in this case, report as missing. Some(( Cow::Owned(filename.to_owned()), - dispatch_missing(entry.state), + dispatch_missing(entry.state()), )) } },
--- a/rust/hg-core/src/dirstate_tree.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate_tree.rs Tue Sep 28 09:40:57 2021 +0200 @@ -1,5 +1,7 @@ pub mod dirstate_map; pub mod dispatch; pub mod on_disk; +pub mod owning; +mod owning_dispatch; pub mod path_with_basename; pub mod status;
--- a/rust/hg-core/src/dirstate_tree/dirstate_map.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs Tue Sep 28 09:40:57 2021 +0200 @@ -328,7 +328,7 @@ ) -> Result<Option<EntryState>, DirstateV2ParseError> { match self { NodeRef::InMemory(_path, node) => { - Ok(node.data.as_entry().map(|entry| entry.state)) + Ok(node.data.as_entry().map(|entry| entry.state())) } NodeRef::OnDisk(node) => node.state(), } @@ -445,7 +445,7 @@ let parents = parse_dirstate_entries( map.on_disk, |path, entry, copy_source| { - let tracked = entry.state.is_tracked(); + let tracked = entry.state().is_tracked(); let node = Self::get_or_insert_node( map.on_disk, &mut map.unreachable_bytes, @@ -593,12 +593,13 @@ fn add_or_remove_file( &mut self, path: &HgPath, - old_state: EntryState, + old_state: Option<EntryState>, new_entry: DirstateEntry, ) -> Result<(), DirstateV2ParseError> { - let had_entry = old_state != EntryState::Unknown; + let had_entry = old_state.is_some(); + let was_tracked = old_state.map_or(false, |s| s.is_tracked()); let tracked_count_increment = - match (old_state.is_tracked(), new_entry.state.is_tracked()) { + match (was_tracked, new_entry.state().is_tracked()) { (false, true) => 1, (true, false) => -1, _ => 0, @@ -776,38 +777,39 @@ from_p2: bool, possibly_dirty: bool, ) -> Result<(), DirstateError> { - let mut entry = entry; + let state; + let size; + let mtime; if added { assert!(!possibly_dirty); assert!(!from_p2); - entry.state = EntryState::Added; - entry.size = SIZE_NON_NORMAL; - entry.mtime = MTIME_UNSET; + state = EntryState::Added; + size = SIZE_NON_NORMAL; + mtime = MTIME_UNSET; } else if merged { assert!(!possibly_dirty); assert!(!from_p2); - entry.state = EntryState::Merged; - entry.size = SIZE_FROM_OTHER_PARENT; - entry.mtime = MTIME_UNSET; + state = EntryState::Merged; + size = SIZE_FROM_OTHER_PARENT; + mtime = MTIME_UNSET; } else if from_p2 { assert!(!possibly_dirty); - entry.state = EntryState::Normal; - entry.size = 
SIZE_FROM_OTHER_PARENT; - entry.mtime = MTIME_UNSET; + state = EntryState::Normal; + size = SIZE_FROM_OTHER_PARENT; + mtime = MTIME_UNSET; } else if possibly_dirty { - entry.state = EntryState::Normal; - entry.size = SIZE_NON_NORMAL; - entry.mtime = MTIME_UNSET; + state = EntryState::Normal; + size = SIZE_NON_NORMAL; + mtime = MTIME_UNSET; } else { - entry.state = EntryState::Normal; - entry.size = entry.size & V1_RANGEMASK; - entry.mtime = entry.mtime & V1_RANGEMASK; + state = EntryState::Normal; + size = entry.size() & V1_RANGEMASK; + mtime = entry.mtime() & V1_RANGEMASK; } + let mode = entry.mode(); + let entry = DirstateEntry::from_v1_data(state, mode, size, mtime); - let old_state = match self.get(filename)? { - Some(e) => e.state, - None => EntryState::Unknown, - }; + let old_state = self.get(filename)?.map(|e| e.state()); Ok(self.add_or_remove_file(filename, old_state, entry)?) } @@ -818,10 +820,7 @@ in_merge: bool, ) -> Result<(), DirstateError> { let old_entry_opt = self.get(filename)?; - let old_state = match old_entry_opt { - Some(e) => e.state, - None => EntryState::Unknown, - }; + let old_state = old_entry_opt.map(|e| e.state()); let mut size = 0; if in_merge { // XXX we should not be able to have 'm' state and 'FROM_P2' if not @@ -830,10 +829,10 @@ // would be nice. 
if let Some(old_entry) = old_entry_opt { // backup the previous state - if old_entry.state == EntryState::Merged { + if old_entry.state() == EntryState::Merged { size = SIZE_NON_NORMAL; - } else if old_entry.state == EntryState::Normal - && old_entry.size == SIZE_FROM_OTHER_PARENT + } else if old_entry.state() == EntryState::Normal + && old_entry.size() == SIZE_FROM_OTHER_PARENT { // other parent size = SIZE_FROM_OTHER_PARENT; @@ -843,20 +842,14 @@ if size == 0 { self.copy_map_remove(filename)?; } - let entry = DirstateEntry { - state: EntryState::Removed, - mode: 0, - size, - mtime: 0, - }; + let entry = DirstateEntry::new_removed(size); Ok(self.add_or_remove_file(filename, old_state, entry)?) } fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> { - let old_state = match self.get(filename)? { - Some(e) => e.state, - None => EntryState::Unknown, - }; + let was_tracked = self + .get(filename)? + .map_or(false, |e| e.state().is_tracked()); struct Dropped { was_tracked: bool, had_entry: bool, @@ -921,7 +914,7 @@ was_tracked: node .data .as_entry() - .map_or(false, |entry| entry.state.is_tracked()), + .map_or(false, |entry| entry.state().is_tracked()), had_entry, had_copy_source: node.copy_source.take().is_some(), }; @@ -956,7 +949,7 @@ } Ok(dropped.had_entry) } else { - debug_assert!(!old_state.is_tracked()); + debug_assert!(!was_tracked); Ok(false) } } @@ -1290,6 +1283,7 @@ fn debug_iter( &self, + all: bool, ) -> Box< dyn Iterator< Item = Result< @@ -1299,16 +1293,17 @@ > + Send + '_, > { - Box::new(self.iter_nodes().map(move |node| { - let node = node?; + Box::new(filter_map_results(self.iter_nodes(), move |node| { let debug_tuple = if let Some(entry) = node.entry()? 
{ entry.debug_tuple() + } else if !all { + return Ok(None); } else if let Some(mtime) = node.cached_directory_mtime() { (b' ', 0, -1, mtime.seconds() as i32) } else { (b' ', 0, -1, -1) }; - Ok((node.full_path(self.on_disk)?, debug_tuple)) + Ok(Some((node.full_path(self.on_disk)?, debug_tuple))) })) } }
--- a/rust/hg-core/src/dirstate_tree/dispatch.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate_tree/dispatch.rs Tue Sep 28 09:40:57 2021 +0200 @@ -290,13 +290,15 @@ /// node stored in this dirstate map, for the purpose of the `hg /// debugdirstate` command. /// - /// For nodes that don’t have an entry, `state` is the ASCII space. + /// If `all` is true, include nodes that don’t have an entry. + /// For such nodes `state` is the ASCII space. /// An `mtime` may still be present. It is used to optimize `status`. /// /// Because parse errors can happen during iteration, the iterated items /// are `Result`s. fn debug_iter( &self, + all: bool, ) -> Box< dyn Iterator< Item = Result< @@ -538,6 +540,7 @@ fn debug_iter( &self, + all: bool, ) -> Box< dyn Iterator< Item = Result< @@ -547,6 +550,9 @@ > + Send + '_, > { + // Not used for the flat (not tree-based) DirstateMap + let _ = all; + Box::new( (&**self) .iter()
--- a/rust/hg-core/src/dirstate_tree/on_disk.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate_tree/on_disk.rs Tue Sep 28 09:40:57 2021 +0200 @@ -265,7 +265,7 @@ } pub fn data_filename(&self) -> String { - String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap() + String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap() } } @@ -403,12 +403,15 @@ } fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry { - DirstateEntry { + // For now, the on-disk representation of DirstateEntry in dirstate-v2 + // format is equivalent to that of dirstate-v1. When that changes, add + // a new constructor. + DirstateEntry::from_v1_data( state, - mode: self.data.mode.get(), - mtime: self.data.mtime.get(), - size: self.data.size.get(), - } + self.data.mode.get(), + self.data.size.get(), + self.data.mtime.get(), + ) } pub(super) fn entry( @@ -640,11 +643,11 @@ NodeRef::InMemory(path, node) => { let (state, data) = match &node.data { dirstate_map::NodeData::Entry(entry) => ( - entry.state.into(), + entry.state().into(), Entry { - mode: entry.mode.into(), - mtime: entry.mtime.into(), - size: entry.size.into(), + mode: entry.mode().into(), + mtime: entry.mtime().into(), + size: entry.size().into(), }, ), dirstate_map::NodeData::CachedDirectory { mtime } => {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/dirstate_tree/owning.rs Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,105 @@ +use super::dirstate_map::DirstateMap; +use stable_deref_trait::StableDeref; +use std::ops::Deref; + +/// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it +/// borrows. +/// +/// This is similar to [`OwningRef`] which is more limited because it +/// represents exactly one `&T` reference next to the value it borrows, as +/// opposed to a struct that may contain an arbitrary number of references in +/// arbitrarily-nested data structures. +/// +/// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html +pub struct OwningDirstateMap { + /// Owned handle to a bytes buffer with a stable address. + /// + /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>. + on_disk: Box<dyn Deref<Target = [u8]> + Send>, + + /// Pointer for `Box<DirstateMap<'on_disk>>`, typed-erased because the + /// language cannot represent a lifetime referencing a sibling field. + /// This is not quite a self-referencial struct (moving this struct is not + /// a problem as it doesn’t change the address of the bytes buffer owned + /// by `PyBytes`) but touches similar borrow-checker limitations. + ptr: *mut (), +} + +impl OwningDirstateMap { + pub fn new_empty<OnDisk>(on_disk: OnDisk) -> Self + where + OnDisk: Deref<Target = [u8]> + StableDeref + Send + 'static, + { + let on_disk = Box::new(on_disk); + let bytes: &'_ [u8] = &on_disk; + let map = DirstateMap::empty(bytes); + + // Like in `bytes` above, this `'_` lifetime parameter borrows from + // the bytes buffer owned by `on_disk`. + let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map)); + + // Erase the pointed type entirely in order to erase the lifetime. 
+ let ptr: *mut () = ptr.cast(); + + Self { on_disk, ptr } + } + + pub fn get_mut_pair<'a>( + &'a mut self, + ) -> (&'a [u8], &'a mut DirstateMap<'a>) { + // SAFETY: We cast the type-erased pointer back to the same type it had + // in `new`, except with a different lifetime parameter. This time we + // connect the lifetime to that of `self`. This cast is valid because + // `self` owns the same `PyBytes` whose buffer `DirstateMap` + // references. That buffer has a stable memory address because the byte + // string value of a `PyBytes` is immutable. + let ptr: *mut DirstateMap<'a> = self.ptr.cast(); + // SAFETY: we dereference that pointer, connecting the lifetime of the + // new `&mut` to that of `self`. This is valid because the + // raw pointer is to a boxed value, and `self` owns that box. + (&self.on_disk, unsafe { &mut *ptr }) + } + + pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> { + self.get_mut_pair().1 + } + + pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> { + // SAFETY: same reasoning as in `get_mut` above. + let ptr: *mut DirstateMap<'a> = self.ptr.cast(); + unsafe { &*ptr } + } + + pub fn on_disk<'a>(&'a self) -> &'a [u8] { + &self.on_disk + } +} + +impl Drop for OwningDirstateMap { + fn drop(&mut self) { + // Silence a "field is never read" warning, and demonstrate that this + // value is still alive. + let _ = &self.on_disk; + // SAFETY: this cast is the same as in `get_mut`, and is valid for the + // same reason. `self.on_disk` still exists at this point, drop glue + // will drop it implicitly after this `drop` method returns. + let ptr: *mut DirstateMap<'_> = self.ptr.cast(); + // SAFETY: `Box::from_raw` takes ownership of the box away from `self`. + // This is fine because drop glue does nothig for `*mut ()` and we’re + // in `drop`, so `get` and `get_mut` cannot be called again. 
+ unsafe { drop(Box::from_raw(ptr)) } + } +} + +fn _static_assert_is_send<T: Send>() {} + +fn _static_assert_fields_are_send() { + _static_assert_is_send::<Box<DirstateMap<'_>>>(); +} + +// SAFETY: we don’t get this impl implicitly because `*mut (): !Send` because +// thread-safety of raw pointers is unknown in the general case. However this +// particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we +// own. Since that `Box` is `Send` as shown in above, it is sound to mark +// this struct as `Send` too. +unsafe impl Send for OwningDirstateMap {}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/dirstate_tree/owning_dispatch.rs Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,241 @@ +use crate::dirstate::parsers::Timestamp; +use crate::dirstate_tree::dispatch::DirstateMapMethods; +use crate::dirstate_tree::on_disk::DirstateV2ParseError; +use crate::dirstate_tree::owning::OwningDirstateMap; +use crate::matchers::Matcher; +use crate::utils::hg_path::{HgPath, HgPathBuf}; +use crate::CopyMapIter; +use crate::DirstateEntry; +use crate::DirstateError; +use crate::DirstateParents; +use crate::DirstateStatus; +use crate::PatternFileWarning; +use crate::StateMapIter; +use crate::StatusError; +use crate::StatusOptions; +use std::path::PathBuf; + +impl DirstateMapMethods for OwningDirstateMap { + fn clear(&mut self) { + self.get_mut().clear() + } + + fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) { + self.get_mut().set_v1(filename, entry) + } + + fn add_file( + &mut self, + filename: &HgPath, + entry: DirstateEntry, + added: bool, + merged: bool, + from_p2: bool, + possibly_dirty: bool, + ) -> Result<(), DirstateError> { + self.get_mut().add_file( + filename, + entry, + added, + merged, + from_p2, + possibly_dirty, + ) + } + + fn remove_file( + &mut self, + filename: &HgPath, + in_merge: bool, + ) -> Result<(), DirstateError> { + self.get_mut().remove_file(filename, in_merge) + } + + fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> { + self.get_mut().drop_file(filename) + } + + fn clear_ambiguous_times( + &mut self, + filenames: Vec<HgPathBuf>, + now: i32, + ) -> Result<(), DirstateV2ParseError> { + self.get_mut().clear_ambiguous_times(filenames, now) + } + + fn non_normal_entries_contains( + &mut self, + key: &HgPath, + ) -> Result<bool, DirstateV2ParseError> { + self.get_mut().non_normal_entries_contains(key) + } + + fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool { + self.get_mut().non_normal_entries_remove(key) + } + + fn non_normal_entries_add(&mut 
self, key: &HgPath) { + self.get_mut().non_normal_entries_add(key) + } + + fn non_normal_or_other_parent_paths( + &mut self, + ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_> + { + self.get_mut().non_normal_or_other_parent_paths() + } + + fn set_non_normal_other_parent_entries(&mut self, force: bool) { + self.get_mut().set_non_normal_other_parent_entries(force) + } + + fn iter_non_normal_paths( + &mut self, + ) -> Box< + dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_, + > { + self.get_mut().iter_non_normal_paths() + } + + fn iter_non_normal_paths_panic( + &self, + ) -> Box< + dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_, + > { + self.get().iter_non_normal_paths_panic() + } + + fn iter_other_parent_paths( + &mut self, + ) -> Box< + dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_, + > { + self.get_mut().iter_other_parent_paths() + } + + fn has_tracked_dir( + &mut self, + directory: &HgPath, + ) -> Result<bool, DirstateError> { + self.get_mut().has_tracked_dir(directory) + } + + fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> { + self.get_mut().has_dir(directory) + } + + fn pack_v1( + &mut self, + parents: DirstateParents, + now: Timestamp, + ) -> Result<Vec<u8>, DirstateError> { + self.get_mut().pack_v1(parents, now) + } + + fn pack_v2( + &mut self, + now: Timestamp, + can_append: bool, + ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> { + self.get_mut().pack_v2(now, can_append) + } + + fn status<'a>( + &'a mut self, + matcher: &'a (dyn Matcher + Sync), + root_dir: PathBuf, + ignore_files: Vec<PathBuf>, + options: StatusOptions, + ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError> + { + self.get_mut() + .status(matcher, root_dir, ignore_files, options) + } + + fn copy_map_len(&self) -> usize { + self.get().copy_map_len() + } + + fn copy_map_iter(&self) -> CopyMapIter<'_> { + self.get().copy_map_iter() + } + + fn 
copy_map_contains_key( + &self, + key: &HgPath, + ) -> Result<bool, DirstateV2ParseError> { + self.get().copy_map_contains_key(key) + } + + fn copy_map_get( + &self, + key: &HgPath, + ) -> Result<Option<&HgPath>, DirstateV2ParseError> { + self.get().copy_map_get(key) + } + + fn copy_map_remove( + &mut self, + key: &HgPath, + ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { + self.get_mut().copy_map_remove(key) + } + + fn copy_map_insert( + &mut self, + key: HgPathBuf, + value: HgPathBuf, + ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { + self.get_mut().copy_map_insert(key, value) + } + + fn len(&self) -> usize { + self.get().len() + } + + fn contains_key( + &self, + key: &HgPath, + ) -> Result<bool, DirstateV2ParseError> { + self.get().contains_key(key) + } + + fn get( + &self, + key: &HgPath, + ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { + self.get().get(key) + } + + fn iter(&self) -> StateMapIter<'_> { + self.get().iter() + } + + fn iter_tracked_dirs( + &mut self, + ) -> Result< + Box< + dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + + Send + + '_, + >, + DirstateError, + > { + self.get_mut().iter_tracked_dirs() + } + + fn debug_iter( + &self, + all: bool, + ) -> Box< + dyn Iterator< + Item = Result< + (&HgPath, (u8, i32, i32, i32)), + DirstateV2ParseError, + >, + > + Send + + '_, + > { + self.get().debug_iter(all) + } +}
--- a/rust/hg-core/src/dirstate_tree/status.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/dirstate_tree/status.rs Tue Sep 28 09:40:57 2021 +0200 @@ -394,9 +394,6 @@ .push(hg_path.detach_from_tree()), EntryState::Normal => self .handle_normal_file(&dirstate_node, fs_metadata)?, - // This variant is not used in DirstateMap - // nodes - EntryState::Unknown => unreachable!(), } } else { // `node.entry.is_none()` indicates a "directory" @@ -506,11 +503,9 @@ let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?; let mode_changed = || self.options.check_exec && entry.mode_changed(fs_metadata); - let size_changed = entry.size != truncate_u64(fs_metadata.len()); - if entry.size >= 0 - && size_changed - && fs_metadata.file_type().is_symlink() - { + let size = entry.size(); + let size_changed = size != truncate_u64(fs_metadata.len()); + if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() { // issue6456: Size returned may be longer due to encryption // on EXT-4 fscrypt. TODO maybe only do it on EXT4? self.outcome @@ -520,7 +515,7 @@ .push(hg_path.detach_from_tree()) } else if dirstate_node.has_copy_source() || entry.is_from_other_parent() - || (entry.size >= 0 && (size_changed || mode_changed())) + || (size >= 0 && (size_changed || mode_changed())) { self.outcome .lock() @@ -529,7 +524,7 @@ .push(hg_path.detach_from_tree()) } else { let mtime = mtime_seconds(fs_metadata); - if truncate_i64(mtime) != entry.mtime + if truncate_i64(mtime) != entry.mtime() || mtime == self.options.last_normal_time { self.outcome
--- a/rust/hg-core/src/lib.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/lib.rs Tue Sep 28 09:40:57 2021 +0200 @@ -36,6 +36,7 @@ pub mod operations; pub mod revset; pub mod utils; +pub mod vfs; use crate::utils::hg_path::{HgPathBuf, HgPathError}; pub use filepatterns::{
--- a/rust/hg-core/src/logging.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/logging.rs Tue Sep 28 09:40:57 2021 +0200 @@ -1,5 +1,5 @@ use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt}; -use crate::repo::Vfs; +use crate::vfs::Vfs; use std::io::Write; /// An utility to append to a log file with the given name, and optionally
--- a/rust/hg-core/src/operations/cat.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/operations/cat.rs Tue Sep 28 09:40:57 2021 +0200 @@ -5,17 +5,11 @@ // This software may be used and distributed according to the terms of the // GNU General Public License version 2 or any later version. -use std::path::PathBuf; - use crate::repo::Repo; -use crate::revlog::changelog::Changelog; -use crate::revlog::manifest::Manifest; -use crate::revlog::path_encode::path_encode; -use crate::revlog::revlog::Revlog; use crate::revlog::revlog::RevlogError; use crate::revlog::Node; -use crate::utils::files::get_path_from_bytes; -use crate::utils::hg_path::{HgPath, HgPathBuf}; + +use crate::utils::hg_path::HgPathBuf; pub struct CatOutput { /// Whether any file in the manifest matched the paths given as CLI @@ -29,8 +23,6 @@ pub node: Node, } -const METADATA_DELIMITER: [u8; 2] = [b'\x01', b'\n']; - /// Output the given revision of files /// /// * `root`: Repository root @@ -42,44 +34,24 @@ files: &'a [HgPathBuf], ) -> Result<CatOutput, RevlogError> { let rev = crate::revset::resolve_single(revset, repo)?; - let changelog = Changelog::open(repo)?; - let manifest = Manifest::open(repo)?; - let changelog_entry = changelog.get_rev(rev)?; - let node = *changelog + let manifest = repo.manifest_for_rev(rev)?; + let node = *repo + .changelog()? 
.node_from_rev(rev) - .expect("should succeed when changelog.get_rev did"); - let manifest_node = - Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?; - let manifest_entry = manifest.get_node(manifest_node.into())?; + .expect("should succeed when repo.manifest did"); let mut bytes = vec![]; let mut matched = vec![false; files.len()]; let mut found_any = false; - for (manifest_file, node_bytes) in manifest_entry.files_with_nodes() { + for (manifest_file, node_bytes) in manifest.files_with_nodes() { for (cat_file, is_matched) in files.iter().zip(&mut matched) { if cat_file.as_bytes() == manifest_file.as_bytes() { *is_matched = true; found_any = true; - let index_path = store_path(manifest_file, b".i"); - let data_path = store_path(manifest_file, b".d"); - - let file_log = - Revlog::open(repo, &index_path, Some(&data_path))?; + let file_log = repo.filelog(manifest_file)?; let file_node = Node::from_hex_for_repo(node_bytes)?; - let file_rev = file_log.get_node_rev(file_node.into())?; - let data = file_log.get_rev_data(file_rev)?; - if data.starts_with(&METADATA_DELIMITER) { - let end_delimiter_position = data - [METADATA_DELIMITER.len()..] - .windows(METADATA_DELIMITER.len()) - .position(|bytes| bytes == METADATA_DELIMITER); - if let Some(position) = end_delimiter_position { - let offset = METADATA_DELIMITER.len() * 2; - bytes.extend(data[position + offset..].iter()); - } - } else { - bytes.extend(data); - } + let entry = file_log.data_for_node(file_node)?; + bytes.extend(entry.data()?) } } } @@ -97,9 +69,3 @@ node, }) } - -fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf { - let encoded_bytes = - path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat()); - get_path_from_bytes(&encoded_bytes).into() -}
--- a/rust/hg-core/src/operations/list_tracked_files.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/operations/list_tracked_files.rs Tue Sep 28 09:40:57 2021 +0200 @@ -9,9 +9,7 @@ use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket}; use crate::errors::HgError; use crate::repo::Repo; -use crate::revlog::changelog::Changelog; -use crate::revlog::manifest::{Manifest, ManifestEntry}; -use crate::revlog::node::Node; +use crate::revlog::manifest::Manifest; use crate::revlog::revlog::RevlogError; use crate::utils::hg_path::HgPath; use crate::DirstateError; @@ -53,7 +51,7 @@ let _parents = parse_dirstate_entries( &self.content, |path, entry, _copy_source| { - if entry.state.is_tracked() { + if entry.state().is_tracked() { files.push(path) } Ok(()) @@ -72,16 +70,10 @@ revset: &str, ) -> Result<FilesForRev, RevlogError> { let rev = crate::revset::resolve_single(revset, repo)?; - let changelog = Changelog::open(repo)?; - let manifest = Manifest::open(repo)?; - let changelog_entry = changelog.get_rev(rev)?; - let manifest_node = - Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?; - let manifest_entry = manifest.get_node(manifest_node.into())?; - Ok(FilesForRev(manifest_entry)) + Ok(FilesForRev(repo.manifest_for_rev(rev)?)) } -pub struct FilesForRev(ManifestEntry); +pub struct FilesForRev(Manifest); impl FilesForRev { pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
--- a/rust/hg-core/src/repo.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/repo.rs Tue Sep 28 09:40:57 2021 +0200 @@ -1,12 +1,22 @@ +use crate::changelog::Changelog; use crate::config::{Config, ConfigError, ConfigParseError}; -use crate::errors::{HgError, IoErrorContext, IoResultExt}; +use crate::dirstate::DirstateParents; +use crate::dirstate_tree::dirstate_map::DirstateMap; +use crate::dirstate_tree::owning::OwningDirstateMap; +use crate::errors::HgError; +use crate::errors::HgResultExt; use crate::exit_codes; -use crate::requirements; +use crate::manifest::{Manifest, Manifestlog}; +use crate::revlog::filelog::Filelog; +use crate::revlog::revlog::RevlogError; use crate::utils::files::get_path_from_bytes; +use crate::utils::hg_path::HgPath; use crate::utils::SliceExt; -use memmap::{Mmap, MmapOptions}; +use crate::vfs::{is_dir, is_file, Vfs}; +use crate::{requirements, NodePrefix}; +use crate::{DirstateError, Revision}; +use std::cell::{Cell, Ref, RefCell, RefMut}; use std::collections::HashSet; -use std::io::ErrorKind; use std::path::{Path, PathBuf}; /// A repository on disk @@ -16,6 +26,11 @@ store: PathBuf, requirements: HashSet<String>, config: Config, + // None means not known/initialized yet + dirstate_parents: Cell<Option<DirstateParents>>, + dirstate_map: LazyCell<OwningDirstateMap, DirstateError>, + changelog: LazyCell<Changelog, HgError>, + manifestlog: LazyCell<Manifestlog, HgError>, } #[derive(Debug, derive_more::From)] @@ -38,12 +53,6 @@ } } -/// Filesystem access abstraction for the contents of a given "base" diretory -#[derive(Clone, Copy)] -pub struct Vfs<'a> { - pub(crate) base: &'a Path, -} - impl Repo { /// tries to find nearest repository root in current working directory or /// its ancestors @@ -127,7 +136,8 @@ } else { let bytes = hg_vfs.read("sharedpath")?; let mut shared_path = - get_path_from_bytes(bytes.trim_end_newlines()).to_owned(); + get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n')) + .to_owned(); if relative { 
shared_path = dot_hg.join(shared_path) } @@ -192,6 +202,10 @@ store: store_path, dot_hg, config: repo_config, + dirstate_parents: Cell::new(None), + dirstate_map: LazyCell::new(Self::new_dirstate_map), + changelog: LazyCell::new(Changelog::open), + manifestlog: LazyCell::new(Manifestlog::open), }; requirements::check(&repo)?; @@ -234,82 +248,162 @@ .contains(requirements::DIRSTATE_V2_REQUIREMENT) } - pub fn dirstate_parents( - &self, - ) -> Result<crate::dirstate::DirstateParents, HgError> { - let dirstate = self.hg_vfs().mmap_open("dirstate")?; - if dirstate.is_empty() { - return Ok(crate::dirstate::DirstateParents::NULL); + fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> { + Ok(self + .hg_vfs() + .read("dirstate") + .io_not_found_as_none()? + .unwrap_or(Vec::new())) + } + + pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> { + if let Some(parents) = self.dirstate_parents.get() { + return Ok(parents); } - let parents = if self.has_dirstate_v2() { + let dirstate = self.dirstate_file_contents()?; + let parents = if dirstate.is_empty() { + DirstateParents::NULL + } else if self.has_dirstate_v2() { crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents() } else { crate::dirstate::parsers::parse_dirstate_parents(&dirstate)? 
.clone() }; + self.dirstate_parents.set(Some(parents)); Ok(parents) } + + fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> { + let dirstate_file_contents = self.dirstate_file_contents()?; + if dirstate_file_contents.is_empty() { + self.dirstate_parents.set(Some(DirstateParents::NULL)); + Ok(OwningDirstateMap::new_empty(Vec::new())) + } else if self.has_dirstate_v2() { + let docket = crate::dirstate_tree::on_disk::read_docket( + &dirstate_file_contents, + )?; + self.dirstate_parents.set(Some(docket.parents())); + let data_size = docket.data_size(); + let metadata = docket.tree_metadata(); + let mut map = if let Some(data_mmap) = self + .hg_vfs() + .mmap_open(docket.data_filename()) + .io_not_found_as_none()? + { + OwningDirstateMap::new_empty(data_mmap) + } else { + OwningDirstateMap::new_empty(Vec::new()) + }; + let (on_disk, placeholder) = map.get_mut_pair(); + *placeholder = DirstateMap::new_v2(on_disk, data_size, metadata)?; + Ok(map) + } else { + let mut map = OwningDirstateMap::new_empty(dirstate_file_contents); + let (on_disk, placeholder) = map.get_mut_pair(); + let (inner, parents) = DirstateMap::new_v1(on_disk)?; + self.dirstate_parents + .set(Some(parents.unwrap_or(DirstateParents::NULL))); + *placeholder = inner; + Ok(map) + } + } + + pub fn dirstate_map( + &self, + ) -> Result<Ref<OwningDirstateMap>, DirstateError> { + self.dirstate_map.get_or_init(self) + } + + pub fn dirstate_map_mut( + &self, + ) -> Result<RefMut<OwningDirstateMap>, DirstateError> { + self.dirstate_map.get_mut_or_init(self) + } + + pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> { + self.changelog.get_or_init(self) + } + + pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> { + self.changelog.get_mut_or_init(self) + } + + pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> { + self.manifestlog.get_or_init(self) + } + + pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> { + 
self.manifestlog.get_mut_or_init(self) + } + + /// Returns the manifest of the *changeset* with the given node ID + pub fn manifest_for_node( + &self, + node: impl Into<NodePrefix>, + ) -> Result<Manifest, RevlogError> { + self.manifestlog()?.data_for_node( + self.changelog()? + .data_for_node(node.into())? + .manifest_node()? + .into(), + ) + } + + /// Returns the manifest of the *changeset* with the given revision number + pub fn manifest_for_rev( + &self, + revision: Revision, + ) -> Result<Manifest, RevlogError> { + self.manifestlog()?.data_for_node( + self.changelog()? + .data_for_rev(revision)? + .manifest_node()? + .into(), + ) + } + + pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> { + Filelog::open(self, path) + } } -impl Vfs<'_> { - pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf { - self.base.join(relative_path) - } +/// Lazily-initialized component of `Repo` with interior mutability +/// +/// This differs from `OnceCell` in that the value can still be "deinitialized" +/// later by setting its inner `Option` to `None`. +struct LazyCell<T, E> { + value: RefCell<Option<T>>, + // `Fn`s that don’t capture environment are zero-size, so this box does + // not allocate: + init: Box<dyn Fn(&Repo) -> Result<T, E>>, +} - pub fn read( - &self, - relative_path: impl AsRef<Path>, - ) -> Result<Vec<u8>, HgError> { - let path = self.join(relative_path); - std::fs::read(&path).when_reading_file(&path) - } - - pub fn mmap_open( - &self, - relative_path: impl AsRef<Path>, - ) -> Result<Mmap, HgError> { - let path = self.base.join(relative_path); - let file = std::fs::File::open(&path).when_reading_file(&path)?; - // TODO: what are the safety requirements here? 
- let mmap = unsafe { MmapOptions::new().map(&file) } - .when_reading_file(&path)?; - Ok(mmap) +impl<T, E> LazyCell<T, E> { + fn new(init: impl Fn(&Repo) -> Result<T, E> + 'static) -> Self { + Self { + value: RefCell::new(None), + init: Box::new(init), + } } - pub fn rename( - &self, - relative_from: impl AsRef<Path>, - relative_to: impl AsRef<Path>, - ) -> Result<(), HgError> { - let from = self.join(relative_from); - let to = self.join(relative_to); - std::fs::rename(&from, &to) - .with_context(|| IoErrorContext::RenamingFile { from, to }) + fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> { + let mut borrowed = self.value.borrow(); + if borrowed.is_none() { + drop(borrowed); + // Only use `borrow_mut` if it is really needed to avoid panic in + // case there is another outstanding borrow but mutation is not + // needed. + *self.value.borrow_mut() = Some((self.init)(repo)?); + borrowed = self.value.borrow() + } + Ok(Ref::map(borrowed, |option| option.as_ref().unwrap())) + } + + pub fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> { + let mut borrowed = self.value.borrow_mut(); + if borrowed.is_none() { + *borrowed = Some((self.init)(repo)?); + } + Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap())) } } - -fn fs_metadata( - path: impl AsRef<Path>, -) -> Result<Option<std::fs::Metadata>, HgError> { - let path = path.as_ref(); - match std::fs::metadata(path) { - Ok(meta) => Ok(Some(meta)), - Err(error) => match error.kind() { - // TODO: when we require a Rust version where `NotADirectory` is - // stable, invert this logic and return None for it and `NotFound` - // and propagate any other error. 
- ErrorKind::PermissionDenied => Err(error).with_context(|| { - IoErrorContext::ReadingMetadata(path.to_owned()) - }), - _ => Ok(None), - }, - } -} - -fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> { - Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir())) -} - -fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> { - Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file())) -}
--- a/rust/hg-core/src/requirements.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/requirements.rs Tue Sep 28 09:40:57 2021 +0200 @@ -1,6 +1,7 @@ use crate::errors::{HgError, HgResultExt}; -use crate::repo::{Repo, Vfs}; +use crate::repo::Repo; use crate::utils::join_display; +use crate::vfs::Vfs; use std::collections::HashSet; fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
--- a/rust/hg-core/src/revlog.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/revlog.rs Tue Sep 28 09:40:57 2021 +0200 @@ -11,6 +11,7 @@ pub mod path_encode; pub use node::{FromHexError, Node, NodePrefix}; pub mod changelog; +pub mod filelog; pub mod index; pub mod manifest; pub mod patch;
--- a/rust/hg-core/src/revlog/changelog.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/revlog/changelog.rs Tue Sep 28 09:40:57 2021 +0200 @@ -12,22 +12,22 @@ impl Changelog { /// Open the `changelog` of a repository given by its root. - pub fn open(repo: &Repo) -> Result<Self, RevlogError> { + pub fn open(repo: &Repo) -> Result<Self, HgError> { let revlog = Revlog::open(repo, "00changelog.i", None)?; Ok(Self { revlog }) } - /// Return the `ChangelogEntry` a given node id. - pub fn get_node( + /// Return the `ChangelogEntry` for the given node ID. + pub fn data_for_node( &self, node: NodePrefix, ) -> Result<ChangelogEntry, RevlogError> { - let rev = self.revlog.get_node_rev(node)?; - self.get_rev(rev) + let rev = self.revlog.rev_from_node(node)?; + self.data_for_rev(rev) } - /// Return the `ChangelogEntry` of a given node revision. - pub fn get_rev( + /// Return the `ChangelogEntry` of the given revision number. + pub fn data_for_rev( &self, rev: Revision, ) -> Result<ChangelogEntry, RevlogError> { @@ -36,7 +36,7 @@ } pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> { - Some(self.revlog.index.get_entry(rev)?.hash()) + self.revlog.node_from_rev(rev) } } @@ -57,9 +57,11 @@ /// Return the node id of the `manifest` referenced by this `changelog` /// entry. - pub fn manifest_node(&self) -> Result<&[u8], RevlogError> { - self.lines() - .next() - .ok_or_else(|| HgError::corrupted("empty changelog entry").into()) + pub fn manifest_node(&self) -> Result<Node, HgError> { + Node::from_hex_for_repo( + self.lines() + .next() + .ok_or_else(|| HgError::corrupted("empty changelog entry"))?, + ) } }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/revlog/filelog.rs Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,79 @@ +use crate::errors::HgError; +use crate::repo::Repo; +use crate::revlog::path_encode::path_encode; +use crate::revlog::revlog::{Revlog, RevlogError}; +use crate::revlog::NodePrefix; +use crate::revlog::Revision; +use crate::utils::files::get_path_from_bytes; +use crate::utils::hg_path::HgPath; +use crate::utils::SliceExt; +use std::borrow::Cow; +use std::path::PathBuf; + +/// A specialized `Revlog` to work with file data logs. +pub struct Filelog { + /// The generic `revlog` format. + revlog: Revlog, +} + +impl Filelog { + pub fn open(repo: &Repo, file_path: &HgPath) -> Result<Self, HgError> { + let index_path = store_path(file_path, b".i"); + let data_path = store_path(file_path, b".d"); + let revlog = Revlog::open(repo, index_path, Some(&data_path))?; + Ok(Self { revlog }) + } + + /// The given node ID is that of the file as found in a manifest, not of a + /// changeset. + pub fn data_for_node( + &self, + file_node: impl Into<NodePrefix>, + ) -> Result<FilelogEntry, RevlogError> { + let file_rev = self.revlog.rev_from_node(file_node.into())?; + self.data_for_rev(file_rev) + } + + /// The given revision is that of the file as found in a manifest, not of a + /// changeset. 
+ pub fn data_for_rev( + &self, + file_rev: Revision, + ) -> Result<FilelogEntry, RevlogError> { + let data = self.revlog.get_rev_data(file_rev)?; + Ok(FilelogEntry(data.into())) + } +} + +fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf { + let encoded_bytes = + path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat()); + get_path_from_bytes(&encoded_bytes).into() +} + +pub struct FilelogEntry<'filelog>(Cow<'filelog, [u8]>); + +impl<'filelog> FilelogEntry<'filelog> { + /// Split into metadata and data + pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> { + const DELIMITER: &[u8; 2] = &[b'\x01', b'\n']; + + if let Some(rest) = self.0.drop_prefix(DELIMITER) { + if let Some((metadata, data)) = rest.split_2_by_slice(DELIMITER) { + Ok((Some(metadata), data)) + } else { + Err(HgError::corrupted( + "Missing metadata end delimiter in filelog entry", + )) + } + } else { + Ok((None, &self.0)) + } + } + + /// Returns the file contents at this revision, stripped of any metadata + pub fn data(&self) -> Result<&[u8], HgError> { + let (_metadata, data) = self.split()?; + Ok(data) + } +}
--- a/rust/hg-core/src/revlog/index.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/revlog/index.rs Tue Sep 28 09:40:57 2021 +0200 @@ -5,7 +5,6 @@ use crate::errors::HgError; use crate::revlog::node::Node; -use crate::revlog::revlog::RevlogError; use crate::revlog::{Revision, NULL_REVISION}; pub const INDEX_ENTRY_SIZE: usize = 64; @@ -23,7 +22,7 @@ /// Calculate the start of each entry when is_inline is true. pub fn new( bytes: Box<dyn Deref<Target = [u8]> + Send>, - ) -> Result<Self, RevlogError> { + ) -> Result<Self, HgError> { if is_inline(&bytes) { let mut offset: usize = 0; let mut offsets = Vec::new();
--- a/rust/hg-core/src/revlog/manifest.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/revlog/manifest.rs Tue Sep 28 09:40:57 2021 +0200 @@ -1,48 +1,60 @@ +use crate::errors::HgError; use crate::repo::Repo; use crate::revlog::revlog::{Revlog, RevlogError}; -use crate::revlog::NodePrefix; use crate::revlog::Revision; +use crate::revlog::{Node, NodePrefix}; use crate::utils::hg_path::HgPath; /// A specialized `Revlog` to work with `manifest` data format. -pub struct Manifest { +pub struct Manifestlog { /// The generic `revlog` format. revlog: Revlog, } -impl Manifest { +impl Manifestlog { /// Open the `manifest` of a repository given by its root. - pub fn open(repo: &Repo) -> Result<Self, RevlogError> { + pub fn open(repo: &Repo) -> Result<Self, HgError> { let revlog = Revlog::open(repo, "00manifest.i", None)?; Ok(Self { revlog }) } - /// Return the `ManifestEntry` of a given node id. - pub fn get_node( + /// Return the `Manifest` for the given node ID. + /// + /// Note: this is a node ID in the manifestlog, typically found through + /// `ChangelogEntry::manifest_node`. It is *not* the node ID of any + /// changeset. + /// + /// See also `Repo::manifest_for_node` + pub fn data_for_node( &self, node: NodePrefix, - ) -> Result<ManifestEntry, RevlogError> { - let rev = self.revlog.get_node_rev(node)?; - self.get_rev(rev) + ) -> Result<Manifest, RevlogError> { + let rev = self.revlog.rev_from_node(node)?; + self.data_for_rev(rev) } - /// Return the `ManifestEntry` of a given node revision. - pub fn get_rev( + /// Return the `Manifest` of a given revision number. + /// + /// Note: this is a revision number in the manifestlog, *not* of any + /// changeset. 
+ /// + /// See also `Repo::manifest_for_rev` + pub fn data_for_rev( &self, rev: Revision, - ) -> Result<ManifestEntry, RevlogError> { + ) -> Result<Manifest, RevlogError> { let bytes = self.revlog.get_rev_data(rev)?; - Ok(ManifestEntry { bytes }) + Ok(Manifest { bytes }) } } -/// `Manifest` entry which knows how to interpret the `manifest` data bytes. +/// `Manifestlog` entry which knows how to interpret the `manifest` data bytes. #[derive(Debug)] -pub struct ManifestEntry { +pub struct Manifest { bytes: Vec<u8>, } -impl ManifestEntry { +impl Manifest { /// Return an iterator over the lines of the entry. pub fn lines(&self) -> impl Iterator<Item = &[u8]> { self.bytes @@ -73,4 +85,17 @@ (HgPath::new(&line[..pos]), &line[hash_start..hash_end]) }) } + + /// If the given path is in this manifest, return its filelog node ID + pub fn find_file(&self, path: &HgPath) -> Result<Option<Node>, HgError> { + // TODO: use binary search instead of linear scan. This may involve + // building (and caching) an index of the byte indicex of each manifest + // line. + for (manifest_path, node) in self.files_with_nodes() { + if manifest_path == path { + return Ok(Some(Node::from_hex_for_repo(node)?)); + } + } + Ok(None) + } }
--- a/rust/hg-core/src/revlog/nodemap_docket.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/revlog/nodemap_docket.rs Tue Sep 28 09:40:57 2021 +0200 @@ -1,10 +1,9 @@ use crate::errors::{HgError, HgResultExt}; use crate::requirements; use bytes_cast::{unaligned, BytesCast}; -use memmap::Mmap; +use memmap2::Mmap; use std::path::{Path, PathBuf}; -use super::revlog::RevlogError; use crate::repo::Repo; use crate::utils::strip_suffix; @@ -38,7 +37,7 @@ pub fn read_from_file( repo: &Repo, index_path: &Path, - ) -> Result<Option<(Self, Mmap)>, RevlogError> { + ) -> Result<Option<(Self, Mmap)>, HgError> { if !repo .requirements() .contains(requirements::NODEMAP_REQUIREMENT) @@ -65,10 +64,9 @@ }; /// Treat any error as a parse error - fn parse<T, E>(result: Result<T, E>) -> Result<T, RevlogError> { - result.map_err(|_| { - HgError::corrupted("nodemap docket parse error").into() - }) + fn parse<T, E>(result: Result<T, E>) -> Result<T, HgError> { + result + .map_err(|_| HgError::corrupted("nodemap docket parse error")) } let (header, rest) = parse(DocketHeader::from_bytes(input))?; @@ -94,7 +92,7 @@ if mmap.len() >= data_length { Ok(Some((docket, mmap))) } else { - Err(HgError::corrupted("persistent nodemap too short").into()) + Err(HgError::corrupted("persistent nodemap too short")) } } else { // Even if .hg/requires opted in, some revlogs are deemed small
--- a/rust/hg-core/src/revlog/revlog.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/revlog/revlog.rs Tue Sep 28 09:40:57 2021 +0200 @@ -18,6 +18,7 @@ use crate::errors::HgError; use crate::repo::Repo; use crate::revlog::Revision; +use crate::{Node, NULL_REVISION}; #[derive(derive_more::From)] pub enum RevlogError { @@ -50,7 +51,7 @@ /// When index and data are not interleaved: bytes of the revlog index. /// When index and data are interleaved: bytes of the revlog index and /// data. - pub(crate) index: Index, + index: Index, /// When index and data are not interleaved: bytes of the revlog data data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>, /// When present on disk: the persistent nodemap for this revlog @@ -67,14 +68,14 @@ repo: &Repo, index_path: impl AsRef<Path>, data_path: Option<&Path>, - ) -> Result<Self, RevlogError> { + ) -> Result<Self, HgError> { let index_path = index_path.as_ref(); let index_mmap = repo.store_vfs().mmap_open(&index_path)?; let version = get_version(&index_mmap); if version != 1 { // A proper new version should have had a repo/store requirement. - return Err(RevlogError::corrupted()); + return Err(HgError::corrupted("corrupted revlog")); } let index = Index::new(Box::new(index_mmap))?; @@ -118,12 +119,23 @@ self.index.is_empty() } - /// Return the full data associated to a node. + /// Returns the node ID for the given revision number, if it exists in this + /// revlog + pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> { + Some(self.index.get_entry(rev)?.hash()) + } + + /// Return the revision number for the given node ID, if it exists in this + /// revlog #[timed] - pub fn get_node_rev( + pub fn rev_from_node( &self, node: NodePrefix, ) -> Result<Revision, RevlogError> { + if node.is_prefix_of(&NULL_NODE) { + return Ok(NULL_REVISION); + } + if let Some(nodemap) = &self.nodemap { return nodemap .find_bin(&self.index, node)?
--- a/rust/hg-core/src/revset.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/revset.rs Tue Sep 28 09:40:57 2021 +0200 @@ -4,7 +4,6 @@ use crate::errors::HgError; use crate::repo::Repo; -use crate::revlog::changelog::Changelog; use crate::revlog::revlog::{Revlog, RevlogError}; use crate::revlog::NodePrefix; use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX}; @@ -17,7 +16,7 @@ input: &str, repo: &Repo, ) -> Result<Revision, RevlogError> { - let changelog = Changelog::open(repo)?; + let changelog = repo.changelog()?; match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) { Err(RevlogError::InvalidRevision) => {} // Try other syntax @@ -46,8 +45,14 @@ input: &str, revlog: &Revlog, ) -> Result<Revision, RevlogError> { + // The Python equivalent of this is part of `revsymbol` in + // `mercurial/scmutil.py` + if let Ok(integer) = input.parse::<i32>() { - if integer >= 0 && revlog.has_rev(integer) { + if integer.to_string() == input + && integer >= 0 + && revlog.has_rev(integer) + { return Ok(integer); } } @@ -56,7 +61,7 @@ { return Err(RevlogError::WDirUnsupported); } - return revlog.get_node_rev(prefix); + return revlog.rev_from_node(prefix); } Err(RevlogError::InvalidRevision) }
--- a/rust/hg-core/src/utils.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-core/src/utils.rs Tue Sep 28 09:40:57 2021 +0200 @@ -67,36 +67,35 @@ } pub trait SliceExt { - fn trim_end_newlines(&self) -> &Self; fn trim_end(&self) -> &Self; fn trim_start(&self) -> &Self; + fn trim_end_matches(&self, f: impl FnMut(u8) -> bool) -> &Self; + fn trim_start_matches(&self, f: impl FnMut(u8) -> bool) -> &Self; fn trim(&self) -> &Self; fn drop_prefix(&self, needle: &Self) -> Option<&Self>; fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>; -} - -#[allow(clippy::trivially_copy_pass_by_ref)] -fn is_not_whitespace(c: &u8) -> bool { - !(*c as char).is_whitespace() + fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])>; } impl SliceExt for [u8] { - fn trim_end_newlines(&self) -> &[u8] { - if let Some(last) = self.iter().rposition(|&byte| byte != b'\n') { + fn trim_end(&self) -> &[u8] { + self.trim_end_matches(|byte| byte.is_ascii_whitespace()) + } + + fn trim_start(&self) -> &[u8] { + self.trim_start_matches(|byte| byte.is_ascii_whitespace()) + } + + fn trim_end_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self { + if let Some(last) = self.iter().rposition(|&byte| !f(byte)) { &self[..=last] } else { &[] } } - fn trim_end(&self) -> &[u8] { - if let Some(last) = self.iter().rposition(is_not_whitespace) { - &self[..=last] - } else { - &[] - } - } - fn trim_start(&self) -> &[u8] { - if let Some(first) = self.iter().position(is_not_whitespace) { + + fn trim_start_matches(&self, mut f: impl FnMut(u8) -> bool) -> &Self { + if let Some(first) = self.iter().position(|&byte| !f(byte)) { &self[first..] } else { &[] @@ -136,6 +135,14 @@ let b = iter.next()?; Some((a, b)) } + + fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])> { + if let Some(pos) = find_slice_in_slice(self, separator) { + Some((&self[..pos], &self[pos + separator.len()..])) + } else { + None + } + } } pub trait Escaped {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/vfs.rs Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,73 @@ +use crate::errors::{HgError, IoErrorContext, IoResultExt}; +use memmap2::{Mmap, MmapOptions}; +use std::io::ErrorKind; +use std::path::{Path, PathBuf}; + +/// Filesystem access abstraction for the contents of a given "base" diretory +#[derive(Clone, Copy)] +pub struct Vfs<'a> { + pub(crate) base: &'a Path, +} + +impl Vfs<'_> { + pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf { + self.base.join(relative_path) + } + + pub fn read( + &self, + relative_path: impl AsRef<Path>, + ) -> Result<Vec<u8>, HgError> { + let path = self.join(relative_path); + std::fs::read(&path).when_reading_file(&path) + } + + pub fn mmap_open( + &self, + relative_path: impl AsRef<Path>, + ) -> Result<Mmap, HgError> { + let path = self.base.join(relative_path); + let file = std::fs::File::open(&path).when_reading_file(&path)?; + // TODO: what are the safety requirements here? + let mmap = unsafe { MmapOptions::new().map(&file) } + .when_reading_file(&path)?; + Ok(mmap) + } + + pub fn rename( + &self, + relative_from: impl AsRef<Path>, + relative_to: impl AsRef<Path>, + ) -> Result<(), HgError> { + let from = self.join(relative_from); + let to = self.join(relative_to); + std::fs::rename(&from, &to) + .with_context(|| IoErrorContext::RenamingFile { from, to }) + } +} + +fn fs_metadata( + path: impl AsRef<Path>, +) -> Result<Option<std::fs::Metadata>, HgError> { + let path = path.as_ref(); + match std::fs::metadata(path) { + Ok(meta) => Ok(Some(meta)), + Err(error) => match error.kind() { + // TODO: when we require a Rust version where `NotADirectory` is + // stable, invert this logic and return None for it and `NotFound` + // and propagate any other error. 
+ ErrorKind::PermissionDenied => Err(error).with_context(|| { + IoErrorContext::ReadingMetadata(path.to_owned()) + }), + _ => Ok(None), + }, + } +} + +pub(crate) fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> { + Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir())) +} + +pub(crate) fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> { + Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file())) +}
--- a/rust/hg-cpython/Cargo.toml Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-cpython/Cargo.toml Tue Sep 28 09:40:57 2021 +0200 @@ -26,6 +26,7 @@ libc = '*' log = "0.4.8" env_logger = "0.7.1" +stable_deref_trait = "1.2.0" [dependencies.cpython] version = "0.6.0"
--- a/rust/hg-cpython/src/copy_tracing.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-cpython/src/copy_tracing.rs Tue Sep 28 09:40:57 2021 +0200 @@ -13,58 +13,7 @@ use hg::copy_tracing::CombineChangesetCopies; use hg::Revision; -use self::pybytes_with_data::PyBytesWithData; - -// Module to encapsulate private fields -mod pybytes_with_data { - use cpython::{PyBytes, Python}; - - /// Safe abstraction over a `PyBytes` together with the `&[u8]` slice - /// that borrows it. - /// - /// Calling `PyBytes::data` requires a GIL marker but we want to access the - /// data in a thread that (ideally) does not need to acquire the GIL. - /// This type allows separating the call an the use. - pub(super) struct PyBytesWithData { - #[allow(unused)] - keep_alive: PyBytes, - - /// Borrows the buffer inside `self.keep_alive`, - /// but the borrow-checker cannot express self-referential structs. - data: *const [u8], - } - - fn require_send<T: Send>() {} - - #[allow(unused)] - fn static_assert_pybytes_is_send() { - require_send::<PyBytes>; - } - - // Safety: PyBytes is Send. Raw pointers are not by default, - // but here sending one to another thread is fine since we ensure it stays - // valid. - unsafe impl Send for PyBytesWithData {} - - impl PyBytesWithData { - pub fn new(py: Python, bytes: PyBytes) -> Self { - Self { - data: bytes.data(py), - keep_alive: bytes, - } - } - - pub fn data(&self) -> &[u8] { - // Safety: the raw pointer is valid as long as the PyBytes is still - // alive, and the returned slice borrows `self`. - unsafe { &*self.data } - } - - pub fn unwrap(self) -> PyBytes { - self.keep_alive - } - } -} +use crate::pybytes_deref::PyBytesDeref; /// Combines copies information contained into revision `revs` to build a copy /// map. @@ -123,7 +72,7 @@ // // TODO: tweak the bound? 
let (rev_info_sender, rev_info_receiver) = - crossbeam_channel::bounded::<RevInfo<PyBytesWithData>>(1000); + crossbeam_channel::bounded::<RevInfo<PyBytesDeref>>(1000); // This channel (going the other way around) however is unbounded. // If they were both bounded, there might potentially be deadlocks @@ -143,7 +92,7 @@ CombineChangesetCopies::new(children_count); for (rev, p1, p2, opt_bytes) in rev_info_receiver { let files = match &opt_bytes { - Some(raw) => ChangedFiles::new(raw.data()), + Some(raw) => ChangedFiles::new(raw.as_ref()), // Python None was extracted to Option::None, // meaning there was no copy data. None => ChangedFiles::new_empty(), @@ -169,7 +118,7 @@ for rev_info in revs_info { let (rev, p1, p2, opt_bytes) = rev_info?; - let opt_bytes = opt_bytes.map(|b| PyBytesWithData::new(py, b)); + let opt_bytes = opt_bytes.map(|b| PyBytesDeref::new(py, b)); // We’d prefer to avoid the child thread calling into Python code, // but this avoids a potential deadlock on the GIL if it does:
--- a/rust/hg-cpython/src/dirstate.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-cpython/src/dirstate.rs Tue Sep 28 09:40:57 2021 +0200 @@ -12,9 +12,7 @@ mod copymap; mod dirs_multiset; mod dirstate_map; -mod dispatch; mod non_normal_entries; -mod owning; mod status; use crate::{ dirstate::{ @@ -23,13 +21,11 @@ exceptions, }; use cpython::{ - exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult, - PySequence, Python, + PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult, Python, }; use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER; -use hg::{utils::hg_path::HgPathBuf, DirstateEntry, EntryState, StateMap}; +use hg::DirstateEntry; use libc::{c_char, c_int}; -use std::convert::TryFrom; // C code uses a custom `dirstate_tuple` type, checks in multiple instances // for this type, and raises a Python `Exception` if the check does not pass. @@ -52,62 +48,24 @@ py: Python, entry: &DirstateEntry, ) -> PyResult<PyObject> { - let &DirstateEntry { - state, - mode, - size, - mtime, - } = entry; // Explicitly go through u8 first, then cast to platform-specific `c_char` // because Into<u8> has a specific implementation while `as c_char` would // just do a naive enum cast. 
- let state_code: u8 = state.into(); - make_dirstate_item_raw(py, state_code, mode, size, mtime) -} + let state_code: u8 = entry.state().into(); -pub fn make_dirstate_item_raw( - py: Python, - state: u8, - mode: i32, - size: i32, - mtime: i32, -) -> PyResult<PyObject> { let make = make_dirstate_item_capi::retrieve(py)?; let maybe_obj = unsafe { - let ptr = make(state as c_char, mode, size, mtime); + let ptr = make( + state_code as c_char, + entry.mode(), + entry.size(), + entry.mtime(), + ); PyObject::from_owned_ptr_opt(py, ptr) }; maybe_obj.ok_or_else(|| PyErr::fetch(py)) } -pub fn extract_dirstate(py: Python, dmap: &PyDict) -> Result<StateMap, PyErr> { - dmap.items(py) - .iter() - .map(|(filename, stats)| { - let stats = stats.extract::<PySequence>(py)?; - let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?; - let state = - EntryState::try_from(state.data(py)[0]).map_err(|e| { - PyErr::new::<exc::ValueError, _>(py, e.to_string()) - })?; - let mode = stats.get_item(py, 1)?.extract(py)?; - let size = stats.get_item(py, 2)?.extract(py)?; - let mtime = stats.get_item(py, 3)?.extract(py)?; - let filename = filename.extract::<PyBytes>(py)?; - let filename = filename.data(py); - Ok(( - HgPathBuf::from(filename.to_owned()), - DirstateEntry { - state, - mode, - size, - mtime, - }, - )) - }) - .collect() -} - /// Create the module, with `__package__` given from parent pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> { let dotted_name = &format!("{}.dirstate", package);
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs Tue Sep 28 09:40:57 2021 +0200 @@ -9,19 +9,15 @@ //! `hg-core` package. use std::cell::RefCell; -use std::convert::TryInto; use cpython::{ exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult, Python, UnsafePyLeaked, }; -use crate::dirstate::extract_dirstate; use hg::{ - errors::HgError, utils::hg_path::{HgPath, HgPathBuf}, - DirsMultiset, DirsMultisetIter, DirstateError, DirstateMapError, - EntryState, + DirsMultiset, DirsMultisetIter, DirstateMapError, }; py_class!(pub class Dirs |py| { @@ -32,25 +28,11 @@ def __new__( _cls, map: PyObject, - skip: Option<PyObject> = None ) -> PyResult<Self> { - let mut skip_state: Option<EntryState> = None; - if let Some(skip) = skip { - skip_state = Some( - skip.extract::<PyBytes>(py)?.data(py)[0] - .try_into() - .map_err(|e: HgError| { - PyErr::new::<exc::ValueError, _>(py, e.to_string()) - })?, - ); - } - let inner = if let Ok(map) = map.cast_as::<PyDict>(py) { - let dirstate = extract_dirstate(py, &map)?; - let dirstate = dirstate.iter().map(|(k, v)| Ok((k, *v))); - DirsMultiset::from_dirstate(dirstate, skip_state) - .map_err(|e: DirstateError| { - PyErr::new::<exc::ValueError, _>(py, e.to_string()) - })? + let inner = if map.cast_as::<PyDict>(py).is_ok() { + let err = "pathutil.dirs() with a dict should only be used by the Python dirstatemap \ + and should not be used when Rust is enabled"; + return Err(PyErr::new::<exc::TypeError, _>(py, err.to_string())) } else { let map: Result<Vec<HgPathBuf>, PyErr> = map .iter(py)?
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Tue Sep 28 09:40:57 2021 +0200 @@ -20,19 +20,19 @@ use crate::{ dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator}, dirstate::make_dirstate_item, - dirstate::make_dirstate_item_raw, dirstate::non_normal_entries::{ NonNormalEntries, NonNormalEntriesIterator, }, - dirstate::owning::OwningDirstateMap, - parsers::dirstate_parents_to_pytuple, + pybytes_deref::PyBytesDeref, }; use hg::{ dirstate::parsers::Timestamp, dirstate::MTIME_UNSET, dirstate::SIZE_NON_NORMAL, + dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap, dirstate_tree::dispatch::DirstateMapMethods, dirstate_tree::on_disk::DirstateV2ParseError, + dirstate_tree::owning::OwningDirstateMap, revlog::Node, utils::files::normalize_case, utils::hg_path::{HgPath, HgPathBuf}, @@ -62,8 +62,13 @@ on_disk: PyBytes, ) -> PyResult<PyObject> { let (inner, parents) = if use_dirstate_tree { - let (map, parents) = OwningDirstateMap::new_v1(py, on_disk) + let on_disk = PyBytesDeref::new(py, on_disk); + let mut map = OwningDirstateMap::new_empty(on_disk); + let (on_disk, map_placeholder) = map.get_mut_pair(); + + let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk) .map_err(|e| dirstate_error(py, e))?; + *map_placeholder = actual_map; (Box::new(map) as _, parents) } else { let bytes = on_disk.data(py); @@ -72,7 +77,11 @@ (Box::new(map) as _, parents) }; let map = Self::create_instance(py, inner)?; - let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p)); + let parents = parents.map(|p| { + let p1 = PyBytes::new(py, p.p1.as_bytes()); + let p2 = PyBytes::new(py, p.p2.as_bytes()); + (p1, p2) + }); Ok((map, parents).to_py_object(py).into_object()) } @@ -86,10 +95,13 @@ let dirstate_error = |e: DirstateError| { PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e)) }; - let inner = OwningDirstateMap::new_v2( - py, on_disk, data_size, 
tree_metadata, + let on_disk = PyBytesDeref::new(py, on_disk); + let mut map = OwningDirstateMap::new_empty(on_disk); + let (on_disk, map_placeholder) = map.get_mut_pair(); + *map_placeholder = TreeDirstateMap::new_v2( + on_disk, data_size, tree_metadata.data(py), ).map_err(dirstate_error)?; - let map = Self::create_instance(py, Box::new(inner))?; + let map = Self::create_instance(py, Box::new(map))?; Ok(map.into_object()) } @@ -122,12 +134,12 @@ let filename = HgPath::new(f.data(py)); let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?; let state = state.data(py)[0]; - let entry = DirstateEntry { - state: state.try_into().expect("state is always valid"), - mtime: item.getattr(py, "mtime")?.extract(py)?, - size: item.getattr(py, "size")?.extract(py)?, - mode: item.getattr(py, "mode")?.extract(py)?, - }; + let entry = DirstateEntry::from_v1_data( + state.try_into().expect("state is always valid"), + item.getattr(py, "mode")?.extract(py)?, + item.getattr(py, "size")?.extract(py)?, + item.getattr(py, "mtime")?.extract(py)?, + ); self.inner(py).borrow_mut().set_v1(filename, entry); Ok(py.None()) } @@ -163,13 +175,7 @@ } else { mtime.extract(py)? 
}; - let entry = DirstateEntry { - // XXX Arbitrary default value since the value is determined later - state: EntryState::Normal, - mode: mode, - size: size, - mtime: mtime, - }; + let entry = DirstateEntry::new_for_add_file(mode, size, mtime); let added = added.extract::<PyBool>(py)?.is_true(); let merged = merged.extract::<PyBool>(py)?.is_true(); let from_p2 = from_p2.extract::<PyBool>(py)?.is_true(); @@ -409,7 +415,7 @@ let dict = PyDict::new(py); for item in self.inner(py).borrow_mut().iter() { let (path, entry) = item.map_err(|e| v2_error(py, e))?; - if entry.state != EntryState::Removed { + if entry.state() != EntryState::Removed { let key = normalize_case(path); let value = path; dict.set_item( @@ -599,14 +605,14 @@ Ok(dirs) } - def debug_iter(&self) -> PyResult<PyList> { + def debug_iter(&self, all: bool) -> PyResult<PyList> { let dirs = PyList::new(py, &[]); - for item in self.inner(py).borrow().debug_iter() { + for item in self.inner(py).borrow().debug_iter(all) { let (path, (state, mode, size, mtime)) = item.map_err(|e| v2_error(py, e))?; let path = PyBytes::new(py, path.as_bytes()); - let item = make_dirstate_item_raw(py, state, mode, size, mtime)?; - dirs.append(py, (path, item).to_py_object(py).into_object()) + let item = (path, state, mode, size, mtime); + dirs.append(py, item.to_py_object(py).into_object()) } Ok(dirs) }
--- a/rust/hg-cpython/src/dirstate/dispatch.rs Tue Sep 21 18:18:56 2021 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,240 +0,0 @@ -use crate::dirstate::owning::OwningDirstateMap; -use hg::dirstate::parsers::Timestamp; -use hg::dirstate_tree::dispatch::DirstateMapMethods; -use hg::dirstate_tree::on_disk::DirstateV2ParseError; -use hg::matchers::Matcher; -use hg::utils::hg_path::{HgPath, HgPathBuf}; -use hg::CopyMapIter; -use hg::DirstateEntry; -use hg::DirstateError; -use hg::DirstateParents; -use hg::DirstateStatus; -use hg::PatternFileWarning; -use hg::StateMapIter; -use hg::StatusError; -use hg::StatusOptions; -use std::path::PathBuf; - -impl DirstateMapMethods for OwningDirstateMap { - fn clear(&mut self) { - self.get_mut().clear() - } - - fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) { - self.get_mut().set_v1(filename, entry) - } - - fn add_file( - &mut self, - filename: &HgPath, - entry: DirstateEntry, - added: bool, - merged: bool, - from_p2: bool, - possibly_dirty: bool, - ) -> Result<(), DirstateError> { - self.get_mut().add_file( - filename, - entry, - added, - merged, - from_p2, - possibly_dirty, - ) - } - - fn remove_file( - &mut self, - filename: &HgPath, - in_merge: bool, - ) -> Result<(), DirstateError> { - self.get_mut().remove_file(filename, in_merge) - } - - fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> { - self.get_mut().drop_file(filename) - } - - fn clear_ambiguous_times( - &mut self, - filenames: Vec<HgPathBuf>, - now: i32, - ) -> Result<(), DirstateV2ParseError> { - self.get_mut().clear_ambiguous_times(filenames, now) - } - - fn non_normal_entries_contains( - &mut self, - key: &HgPath, - ) -> Result<bool, DirstateV2ParseError> { - self.get_mut().non_normal_entries_contains(key) - } - - fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool { - self.get_mut().non_normal_entries_remove(key) - } - - fn non_normal_entries_add(&mut self, key: &HgPath) { - 
self.get_mut().non_normal_entries_add(key) - } - - fn non_normal_or_other_parent_paths( - &mut self, - ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_> - { - self.get_mut().non_normal_or_other_parent_paths() - } - - fn set_non_normal_other_parent_entries(&mut self, force: bool) { - self.get_mut().set_non_normal_other_parent_entries(force) - } - - fn iter_non_normal_paths( - &mut self, - ) -> Box< - dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_, - > { - self.get_mut().iter_non_normal_paths() - } - - fn iter_non_normal_paths_panic( - &self, - ) -> Box< - dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_, - > { - self.get().iter_non_normal_paths_panic() - } - - fn iter_other_parent_paths( - &mut self, - ) -> Box< - dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_, - > { - self.get_mut().iter_other_parent_paths() - } - - fn has_tracked_dir( - &mut self, - directory: &HgPath, - ) -> Result<bool, DirstateError> { - self.get_mut().has_tracked_dir(directory) - } - - fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> { - self.get_mut().has_dir(directory) - } - - fn pack_v1( - &mut self, - parents: DirstateParents, - now: Timestamp, - ) -> Result<Vec<u8>, DirstateError> { - self.get_mut().pack_v1(parents, now) - } - - fn pack_v2( - &mut self, - now: Timestamp, - can_append: bool, - ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> { - self.get_mut().pack_v2(now, can_append) - } - - fn status<'a>( - &'a mut self, - matcher: &'a (dyn Matcher + Sync), - root_dir: PathBuf, - ignore_files: Vec<PathBuf>, - options: StatusOptions, - ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError> - { - self.get_mut() - .status(matcher, root_dir, ignore_files, options) - } - - fn copy_map_len(&self) -> usize { - self.get().copy_map_len() - } - - fn copy_map_iter(&self) -> CopyMapIter<'_> { - self.get().copy_map_iter() - } - - fn copy_map_contains_key( - &self, 
- key: &HgPath, - ) -> Result<bool, DirstateV2ParseError> { - self.get().copy_map_contains_key(key) - } - - fn copy_map_get( - &self, - key: &HgPath, - ) -> Result<Option<&HgPath>, DirstateV2ParseError> { - self.get().copy_map_get(key) - } - - fn copy_map_remove( - &mut self, - key: &HgPath, - ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { - self.get_mut().copy_map_remove(key) - } - - fn copy_map_insert( - &mut self, - key: HgPathBuf, - value: HgPathBuf, - ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { - self.get_mut().copy_map_insert(key, value) - } - - fn len(&self) -> usize { - self.get().len() - } - - fn contains_key( - &self, - key: &HgPath, - ) -> Result<bool, DirstateV2ParseError> { - self.get().contains_key(key) - } - - fn get( - &self, - key: &HgPath, - ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { - self.get().get(key) - } - - fn iter(&self) -> StateMapIter<'_> { - self.get().iter() - } - - fn iter_tracked_dirs( - &mut self, - ) -> Result< - Box< - dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> - + Send - + '_, - >, - DirstateError, - > { - self.get_mut().iter_tracked_dirs() - } - - fn debug_iter( - &self, - ) -> Box< - dyn Iterator< - Item = Result< - (&HgPath, (u8, i32, i32, i32)), - DirstateV2ParseError, - >, - > + Send - + '_, - > { - self.get().debug_iter() - } -}
--- a/rust/hg-cpython/src/dirstate/owning.rs Tue Sep 21 18:18:56 2021 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,117 +0,0 @@ -use cpython::PyBytes; -use cpython::Python; -use hg::dirstate_tree::dirstate_map::DirstateMap; -use hg::DirstateError; -use hg::DirstateParents; - -/// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it -/// borrows. This is similar to the owning-ref crate. -/// -/// This is similar to [`OwningRef`] which is more limited because it -/// represents exactly one `&T` reference next to the value it borrows, as -/// opposed to a struct that may contain an arbitrary number of references in -/// arbitrarily-nested data structures. -/// -/// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html -pub(super) struct OwningDirstateMap { - /// Owned handle to a bytes buffer with a stable address. - /// - /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>. - on_disk: PyBytes, - - /// Pointer for `Box<DirstateMap<'on_disk>>`, typed-erased because the - /// language cannot represent a lifetime referencing a sibling field. - /// This is not quite a self-referencial struct (moving this struct is not - /// a problem as it doesn’t change the address of the bytes buffer owned - /// by `PyBytes`) but touches similar borrow-checker limitations. - ptr: *mut (), -} - -impl OwningDirstateMap { - pub fn new_v1( - py: Python, - on_disk: PyBytes, - ) -> Result<(Self, Option<DirstateParents>), DirstateError> { - let bytes: &'_ [u8] = on_disk.data(py); - let (map, parents) = DirstateMap::new_v1(bytes)?; - - // Like in `bytes` above, this `'_` lifetime parameter borrows from - // the bytes buffer owned by `on_disk`. - let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map)); - - // Erase the pointed type entirely in order to erase the lifetime. 
- let ptr: *mut () = ptr.cast(); - - Ok((Self { on_disk, ptr }, parents)) - } - - pub fn new_v2( - py: Python, - on_disk: PyBytes, - data_size: usize, - tree_metadata: PyBytes, - ) -> Result<Self, DirstateError> { - let bytes: &'_ [u8] = on_disk.data(py); - let map = - DirstateMap::new_v2(bytes, data_size, tree_metadata.data(py))?; - - // Like in `bytes` above, this `'_` lifetime parameter borrows from - // the bytes buffer owned by `on_disk`. - let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map)); - - // Erase the pointed type entirely in order to erase the lifetime. - let ptr: *mut () = ptr.cast(); - - Ok(Self { on_disk, ptr }) - } - - pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> { - // SAFETY: We cast the type-erased pointer back to the same type it had - // in `new`, except with a different lifetime parameter. This time we - // connect the lifetime to that of `self`. This cast is valid because - // `self` owns the same `PyBytes` whose buffer `DirstateMap` - // references. That buffer has a stable memory address because the byte - // string value of a `PyBytes` is immutable. - let ptr: *mut DirstateMap<'a> = self.ptr.cast(); - // SAFETY: we dereference that pointer, connecting the lifetime of the - // new `&mut` to that of `self`. This is valid because the - // raw pointer is to a boxed value, and `self` owns that box. - unsafe { &mut *ptr } - } - - pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> { - // SAFETY: same reasoning as in `get_mut` above. - let ptr: *mut DirstateMap<'a> = self.ptr.cast(); - unsafe { &*ptr } - } -} - -impl Drop for OwningDirstateMap { - fn drop(&mut self) { - // Silence a "field is never read" warning, and demonstrate that this - // value is still alive. - let _ = &self.on_disk; - // SAFETY: this cast is the same as in `get_mut`, and is valid for the - // same reason. `self.on_disk` still exists at this point, drop glue - // will drop it implicitly after this `drop` method returns. 
- let ptr: *mut DirstateMap<'_> = self.ptr.cast(); - // SAFETY: `Box::from_raw` takes ownership of the box away from `self`. - // This is fine because drop glue does nothig for `*mut ()` and we’re - // in `drop`, so `get` and `get_mut` cannot be called again. - unsafe { drop(Box::from_raw(ptr)) } - } -} - -fn _static_assert_is_send<T: Send>() {} - -fn _static_assert_fields_are_send() { - _static_assert_is_send::<PyBytes>(); - _static_assert_is_send::<Box<DirstateMap<'_>>>(); -} - -// SAFETY: we don’t get this impl implicitly because `*mut (): !Send` because -// thread-safety of raw pointers is unknown in the general case. However this -// particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we -// own. Since that `Box` and `PyBytes` are both `Send` as shown in above, it -// is sound to mark this struct as `Send` too. -unsafe impl Send for OwningDirstateMap {}
--- a/rust/hg-cpython/src/lib.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/hg-cpython/src/lib.rs Tue Sep 28 09:40:57 2021 +0200 @@ -35,7 +35,7 @@ pub mod dirstate; pub mod discovery; pub mod exceptions; -pub mod parsers; +mod pybytes_deref; pub mod revlog; pub mod utils; @@ -58,11 +58,6 @@ m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?; m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?; m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?; - m.add( - py, - "parsers", - parsers::init_parsers_module(py, &dotted_name)?, - )?; m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?; Ok(()) });
--- a/rust/hg-cpython/src/parsers.rs Tue Sep 21 18:18:56 2021 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,163 +0,0 @@ -// parsers.rs -// -// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> -// -// This software may be used and distributed according to the terms of the -// GNU General Public License version 2 or any later version. - -//! Bindings for the `hg::dirstate::parsers` module provided by the -//! `hg-core` package. -//! -//! From Python, this will be seen as `mercurial.rustext.parsers` -use cpython::{ - exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python, - PythonObject, ToPyObject, -}; -use hg::{ - dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate, - utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap, - PARENT_SIZE, -}; -use std::convert::TryInto; - -use crate::dirstate::{extract_dirstate, make_dirstate_item}; - -fn parse_dirstate_wrapper( - py: Python, - dmap: PyDict, - copymap: PyDict, - st: PyBytes, -) -> PyResult<PyTuple> { - match parse_dirstate(st.data(py)) { - Ok((parents, entries, copies)) => { - let dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries - .into_iter() - .map(|(path, entry)| (path.to_owned(), entry)) - .collect(); - let copy_map: FastHashMap<HgPathBuf, HgPathBuf> = copies - .into_iter() - .map(|(path, copy)| (path.to_owned(), copy.to_owned())) - .collect(); - - for (filename, entry) in &dirstate_map { - dmap.set_item( - py, - PyBytes::new(py, filename.as_bytes()), - make_dirstate_item(py, entry)?, - )?; - } - for (path, copy_path) in copy_map { - copymap.set_item( - py, - PyBytes::new(py, path.as_bytes()), - PyBytes::new(py, copy_path.as_bytes()), - )?; - } - Ok(dirstate_parents_to_pytuple(py, parents)) - } - Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())), - } -} - -fn pack_dirstate_wrapper( - py: Python, - dmap: PyDict, - copymap: PyDict, - pl: PyTuple, - now: PyInt, -) -> PyResult<PyBytes> { - let p1 = pl.get_item(py, 
0).extract::<PyBytes>(py)?; - let p1: &[u8] = p1.data(py); - let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?; - let p2: &[u8] = p2.data(py); - - let mut dirstate_map = extract_dirstate(py, &dmap)?; - - let copies: Result<FastHashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap - .items(py) - .iter() - .map(|(key, value)| { - Ok(( - HgPathBuf::from_bytes(key.extract::<PyBytes>(py)?.data(py)), - HgPathBuf::from_bytes(value.extract::<PyBytes>(py)?.data(py)), - )) - }) - .collect(); - - if p1.len() != PARENT_SIZE || p2.len() != PARENT_SIZE { - return Err(PyErr::new::<exc::ValueError, _>( - py, - "expected a 20-byte hash".to_string(), - )); - } - - match pack_dirstate( - &mut dirstate_map, - &copies?, - DirstateParents { - p1: p1.try_into().unwrap(), - p2: p2.try_into().unwrap(), - }, - Timestamp(now.as_object().extract::<i64>(py)?), - ) { - Ok(packed) => { - for (filename, entry) in dirstate_map.iter() { - dmap.set_item( - py, - PyBytes::new(py, filename.as_bytes()), - make_dirstate_item(py, &entry)?, - )?; - } - Ok(PyBytes::new(py, &packed)) - } - Err(error) => { - Err(PyErr::new::<exc::ValueError, _>(py, error.to_string())) - } - } -} - -/// Create the module, with `__package__` given from parent -pub fn init_parsers_module(py: Python, package: &str) -> PyResult<PyModule> { - let dotted_name = &format!("{}.parsers", package); - let m = PyModule::new(py, dotted_name)?; - - m.add(py, "__package__", package)?; - m.add(py, "__doc__", "Parsers - Rust implementation")?; - - m.add( - py, - "parse_dirstate", - py_fn!( - py, - parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes) - ), - )?; - m.add( - py, - "pack_dirstate", - py_fn!( - py, - pack_dirstate_wrapper( - dmap: PyDict, - copymap: PyDict, - pl: PyTuple, - now: PyInt - ) - ), - )?; - - let sys = PyModule::import(py, "sys")?; - let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?; - sys_modules.set_item(py, dotted_name, &m)?; - - Ok(m) -} - -pub(crate) fn dirstate_parents_to_pytuple( - py: 
Python, - parents: &DirstateParents, -) -> PyTuple { - let p1 = PyBytes::new(py, parents.p1.as_bytes()); - let p2 = PyBytes::new(py, parents.p2.as_bytes()); - (p1, p2).to_py_object(py) -}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-cpython/src/pybytes_deref.rs Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,56 @@ +use cpython::{PyBytes, Python}; +use stable_deref_trait::StableDeref; + +/// Safe abstraction over a `PyBytes` together with the `&[u8]` slice +/// that borrows it. Implements `Deref<Target = [u8]>`. +/// +/// Calling `PyBytes::data` requires a GIL marker but we want to access the +/// data in a thread that (ideally) does not need to acquire the GIL. +/// This type allows separating the call an the use. +/// +/// It also enables using a (wrapped) `PyBytes` in GIL-unaware generic code. +pub struct PyBytesDeref { + #[allow(unused)] + keep_alive: PyBytes, + + /// Borrows the buffer inside `self.keep_alive`, + /// but the borrow-checker cannot express self-referential structs. + data: *const [u8], +} + +impl PyBytesDeref { + pub fn new(py: Python, bytes: PyBytes) -> Self { + Self { + data: bytes.data(py), + keep_alive: bytes, + } + } + + pub fn unwrap(self) -> PyBytes { + self.keep_alive + } +} + +impl std::ops::Deref for PyBytesDeref { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + // Safety: the raw pointer is valid as long as the PyBytes is still + // alive, and the returned slice borrows `self`. + unsafe { &*self.data } + } +} + +unsafe impl StableDeref for PyBytesDeref {} + +fn require_send<T: Send>() {} + +#[allow(unused)] +fn static_assert_pybytes_is_send() { + require_send::<PyBytes>; +} + +// Safety: PyBytes is Send. Raw pointers are not by default, +// but here sending one to another thread is fine since we ensure it stays +// valid. +unsafe impl Send for PyBytesDeref {}
--- a/rust/rhg/src/commands/status.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/rhg/src/commands/status.rs Tue Sep 28 09:40:57 2021 +0200 @@ -9,22 +9,14 @@ use crate::ui::Ui; use clap::{Arg, SubCommand}; use hg; -use hg::dirstate_tree::dirstate_map::DirstateMap; -use hg::dirstate_tree::on_disk; -use hg::errors::HgResultExt; -use hg::errors::IoResultExt; +use hg::dirstate_tree::dispatch::DirstateMapMethods; +use hg::errors::HgError; +use hg::manifest::Manifest; use hg::matchers::AlwaysMatcher; -use hg::operations::cat; use hg::repo::Repo; -use hg::revlog::node::Node; use hg::utils::hg_path::{hg_path_to_os_string, HgPath}; -use hg::StatusError; use hg::{HgPathCow, StatusOptions}; use log::{info, warn}; -use std::convert::TryInto; -use std::fs; -use std::io::BufReader; -use std::io::Read; pub const HELP_TEXT: &str = " Show changed files in the working directory @@ -166,40 +158,7 @@ }; let repo = invocation.repo?; - let dirstate_data_mmap; - let (mut dmap, parents) = if repo.has_dirstate_v2() { - let docket_data = - repo.hg_vfs().read("dirstate").io_not_found_as_none()?; - let parents; - let dirstate_data; - let data_size; - let docket; - let tree_metadata; - if let Some(docket_data) = &docket_data { - docket = on_disk::read_docket(docket_data)?; - tree_metadata = docket.tree_metadata(); - parents = Some(docket.parents()); - data_size = docket.data_size(); - dirstate_data_mmap = repo - .hg_vfs() - .mmap_open(docket.data_filename()) - .io_not_found_as_none()?; - dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b""); - } else { - parents = None; - tree_metadata = b""; - data_size = 0; - dirstate_data = b""; - } - let dmap = - DirstateMap::new_v2(dirstate_data, data_size, tree_metadata)?; - (dmap, parents) - } else { - dirstate_data_mmap = - repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?; - let dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b""); - DirstateMap::new_v1(dirstate_data)? 
- }; + let mut dmap = repo.dirstate_map_mut()?; let options = StatusOptions { // TODO should be provided by the dirstate parsing and @@ -216,8 +175,7 @@ collect_traversed_dirs: false, }; let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded - let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status( - &mut dmap, + let (mut ds_status, pattern_warnings) = dmap.status( &AlwaysMatcher, repo.working_directory_path().to_owned(), vec![ignore_file], @@ -239,16 +197,12 @@ if !ds_status.unsure.is_empty() && (display_states.modified || display_states.clean) { - let p1: Node = parents - .expect( - "Dirstate with no parents should not list any file to - be rechecked for modifications", - ) - .p1 - .into(); - let p1_hex = format!("{:x}", p1); + let p1 = repo.dirstate_parents()?.p1; + let manifest = repo.manifest_for_node(p1).map_err(|e| { + CommandError::from((e, &*format!("{:x}", p1.short()))) + })?; for to_check in ds_status.unsure { - if cat_file_is_modified(repo, &to_check, &p1_hex)? { + if cat_file_is_modified(repo, &manifest, &to_check)? { if display_states.modified { ds_status.modified.push(to_check); } @@ -309,39 +263,19 @@ /// TODO: detect permission bits and similar metadata modifications fn cat_file_is_modified( repo: &Repo, + manifest: &Manifest, hg_path: &HgPath, - rev: &str, -) -> Result<bool, CommandError> { - // TODO CatRev expects &[HgPathBuf], something like - // &[impl Deref<HgPath>] would be nicer and should avoid the copy - let path_bufs = [hg_path.into()]; - // TODO IIUC CatRev returns a simple Vec<u8> for all files - // being able to tell them apart as (path, bytes) would be nicer - // and OPTIM would allow manifest resolution just once. - let output = cat(repo, rev, &path_bufs).map_err(|e| (e, rev))?; +) -> Result<bool, HgError> { + let file_node = manifest + .find_file(hg_path)? 
+ .expect("ambgious file not in p1"); + let filelog = repo.filelog(hg_path)?; + let filelog_entry = filelog.data_for_node(file_node).map_err(|_| { + HgError::corrupted("filelog missing node from manifest") + })?; + let contents_in_p1 = filelog_entry.data()?; - let fs_path = repo - .working_directory_vfs() - .join(hg_path_to_os_string(hg_path).expect("HgPath conversion")); - let hg_data_len: u64 = match output.concatenated.len().try_into() { - Ok(v) => v, - Err(_) => { - // conversion of data length to u64 failed, - // good luck for any file to have this content - return Ok(true); - } - }; - let fobj = fs::File::open(&fs_path).when_reading_file(&fs_path)?; - if fobj.metadata().map_err(|e| StatusError::from(e))?.len() != hg_data_len - { - return Ok(true); - } - for (fs_byte, hg_byte) in - BufReader::new(fobj).bytes().zip(output.concatenated) - { - if fs_byte.map_err(|e| StatusError::from(e))? != hg_byte { - return Ok(true); - } - } - Ok(false) + let fs_path = hg_path_to_os_string(hg_path).expect("HgPath conversion"); + let fs_contents = repo.working_directory_vfs().read(fs_path)?; + return Ok(contents_in_p1 == &*fs_contents); }
--- a/rust/rhg/src/main.rs Tue Sep 21 18:18:56 2021 +0200 +++ b/rust/rhg/src/main.rs Tue Sep 28 09:40:57 2021 +0200 @@ -567,11 +567,10 @@ unsupported.remove(supported); } - if let Some(ignored_list) = - config.get_simple_list(b"rhg", b"ignored-extensions") + if let Some(ignored_list) = config.get_list(b"rhg", b"ignored-extensions") { for ignored in ignored_list { - unsupported.remove(ignored); + unsupported.remove(ignored.as_slice()); } }
--- a/tests/fakedirstatewritetime.py Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/fakedirstatewritetime.py Tue Sep 28 09:40:57 2021 +0200 @@ -34,7 +34,7 @@ ) parsers = policy.importmod('parsers') -rustmod = policy.importrust('parsers') +has_rust_dirstate = policy.importrust('dirstate') is not None def pack_dirstate(fakenow, orig, dmap, copymap, pl, now): @@ -63,7 +63,7 @@ # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0] - if rustmod is not None: + if has_rust_dirstate: # The Rust implementation does not use public parse/pack dirstate # to prevent conversion round-trips orig_dirstatemap_write = dirstatemapmod.dirstatemap.write @@ -85,7 +85,7 @@ finally: orig_module.pack_dirstate = orig_pack_dirstate dirstate._getfsnow = orig_dirstate_getfsnow - if rustmod is not None: + if has_rust_dirstate: dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
--- a/tests/library-infinitepush.sh Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/library-infinitepush.sh Tue Sep 28 09:40:57 2021 +0200 @@ -14,8 +14,6 @@ cat >> $HGRCPATH << EOF [extensions] infinitepush= -[ui] -ssh = "$PYTHON" "$TESTDIR/dummyssh" [infinitepush] branchpattern=re:scratch/.* EOF
--- a/tests/narrow-library.sh Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/narrow-library.sh Tue Sep 28 09:40:57 2021 +0200 @@ -1,8 +1,6 @@ cat >> $HGRCPATH <<EOF [extensions] narrow= -[ui] -ssh="$PYTHON" "$RUNTESTDIR/dummyssh" [experimental] changegroup3 = True EOF
--- a/tests/remotefilelog-library.sh Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/remotefilelog-library.sh Tue Sep 28 09:40:57 2021 +0200 @@ -7,8 +7,6 @@ remotefilelog= rebase= strip= -[ui] -ssh="$PYTHON" "$TESTDIR/dummyssh" [server] preferuncompressed=True [experimental]
--- a/tests/run-tests.py Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/run-tests.py Tue Sep 28 09:40:57 2021 +0200 @@ -1554,6 +1554,8 @@ hgrc.write(b'merge = internal:merge\n') hgrc.write(b'mergemarkers = detailed\n') hgrc.write(b'promptecho = True\n') + dummyssh = os.path.join(self._testdir, b'dummyssh') + hgrc.write(b'ssh = "%s" "%s"\n' % (PYTHON, dummyssh)) hgrc.write(b'timeout.warn=15\n') hgrc.write(b'[chgserver]\n') hgrc.write(b'idletimeout=60\n')
--- a/tests/simplestorerepo.py Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/simplestorerepo.py Tue Sep 28 09:40:57 2021 +0200 @@ -665,20 +665,24 @@ class simplestore(store.encodedstore): - def datafiles(self): + def datafiles(self, undecodable=None): for x in super(simplestore, self).datafiles(): yield x # Supplement with non-revlog files. extrafiles = self._walk('data', True, filefilter=issimplestorefile) - for unencoded, encoded, size in extrafiles: + for f1, size in extrafiles: try: - unencoded = store.decodefilename(unencoded) + f2 = store.decodefilename(f1) except KeyError: - unencoded = None + if undecodable is None: + raise error.StorageError(b'undecodable revlog name %s' % f1) + else: + undecodable.append(f1) + continue - yield unencoded, encoded, size + yield f2, size def reposetup(ui, repo):
--- a/tests/test-addremove-similar.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-addremove-similar.t Tue Sep 28 09:40:57 2021 +0200 @@ -131,13 +131,13 @@ $ hg addremove -s foo abort: similarity must be a number - [255] + [10] $ hg addremove -s -1 abort: similarity must be between 0 and 100 - [255] + [10] $ hg addremove -s 1e6 abort: similarity must be between 0 and 100 - [255] + [10] $ cd ..
--- a/tests/test-annotate.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-annotate.t Tue Sep 28 09:40:57 2021 +0200 @@ -455,7 +455,7 @@ $ hg ann nosuchfile abort: nosuchfile: no such file in rev e9e6b4fa872f - [255] + [10] annotate file without '\n' on last line
--- a/tests/test-basic.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-basic.t Tue Sep 28 09:40:57 2021 +0200 @@ -15,6 +15,7 @@ ui.merge=internal:merge ui.mergemarkers=detailed ui.promptecho=True + ui.ssh=* (glob) ui.timeout.warn=15 web.address=localhost web\.ipv6=(?:True|False) (re)
--- a/tests/test-batching.py Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-batching.py Tue Sep 28 09:40:57 2021 +0200 @@ -214,14 +214,11 @@ mangle(two), ), ] - encoded_res_future = wireprotov1peer.future() - yield encoded_args, encoded_res_future - yield unmangle(encoded_res_future.value) + return encoded_args, unmangle @wireprotov1peer.batchable def bar(self, b, a): - encresref = wireprotov1peer.future() - yield [ + return [ ( b'b', mangle(b), @@ -230,8 +227,7 @@ b'a', mangle(a), ), - ], encresref - yield unmangle(encresref.value) + ], unmangle # greet is coded directly. It therefore does not support batching. If it # does appear in a batch, the batch is split around greet, and the call to
--- a/tests/test-bookmarks-corner-case.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-bookmarks-corner-case.t Tue Sep 28 09:40:57 2021 +0200 @@ -12,16 +12,6 @@ node known to the changelog. If the cache invalidation between these two bits goes wrong, bookmark can be dropped. -global setup ------------- - - $ cat >> $HGRCPATH << EOF - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" - > [server] - > concurrent-push-mode=check-related - > EOF - Setup -----
--- a/tests/test-bookmarks-pushpull.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-bookmarks-pushpull.t Tue Sep 28 09:40:57 2021 +0200 @@ -490,6 +490,30 @@ Y 0:4e3505fd9583 Z 1:0d2164f0ce0d +mirroring bookmarks + + $ hg book + @ 1:9b140be10808 + @foo 2:0d2164f0ce0d + X 1:9b140be10808 + X@foo 2:0d2164f0ce0d + Y 0:4e3505fd9583 + Z 2:0d2164f0ce0d + foo -1:000000000000 + * foobar 1:9b140be10808 + $ cp .hg/bookmarks .hg/bookmarks.bak + $ hg book -d X + $ hg pull ../a --config bookmarks.mirror=true + pulling from ../a + searching for changes + no changes found + $ hg book + @ 2:0d2164f0ce0d + X 2:0d2164f0ce0d + Y 0:4e3505fd9583 + Z 2:0d2164f0ce0d + $ mv .hg/bookmarks.bak .hg/bookmarks + explicit pull should overwrite the local version (issue4439) $ hg update -r X @@ -1142,8 +1166,6 @@ > local=../issue4455-dest/ > ssh=ssh://user@dummy/issue4455-dest > http=http://localhost:$HGPORT/ - > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" > EOF $ cat >> ../issue4455-dest/.hg/hgrc << EOF > [hooks] @@ -1270,7 +1292,6 @@ $ cat << EOF >> $HGRCPATH > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" > [server] > bookmarks-pushkey-compat = yes > EOF
--- a/tests/test-bundle2-exchange.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-bundle2-exchange.t Tue Sep 28 09:40:57 2021 +0200 @@ -28,8 +28,6 @@ > evolution.createmarkers=True > evolution.exchange=True > bundle2-output-capture=True - > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" > [command-templates] > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline} > [web] @@ -922,10 +920,6 @@ Test lazily acquiring the lock during unbundle $ cp $TESTTMP/hgrc.orig $HGRCPATH - $ cat >> $HGRCPATH <<EOF - > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" - > EOF $ cat >> $TESTTMP/locktester.py <<EOF > import os
--- a/tests/test-bundle2-format.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-bundle2-format.t Tue Sep 28 09:40:57 2021 +0200 @@ -233,8 +233,6 @@ > bundle2=$TESTTMP/bundle2.py > [experimental] > evolution.createmarkers=True - > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" > [command-templates] > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline} > [web]
--- a/tests/test-bundle2-pushback.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-bundle2-pushback.t Tue Sep 28 09:40:57 2021 +0200 @@ -37,7 +37,6 @@ $ cat >> $HGRCPATH <<EOF > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > username = nobody <no.reply@example.com> > > [alias]
--- a/tests/test-bundle2-remote-changegroup.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-bundle2-remote-changegroup.t Tue Sep 28 09:40:57 2021 +0200 @@ -94,8 +94,6 @@ $ cat dumb.pid >> $DAEMON_PIDS $ cat >> $HGRCPATH << EOF - > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" > [command-templates] > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline} > EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-clone-stream.t Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,904 @@ +#require serve no-reposimplestore no-chg + +#testcases stream-legacy stream-bundle2 + +#if stream-legacy + $ cat << EOF >> $HGRCPATH + > [server] + > bundle2.stream = no + > EOF +#endif + +Initialize repository +the status call is to check for issue5130 + + $ hg init server + $ cd server + $ touch foo + $ hg -q commit -A -m initial + >>> for i in range(1024): + ... with open(str(i), 'wb') as fh: + ... fh.write(b"%d" % i) and None + $ hg -q commit -A -m 'add a lot of files' + $ hg st + +add files with "tricky" name: + + $ echo foo > 00changelog.i + $ echo foo > 00changelog.d + $ echo foo > 00changelog.n + $ echo foo > 00changelog-ab349180a0405010.nd + $ echo foo > 00manifest.i + $ echo foo > 00manifest.d + $ echo foo > foo.i + $ echo foo > foo.d + $ echo foo > foo.n + $ echo foo > undo.py + $ echo foo > undo.i + $ echo foo > undo.d + $ echo foo > undo.n + $ echo foo > undo.foo.i + $ echo foo > undo.foo.d + $ echo foo > undo.foo.n + $ echo foo > undo.babar + $ mkdir savanah + $ echo foo > savanah/foo.i + $ echo foo > savanah/foo.d + $ echo foo > savanah/foo.n + $ echo foo > savanah/undo.py + $ echo foo > savanah/undo.i + $ echo foo > savanah/undo.d + $ echo foo > savanah/undo.n + $ echo foo > savanah/undo.foo.i + $ echo foo > savanah/undo.foo.d + $ echo foo > savanah/undo.foo.n + $ echo foo > savanah/undo.babar + $ mkdir data + $ echo foo > data/foo.i + $ echo foo > data/foo.d + $ echo foo > data/foo.n + $ echo foo > data/undo.py + $ echo foo > data/undo.i + $ echo foo > data/undo.d + $ echo foo > data/undo.n + $ echo foo > data/undo.foo.i + $ echo foo > data/undo.foo.d + $ echo foo > data/undo.foo.n + $ echo foo > data/undo.babar + $ mkdir meta + $ echo foo > meta/foo.i + $ echo foo > meta/foo.d + $ echo foo > meta/foo.n + $ echo foo > meta/undo.py + $ echo foo > meta/undo.i + $ echo foo > meta/undo.d + $ echo foo > meta/undo.n + $ echo foo 
> meta/undo.foo.i + $ echo foo > meta/undo.foo.d + $ echo foo > meta/undo.foo.n + $ echo foo > meta/undo.babar + $ mkdir store + $ echo foo > store/foo.i + $ echo foo > store/foo.d + $ echo foo > store/foo.n + $ echo foo > store/undo.py + $ echo foo > store/undo.i + $ echo foo > store/undo.d + $ echo foo > store/undo.n + $ echo foo > store/undo.foo.i + $ echo foo > store/undo.foo.d + $ echo foo > store/undo.foo.n + $ echo foo > store/undo.babar + +Name with special characters + + $ echo foo > store/CélesteVille_is_a_Capital_City + +name causing issue6581 + + $ mkdir --parents container/isam-build-centos7/ + $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch + +Add all that + + $ hg add . + adding 00changelog-ab349180a0405010.nd + adding 00changelog.d + adding 00changelog.i + adding 00changelog.n + adding 00manifest.d + adding 00manifest.i + adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch + adding data/foo.d + adding data/foo.i + adding data/foo.n + adding data/undo.babar + adding data/undo.d + adding data/undo.foo.d + adding data/undo.foo.i + adding data/undo.foo.n + adding data/undo.i + adding data/undo.n + adding data/undo.py + adding foo.d + adding foo.i + adding foo.n + adding meta/foo.d + adding meta/foo.i + adding meta/foo.n + adding meta/undo.babar + adding meta/undo.d + adding meta/undo.foo.d + adding meta/undo.foo.i + adding meta/undo.foo.n + adding meta/undo.i + adding meta/undo.n + adding meta/undo.py + adding savanah/foo.d + adding savanah/foo.i + adding savanah/foo.n + adding savanah/undo.babar + adding savanah/undo.d + adding savanah/undo.foo.d + adding savanah/undo.foo.i + adding savanah/undo.foo.n + adding savanah/undo.i + adding savanah/undo.n + adding savanah/undo.py + adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc) + adding store/foo.d + adding store/foo.i + adding 
store/foo.n + adding store/undo.babar + adding store/undo.d + adding store/undo.foo.d + adding store/undo.foo.i + adding store/undo.foo.n + adding store/undo.i + adding store/undo.n + adding store/undo.py + adding undo.babar + adding undo.d + adding undo.foo.d + adding undo.foo.i + adding undo.foo.n + adding undo.i + adding undo.n + adding undo.py + $ hg ci -m 'add files with "tricky" name' + $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + $ cd .. + +Check local clone +================== + +The logic is close enough of uncompressed. +This is present here to reuse the testing around file with "special" names. + + $ hg clone server local-clone + updating to branch default + 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved + +Check that the clone went well + + $ hg verify -R local-clone + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 1088 changes to 1088 files + +Check uncompressed +================== + +Cannot stream clone when server.uncompressed is set + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out' + 200 Script output follows + + 1 + +#if stream-legacy + $ hg debugcapabilities http://localhost:$HGPORT + Main capabilities: + batch + branchmap + $USUAL_BUNDLE2_CAPS_SERVER$ + changegroupsubset + compression=$BUNDLE2_COMPRESSIONS$ + getbundle + httpheader=1024 + httpmediatype=0.1rx,0.1tx,0.2tx + known + lookup + pushkey + unbundle=HG10GZ,HG10BZ,HG10UN + unbundlehash + Bundle2 capabilities: + HG20 + bookmarks + changegroup + 01 + 02 + checkheads + related + digests + md5 + sha1 + sha512 + error + abort + unsupportedcontent + pushraced + pushkey + hgtagsfnodes + listkeys + phases + heads + pushkey + remote-changegroup + http + https + + $ hg clone --stream -U http://localhost:$HGPORT server-disabled + warning: stream clone requested but server has them disabled + requesting all changes 
+ adding changesets + adding manifests + adding file changes + added 3 changesets with 1088 changes to 1088 files + new changesets 96ee1d7354c4:5223b5e3265f + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" + 200 Script output follows + content-type: application/mercurial-0.2 + + + $ f --size body --hexdump --bytes 100 + body: size=232 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| + 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| + 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| + 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| + 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| + 0060: 69 73 20 66 |is f| + +#endif +#if stream-bundle2 + $ hg debugcapabilities http://localhost:$HGPORT + Main capabilities: + batch + branchmap + $USUAL_BUNDLE2_CAPS_SERVER$ + changegroupsubset + compression=$BUNDLE2_COMPRESSIONS$ + getbundle + httpheader=1024 + httpmediatype=0.1rx,0.1tx,0.2tx + known + lookup + pushkey + unbundle=HG10GZ,HG10BZ,HG10UN + unbundlehash + Bundle2 capabilities: + HG20 + bookmarks + changegroup + 01 + 02 + checkheads + related + digests + md5 + sha1 + sha512 + error + abort + unsupportedcontent + pushraced + pushkey + hgtagsfnodes + listkeys + phases + heads + pushkey + remote-changegroup + http + https + + $ hg clone --stream -U http://localhost:$HGPORT server-disabled + warning: stream 
clone requested but server has them disabled + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 1088 changes to 1088 files + new changesets 96ee1d7354c4:5223b5e3265f + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" + 200 Script output follows + content-type: application/mercurial-0.2 + + + $ f --size body --hexdump --bytes 100 + body: size=232 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| + 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| + 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| + 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| + 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| + 0060: 69 73 20 66 |is f| + +#endif + + $ killdaemons.py + $ cd server + $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt + $ cat hg.pid > $DAEMON_PIDS + $ cd .. + +Basic clone + +#if stream-legacy + $ hg clone --stream -U http://localhost:$HGPORT clone1 + streaming all changes + 1090 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1090 files to transfer, 98.8 KB of data (zstd !) + transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) 
+ searching for changes + no changes found + $ cat server/errors.txt +#endif +#if stream-bundle2 + $ hg clone --stream -U http://localhost:$HGPORT clone1 + streaming all changes + 1093 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1093 files to transfer, 98.9 KB of data (zstd !) + transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) + + $ ls -1 clone1/.hg/cache + branch2-base + branch2-immutable + branch2-served + branch2-served.hidden + branch2-visible + branch2-visible-hidden + rbc-names-v1 + rbc-revs-v1 + tags2 + tags2-served + $ cat server/errors.txt +#endif + +getbundle requests with stream=1 are uncompressed + + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" + 200 Script output follows + content-type: application/mercurial-0.2 + + +#if no-zstd no-rust + $ f --size --hex --bytes 256 body + body: size=119153 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| + 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10| + 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109| + 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| + 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| + 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| + 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| + 
0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| + 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| + 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| + 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| + 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,| + 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............| + 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan| + 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0| +#endif +#if zstd no-rust + $ f --size --hex --bytes 256 body + body: size=116340 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| + 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10| + 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| + 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| + 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| + 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| + 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres| + 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl| + 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev| + 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s| + 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......| + 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................| + 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.| + 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..| + 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed| +#endif +#if zstd rust no-dirstate-v2 + $ f --size --hex --bytes 256 body + body: size=116361 + 0000: 04 6e 6f 6e 65 48 
47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| + 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10| + 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| + 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| + 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| + 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| + 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod| + 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co| + 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2| + 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| + 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| + 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| + 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| + 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| + 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| +#endif +#if zstd dirstate-v2 + $ f --size --hex --bytes 256 body + body: size=109549 + 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| + 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| + 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95| + 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| + 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote| + 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs| + 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach| + 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta| + 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no| + 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 
|demap%2Crevlog-c| + 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%| + 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| + 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| + 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| + 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| + 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| +#endif + +--uncompressed is an alias to --stream + +#if stream-legacy + $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed + streaming all changes + 1090 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1090 files to transfer, 98.8 KB of data (zstd !) + transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) + searching for changes + no changes found +#endif +#if stream-bundle2 + $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed + streaming all changes + 1093 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1093 files to transfer, 98.9 KB of data (zstd !) + transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) +#endif + +Clone with background file closing enabled + +#if stream-legacy + $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding + using http://localhost:$HGPORT/ + sending capabilities command + sending branchmap command + streaming all changes + sending stream_out command + 1090 files to transfer, 102 KB of data (no-zstd !) + 1090 files to transfer, 98.8 KB of data (zstd !) + starting 4 threads for background file closing + updating the branch cache + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) 
+ query 1; heads + sending batch command + searching for changes + all remote heads known locally + no changes found + sending getbundle command + bundle2-input-bundle: with-transaction + bundle2-input-part: "listkeys" (params: 1 mandatory) supported + bundle2-input-part: "phase-heads" supported + bundle2-input-part: total payload size 24 + bundle2-input-bundle: 2 parts total + checking for updated bookmarks + updating the branch cache + (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) +#endif +#if stream-bundle2 + $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding + using http://localhost:$HGPORT/ + sending capabilities command + query 1; heads + sending batch command + streaming all changes + sending getbundle command + bundle2-input-bundle: with-transaction + bundle2-input-part: "stream2" (params: 3 mandatory) supported + applying stream bundle + 1093 files to transfer, 102 KB of data (no-zstd !) + 1093 files to transfer, 98.9 KB of data (zstd !) + starting 4 threads for background file closing + starting 4 threads for background file closing + updating the branch cache + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + bundle2-input-part: total payload size 118984 (no-zstd !) + transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) + bundle2-input-part: total payload size 116145 (zstd !) 
+ bundle2-input-part: "listkeys" (params: 1 mandatory) supported + bundle2-input-bundle: 2 parts total + checking for updated bookmarks + updating the branch cache + (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob) +#endif + +Cannot stream clone when there are secret changesets + + $ hg -R server phase --force --secret -r tip + $ hg clone --stream -U http://localhost:$HGPORT secret-denied + warning: stream clone requested but server has them disabled + requesting all changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1025 changes to 1025 files + new changesets 96ee1d7354c4:c17445101a72 + + $ killdaemons.py + +Streaming of secrets can be overridden by server config + + $ cd server + $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + $ cd .. + +#if stream-legacy + $ hg clone --stream -U http://localhost:$HGPORT secret-allowed + streaming all changes + 1090 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1090 files to transfer, 98.8 KB of data (zstd !) + transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) + searching for changes + no changes found +#endif +#if stream-bundle2 + $ hg clone --stream -U http://localhost:$HGPORT secret-allowed + streaming all changes + 1093 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1093 files to transfer, 98.9 KB of data (zstd !) + transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) +#endif + + $ killdaemons.py + +Verify interaction between preferuncompressed and secret presence + + $ cd server + $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + $ cd .. 
+ + $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret + requesting all changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1025 changes to 1025 files + new changesets 96ee1d7354c4:c17445101a72 + + $ killdaemons.py + +Clone not allowed when full bundles disabled and can't serve secrets + + $ cd server + $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + $ cd .. + + $ hg clone --stream http://localhost:$HGPORT secret-full-disabled + warning: stream clone requested but server has them disabled + requesting all changes + remote: abort: server has pull-based clones disabled + abort: pull failed on remote + (remove --pull if specified or upgrade Mercurial) + [100] + +Local stream clone with secrets involved +(This is just a test over behavior: if you have access to the repo's files, +there is no security so it isn't important to prevent a clone here.) + + $ hg clone -U --stream server local-secret + warning: stream clone requested but server has them disabled + requesting all changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1025 changes to 1025 files + new changesets 96ee1d7354c4:c17445101a72 + +Stream clone while repo is changing: + + $ mkdir changing + $ cd changing + +extension for delaying the server process so we reliably can modify the repo +while cloning + + $ cat > stream_steps.py <<EOF + > import os + > import sys + > from mercurial import ( + > encoding, + > extensions, + > streamclone, + > testing, + > ) + > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1'] + > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2'] + > + > def _test_sync_point_walk_1(orig, repo): + > testing.write_file(WALKED_FILE_1) + > + > def _test_sync_point_walk_2(orig, repo): + > assert repo._currentlock(repo._lockref) is None + > testing.wait_file(WALKED_FILE_2) + > + > extensions.wrapfunction( + 
> streamclone, + > '_test_sync_point_walk_1', + > _test_sync_point_walk_1 + > ) + > extensions.wrapfunction( + > streamclone, + > '_test_sync_point_walk_2', + > _test_sync_point_walk_2 + > ) + > EOF + +prepare repo with small and big file to cover both code paths in emitrevlogdata + + $ hg init repo + $ touch repo/f1 + $ $TESTDIR/seq.py 50000 > repo/f2 + $ hg -R repo ci -Aqm "0" + $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1" + $ export HG_TEST_STREAM_WALKED_FILE_1 + $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2" + $ export HG_TEST_STREAM_WALKED_FILE_2 + $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3" + $ export HG_TEST_STREAM_WALKED_FILE_3 +# $ cat << EOF >> $HGRCPATH +# > [hooks] +# > pre-clone=rm -f "$TESTTMP/sync_file_walked_*" +# > EOF + $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py" + $ cat hg.pid >> $DAEMON_PIDS + +clone while modifying the repo between stating file with write lock and +actually serving file content + + $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") & + $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1 + $ echo >> repo/f1 + $ echo >> repo/f2 + $ hg -R repo ci -m "1" --config ui.timeout.warn=-1 + $ touch $HG_TEST_STREAM_WALKED_FILE_2 + $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3 + $ hg -R clone id + 000000000000 + $ cat errors.log + $ cd .. + +Stream repository with bookmarks +-------------------------------- + +(revert introduction of secret changeset) + + $ hg -R server phase --draft 'secret()' + +add a bookmark + + $ hg -R server bookmark -r tip some-bookmark + +clone it + +#if stream-legacy + $ hg clone --stream http://localhost:$HGPORT with-bookmarks + streaming all changes + 1090 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) 
+ 1090 files to transfer, 98.8 KB of data (zstd !) + transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) + searching for changes + no changes found + updating to branch default + 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif +#if stream-bundle2 + $ hg clone --stream http://localhost:$HGPORT with-bookmarks + streaming all changes + 1096 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1096 files to transfer, 99.1 KB of data (zstd !) + transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) + updating to branch default + 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif + $ hg verify -R with-bookmarks + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 1088 changes to 1088 files + $ hg -R with-bookmarks bookmarks + some-bookmark 2:5223b5e3265f + +Stream repository with phases +----------------------------- + +Clone as publishing + + $ hg -R server phase -r 'all()' + 0: draft + 1: draft + 2: draft + +#if stream-legacy + $ hg clone --stream http://localhost:$HGPORT phase-publish + streaming all changes + 1090 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1090 files to transfer, 98.8 KB of data (zstd !) + transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) + searching for changes + no changes found + updating to branch default + 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif +#if stream-bundle2 + $ hg clone --stream http://localhost:$HGPORT phase-publish + streaming all changes + 1096 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1096 files to transfer, 99.1 KB of data (zstd !) + transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) 
+ updating to branch default + 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved +#endif + $ hg verify -R phase-publish + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 1088 changes to 1088 files + $ hg -R phase-publish phase -r 'all()' + 0: public + 1: public + 2: public + +Clone as non publishing + + $ cat << EOF >> server/.hg/hgrc + > [phases] + > publish = False + > EOF + $ killdaemons.py + $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + +#if stream-legacy + +With v1 of the stream protocol, changeset are always cloned as public. It make +stream v1 unsuitable for non-publishing repository. + + $ hg clone --stream http://localhost:$HGPORT phase-no-publish + streaming all changes + 1090 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1090 files to transfer, 98.8 KB of data (zstd !) + transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) + searching for changes + no changes found + updating to branch default + 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg -R phase-no-publish phase -r 'all()' + 0: public + 1: public + 2: public +#endif +#if stream-bundle2 + $ hg clone --stream http://localhost:$HGPORT phase-no-publish + streaming all changes + 1097 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1097 files to transfer, 99.1 KB of data (zstd !) + transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) 
+ updating to branch default + 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg -R phase-no-publish phase -r 'all()' + 0: draft + 1: draft + 2: draft +#endif + $ hg verify -R phase-no-publish + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 1088 changes to 1088 files + + $ killdaemons.py + +#if stream-legacy + +With v1 of the stream protocol, changeset are always cloned as public. There's +no obsolescence markers exchange in stream v1. + +#endif +#if stream-bundle2 + +Stream repository with obsolescence +----------------------------------- + +Clone non-publishing with obsolescence + + $ cat >> $HGRCPATH << EOF + > [experimental] + > evolution=all + > EOF + + $ cd server + $ echo foo > foo + $ hg -q commit -m 'about to be pruned' + $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents + 1 new obsolescence markers + obsoleted 1 changesets + $ hg up null -q + $ hg log -T '{rev}: {phase}\n' + 2: draft + 1: draft + 0: draft + $ hg serve -p $HGPORT -d --pid-file=hg.pid + $ cat hg.pid > $DAEMON_PIDS + $ cd .. + + $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence + streaming all changes + 1098 files to transfer, 102 KB of data (no-zstd !) + transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) + 1098 files to transfer, 99.5 KB of data (zstd !) + transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !) 
+ $ hg -R with-obsolescence log -T '{rev}: {phase}\n' + 2: draft + 1: draft + 0: draft + $ hg debugobsolete -R with-obsolescence + 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'} + $ hg verify -R with-obsolescence + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 4 changesets with 1089 changes to 1088 files + + $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution + streaming all changes + remote: abort: server has obsolescence markers, but client cannot receive them via stream clone + abort: pull failed on remote + [100] + + $ killdaemons.py + +#endif
--- a/tests/test-clone-uncompressed.t Tue Sep 21 18:18:56 2021 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,904 +0,0 @@ -#require serve no-reposimplestore no-chg - -#testcases stream-legacy stream-bundle2 - -#if stream-legacy - $ cat << EOF >> $HGRCPATH - > [server] - > bundle2.stream = no - > EOF -#endif - -Initialize repository -the status call is to check for issue5130 - - $ hg init server - $ cd server - $ touch foo - $ hg -q commit -A -m initial - >>> for i in range(1024): - ... with open(str(i), 'wb') as fh: - ... fh.write(b"%d" % i) and None - $ hg -q commit -A -m 'add a lot of files' - $ hg st - -add files with "tricky" name: - - $ echo foo > 00changelog.i - $ echo foo > 00changelog.d - $ echo foo > 00changelog.n - $ echo foo > 00changelog-ab349180a0405010.nd - $ echo foo > 00manifest.i - $ echo foo > 00manifest.d - $ echo foo > foo.i - $ echo foo > foo.d - $ echo foo > foo.n - $ echo foo > undo.py - $ echo foo > undo.i - $ echo foo > undo.d - $ echo foo > undo.n - $ echo foo > undo.foo.i - $ echo foo > undo.foo.d - $ echo foo > undo.foo.n - $ echo foo > undo.babar - $ mkdir savanah - $ echo foo > savanah/foo.i - $ echo foo > savanah/foo.d - $ echo foo > savanah/foo.n - $ echo foo > savanah/undo.py - $ echo foo > savanah/undo.i - $ echo foo > savanah/undo.d - $ echo foo > savanah/undo.n - $ echo foo > savanah/undo.foo.i - $ echo foo > savanah/undo.foo.d - $ echo foo > savanah/undo.foo.n - $ echo foo > savanah/undo.babar - $ mkdir data - $ echo foo > data/foo.i - $ echo foo > data/foo.d - $ echo foo > data/foo.n - $ echo foo > data/undo.py - $ echo foo > data/undo.i - $ echo foo > data/undo.d - $ echo foo > data/undo.n - $ echo foo > data/undo.foo.i - $ echo foo > data/undo.foo.d - $ echo foo > data/undo.foo.n - $ echo foo > data/undo.babar - $ mkdir meta - $ echo foo > meta/foo.i - $ echo foo > meta/foo.d - $ echo foo > meta/foo.n - $ echo foo > meta/undo.py - $ echo foo > meta/undo.i - $ echo foo > meta/undo.d - $ echo foo > meta/undo.n - $ 
echo foo > meta/undo.foo.i - $ echo foo > meta/undo.foo.d - $ echo foo > meta/undo.foo.n - $ echo foo > meta/undo.babar - $ mkdir store - $ echo foo > store/foo.i - $ echo foo > store/foo.d - $ echo foo > store/foo.n - $ echo foo > store/undo.py - $ echo foo > store/undo.i - $ echo foo > store/undo.d - $ echo foo > store/undo.n - $ echo foo > store/undo.foo.i - $ echo foo > store/undo.foo.d - $ echo foo > store/undo.foo.n - $ echo foo > store/undo.babar - -Name with special characters - - $ echo foo > store/CélesteVille_is_a_Capital_City - -name causing issue6581 - - $ mkdir --parents container/isam-build-centos7/ - $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch - -Add all that - - $ hg add . - adding 00changelog-ab349180a0405010.nd - adding 00changelog.d - adding 00changelog.i - adding 00changelog.n - adding 00manifest.d - adding 00manifest.i - adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch - adding data/foo.d - adding data/foo.i - adding data/foo.n - adding data/undo.babar - adding data/undo.d - adding data/undo.foo.d - adding data/undo.foo.i - adding data/undo.foo.n - adding data/undo.i - adding data/undo.n - adding data/undo.py - adding foo.d - adding foo.i - adding foo.n - adding meta/foo.d - adding meta/foo.i - adding meta/foo.n - adding meta/undo.babar - adding meta/undo.d - adding meta/undo.foo.d - adding meta/undo.foo.i - adding meta/undo.foo.n - adding meta/undo.i - adding meta/undo.n - adding meta/undo.py - adding savanah/foo.d - adding savanah/foo.i - adding savanah/foo.n - adding savanah/undo.babar - adding savanah/undo.d - adding savanah/undo.foo.d - adding savanah/undo.foo.i - adding savanah/undo.foo.n - adding savanah/undo.i - adding savanah/undo.n - adding savanah/undo.py - adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc) - adding store/foo.d - adding store/foo.i - adding 
store/foo.n - adding store/undo.babar - adding store/undo.d - adding store/undo.foo.d - adding store/undo.foo.i - adding store/undo.foo.n - adding store/undo.i - adding store/undo.n - adding store/undo.py - adding undo.babar - adding undo.d - adding undo.foo.d - adding undo.foo.i - adding undo.foo.n - adding undo.i - adding undo.n - adding undo.py - $ hg ci -m 'add files with "tricky" name' - $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid - $ cat hg.pid > $DAEMON_PIDS - $ cd .. - -Check local clone -================== - -The logic is close enough of uncompressed. -This is present here to reuse the testing around file with "special" names. - - $ hg clone server local-clone - updating to branch default - 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved - -Check that the clone went well - - $ hg verify -R local-clone - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - checked 3 changesets with 1088 changes to 1088 files - -Check uncompressed -================== - -Cannot stream clone when server.uncompressed is set - - $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out' - 200 Script output follows - - 1 - -#if stream-legacy - $ hg debugcapabilities http://localhost:$HGPORT - Main capabilities: - batch - branchmap - $USUAL_BUNDLE2_CAPS_SERVER$ - changegroupsubset - compression=$BUNDLE2_COMPRESSIONS$ - getbundle - httpheader=1024 - httpmediatype=0.1rx,0.1tx,0.2tx - known - lookup - pushkey - unbundle=HG10GZ,HG10BZ,HG10UN - unbundlehash - Bundle2 capabilities: - HG20 - bookmarks - changegroup - 01 - 02 - checkheads - related - digests - md5 - sha1 - sha512 - error - abort - unsupportedcontent - pushraced - pushkey - hgtagsfnodes - listkeys - phases - heads - pushkey - remote-changegroup - http - https - - $ hg clone --stream -U http://localhost:$HGPORT server-disabled - warning: stream clone requested but server has them disabled - requesting all changes 
- adding changesets - adding manifests - adding file changes - added 3 changesets with 1088 changes to 1088 files - new changesets 96ee1d7354c4:5223b5e3265f - - $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" - 200 Script output follows - content-type: application/mercurial-0.2 - - - $ f --size body --hexdump --bytes 100 - body: size=232 - 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| - 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| - 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| - 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| - 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| - 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| - 0060: 69 73 20 66 |is f| - -#endif -#if stream-bundle2 - $ hg debugcapabilities http://localhost:$HGPORT - Main capabilities: - batch - branchmap - $USUAL_BUNDLE2_CAPS_SERVER$ - changegroupsubset - compression=$BUNDLE2_COMPRESSIONS$ - getbundle - httpheader=1024 - httpmediatype=0.1rx,0.1tx,0.2tx - known - lookup - pushkey - unbundle=HG10GZ,HG10BZ,HG10UN - unbundlehash - Bundle2 capabilities: - HG20 - bookmarks - changegroup - 01 - 02 - checkheads - related - digests - md5 - sha1 - sha512 - error - abort - unsupportedcontent - pushraced - pushkey - hgtagsfnodes - listkeys - phases - heads - pushkey - remote-changegroup - http - https - - $ hg clone --stream -U http://localhost:$HGPORT server-disabled - warning: stream 
clone requested but server has them disabled - requesting all changes - adding changesets - adding manifests - adding file changes - added 3 changesets with 1088 changes to 1088 files - new changesets 96ee1d7354c4:5223b5e3265f - - $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" - 200 Script output follows - content-type: application/mercurial-0.2 - - - $ f --size body --hexdump --bytes 100 - body: size=232 - 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| - 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| - 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| - 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| - 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| - 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| - 0060: 69 73 20 66 |is f| - -#endif - - $ killdaemons.py - $ cd server - $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt - $ cat hg.pid > $DAEMON_PIDS - $ cd .. - -Basic clone - -#if stream-legacy - $ hg clone --stream -U http://localhost:$HGPORT clone1 - streaming all changes - 1090 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1090 files to transfer, 98.8 KB of data (zstd !) - transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) 
- searching for changes - no changes found - $ cat server/errors.txt -#endif -#if stream-bundle2 - $ hg clone --stream -U http://localhost:$HGPORT clone1 - streaming all changes - 1093 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1093 files to transfer, 98.9 KB of data (zstd !) - transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) - - $ ls -1 clone1/.hg/cache - branch2-base - branch2-immutable - branch2-served - branch2-served.hidden - branch2-visible - branch2-visible-hidden - rbc-names-v1 - rbc-revs-v1 - tags2 - tags2-served - $ cat server/errors.txt -#endif - -getbundle requests with stream=1 are uncompressed - - $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" - 200 Script output follows - content-type: application/mercurial-0.2 - - -#if no-zstd no-rust - $ f --size --hex --bytes 256 body - body: size=119153 - 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| - 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| - 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10| - 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109| - 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| - 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| - 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| - 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| - 
0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| - 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| - 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| - 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| - 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,| - 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............| - 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan| - 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0| -#endif -#if zstd no-rust - $ f --size --hex --bytes 256 body - body: size=116340 - 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| - 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| - 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10| - 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| - 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| - 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| - 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| - 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres| - 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl| - 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev| - 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s| - 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......| - 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................| - 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.| - 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..| - 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed| -#endif -#if zstd rust no-dirstate-v2 - $ f --size --hex --bytes 256 body - body: size=116361 - 0000: 04 6e 6f 6e 65 48 
47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| - 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| - 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10| - 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| - 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| - 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| - 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| - 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod| - 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co| - 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2| - 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| - 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| - 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| - 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| - 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| - 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| -#endif -#if zstd dirstate-v2 - $ f --size --hex --bytes 256 body - body: size=109549 - 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| - 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| - 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95| - 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| - 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote| - 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs| - 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach| - 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta| - 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no| - 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 
|demap%2Crevlog-c| - 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%| - 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| - 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| - 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| - 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| - 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| -#endif - ---uncompressed is an alias to --stream - -#if stream-legacy - $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed - streaming all changes - 1090 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1090 files to transfer, 98.8 KB of data (zstd !) - transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) - searching for changes - no changes found -#endif -#if stream-bundle2 - $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed - streaming all changes - 1093 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1093 files to transfer, 98.9 KB of data (zstd !) - transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) -#endif - -Clone with background file closing enabled - -#if stream-legacy - $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding - using http://localhost:$HGPORT/ - sending capabilities command - sending branchmap command - streaming all changes - sending stream_out command - 1090 files to transfer, 102 KB of data (no-zstd !) - 1090 files to transfer, 98.8 KB of data (zstd !) - starting 4 threads for background file closing - updating the branch cache - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) 
- query 1; heads - sending batch command - searching for changes - all remote heads known locally - no changes found - sending getbundle command - bundle2-input-bundle: with-transaction - bundle2-input-part: "listkeys" (params: 1 mandatory) supported - bundle2-input-part: "phase-heads" supported - bundle2-input-part: total payload size 24 - bundle2-input-bundle: 2 parts total - checking for updated bookmarks - updating the branch cache - (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob) -#endif -#if stream-bundle2 - $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding - using http://localhost:$HGPORT/ - sending capabilities command - query 1; heads - sending batch command - streaming all changes - sending getbundle command - bundle2-input-bundle: with-transaction - bundle2-input-part: "stream2" (params: 3 mandatory) supported - applying stream bundle - 1093 files to transfer, 102 KB of data (no-zstd !) - 1093 files to transfer, 98.9 KB of data (zstd !) - starting 4 threads for background file closing - starting 4 threads for background file closing - updating the branch cache - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - bundle2-input-part: total payload size 118984 (no-zstd !) - transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) - bundle2-input-part: total payload size 116145 (zstd !) 
- bundle2-input-part: "listkeys" (params: 1 mandatory) supported - bundle2-input-bundle: 2 parts total - checking for updated bookmarks - updating the branch cache - (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob) -#endif - -Cannot stream clone when there are secret changesets - - $ hg -R server phase --force --secret -r tip - $ hg clone --stream -U http://localhost:$HGPORT secret-denied - warning: stream clone requested but server has them disabled - requesting all changes - adding changesets - adding manifests - adding file changes - added 2 changesets with 1025 changes to 1025 files - new changesets 96ee1d7354c4:c17445101a72 - - $ killdaemons.py - -Streaming of secrets can be overridden by server config - - $ cd server - $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid - $ cat hg.pid > $DAEMON_PIDS - $ cd .. - -#if stream-legacy - $ hg clone --stream -U http://localhost:$HGPORT secret-allowed - streaming all changes - 1090 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1090 files to transfer, 98.8 KB of data (zstd !) - transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) - searching for changes - no changes found -#endif -#if stream-bundle2 - $ hg clone --stream -U http://localhost:$HGPORT secret-allowed - streaming all changes - 1093 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1093 files to transfer, 98.9 KB of data (zstd !) - transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) -#endif - - $ killdaemons.py - -Verify interaction between preferuncompressed and secret presence - - $ cd server - $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid - $ cat hg.pid > $DAEMON_PIDS - $ cd .. 
- - $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret - requesting all changes - adding changesets - adding manifests - adding file changes - added 2 changesets with 1025 changes to 1025 files - new changesets 96ee1d7354c4:c17445101a72 - - $ killdaemons.py - -Clone not allowed when full bundles disabled and can't serve secrets - - $ cd server - $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid - $ cat hg.pid > $DAEMON_PIDS - $ cd .. - - $ hg clone --stream http://localhost:$HGPORT secret-full-disabled - warning: stream clone requested but server has them disabled - requesting all changes - remote: abort: server has pull-based clones disabled - abort: pull failed on remote - (remove --pull if specified or upgrade Mercurial) - [100] - -Local stream clone with secrets involved -(This is just a test over behavior: if you have access to the repo's files, -there is no security so it isn't important to prevent a clone here.) - - $ hg clone -U --stream server local-secret - warning: stream clone requested but server has them disabled - requesting all changes - adding changesets - adding manifests - adding file changes - added 2 changesets with 1025 changes to 1025 files - new changesets 96ee1d7354c4:c17445101a72 - -Stream clone while repo is changing: - - $ mkdir changing - $ cd changing - -extension for delaying the server process so we reliably can modify the repo -while cloning - - $ cat > stream_steps.py <<EOF - > import os - > import sys - > from mercurial import ( - > encoding, - > extensions, - > streamclone, - > testing, - > ) - > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1'] - > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2'] - > - > def _test_sync_point_walk_1(orig, repo): - > testing.write_file(WALKED_FILE_1) - > - > def _test_sync_point_walk_2(orig, repo): - > assert repo._currentlock(repo._lockref) is None - > testing.wait_file(WALKED_FILE_2) - > - > extensions.wrapfunction( - 
> streamclone, - > '_test_sync_point_walk_1', - > _test_sync_point_walk_1 - > ) - > extensions.wrapfunction( - > streamclone, - > '_test_sync_point_walk_2', - > _test_sync_point_walk_2 - > ) - > EOF - -prepare repo with small and big file to cover both code paths in emitrevlogdata - - $ hg init repo - $ touch repo/f1 - $ $TESTDIR/seq.py 50000 > repo/f2 - $ hg -R repo ci -Aqm "0" - $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1" - $ export HG_TEST_STREAM_WALKED_FILE_1 - $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2" - $ export HG_TEST_STREAM_WALKED_FILE_2 - $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3" - $ export HG_TEST_STREAM_WALKED_FILE_3 -# $ cat << EOF >> $HGRCPATH -# > [hooks] -# > pre-clone=rm -f "$TESTTMP/sync_file_walked_*" -# > EOF - $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py" - $ cat hg.pid >> $DAEMON_PIDS - -clone while modifying the repo between stating file with write lock and -actually serving file content - - $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") & - $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1 - $ echo >> repo/f1 - $ echo >> repo/f2 - $ hg -R repo ci -m "1" --config ui.timeout.warn=-1 - $ touch $HG_TEST_STREAM_WALKED_FILE_2 - $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3 - $ hg -R clone id - 000000000000 - $ cat errors.log - $ cd .. - -Stream repository with bookmarks --------------------------------- - -(revert introduction of secret changeset) - - $ hg -R server phase --draft 'secret()' - -add a bookmark - - $ hg -R server bookmark -r tip some-bookmark - -clone it - -#if stream-legacy - $ hg clone --stream http://localhost:$HGPORT with-bookmarks - streaming all changes - 1090 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) 
- 1090 files to transfer, 98.8 KB of data (zstd !) - transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) - searching for changes - no changes found - updating to branch default - 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved -#endif -#if stream-bundle2 - $ hg clone --stream http://localhost:$HGPORT with-bookmarks - streaming all changes - 1096 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1096 files to transfer, 99.1 KB of data (zstd !) - transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) - updating to branch default - 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved -#endif - $ hg verify -R with-bookmarks - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - checked 3 changesets with 1088 changes to 1088 files - $ hg -R with-bookmarks bookmarks - some-bookmark 2:5223b5e3265f - -Stream repository with phases ------------------------------ - -Clone as publishing - - $ hg -R server phase -r 'all()' - 0: draft - 1: draft - 2: draft - -#if stream-legacy - $ hg clone --stream http://localhost:$HGPORT phase-publish - streaming all changes - 1090 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1090 files to transfer, 98.8 KB of data (zstd !) - transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) - searching for changes - no changes found - updating to branch default - 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved -#endif -#if stream-bundle2 - $ hg clone --stream http://localhost:$HGPORT phase-publish - streaming all changes - 1096 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1096 files to transfer, 99.1 KB of data (zstd !) - transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) 
- updating to branch default - 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved -#endif - $ hg verify -R phase-publish - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - checked 3 changesets with 1088 changes to 1088 files - $ hg -R phase-publish phase -r 'all()' - 0: public - 1: public - 2: public - -Clone as non publishing - - $ cat << EOF >> server/.hg/hgrc - > [phases] - > publish = False - > EOF - $ killdaemons.py - $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid - $ cat hg.pid > $DAEMON_PIDS - -#if stream-legacy - -With v1 of the stream protocol, changeset are always cloned as public. It make -stream v1 unsuitable for non-publishing repository. - - $ hg clone --stream http://localhost:$HGPORT phase-no-publish - streaming all changes - 1090 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1090 files to transfer, 98.8 KB of data (zstd !) - transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) - searching for changes - no changes found - updating to branch default - 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg -R phase-no-publish phase -r 'all()' - 0: public - 1: public - 2: public -#endif -#if stream-bundle2 - $ hg clone --stream http://localhost:$HGPORT phase-no-publish - streaming all changes - 1097 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1097 files to transfer, 99.1 KB of data (zstd !) - transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !) 
- updating to branch default - 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg -R phase-no-publish phase -r 'all()' - 0: draft - 1: draft - 2: draft -#endif - $ hg verify -R phase-no-publish - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - checked 3 changesets with 1088 changes to 1088 files - - $ killdaemons.py - -#if stream-legacy - -With v1 of the stream protocol, changeset are always cloned as public. There's -no obsolescence markers exchange in stream v1. - -#endif -#if stream-bundle2 - -Stream repository with obsolescence ------------------------------------ - -Clone non-publishing with obsolescence - - $ cat >> $HGRCPATH << EOF - > [experimental] - > evolution=all - > EOF - - $ cd server - $ echo foo > foo - $ hg -q commit -m 'about to be pruned' - $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents - 1 new obsolescence markers - obsoleted 1 changesets - $ hg up null -q - $ hg log -T '{rev}: {phase}\n' - 2: draft - 1: draft - 0: draft - $ hg serve -p $HGPORT -d --pid-file=hg.pid - $ cat hg.pid > $DAEMON_PIDS - $ cd .. - - $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence - streaming all changes - 1098 files to transfer, 102 KB of data (no-zstd !) - transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) - 1098 files to transfer, 99.5 KB of data (zstd !) - transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !) 
- $ hg -R with-obsolescence log -T '{rev}: {phase}\n' - 2: draft - 1: draft - 0: draft - $ hg debugobsolete -R with-obsolescence - 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'} - $ hg verify -R with-obsolescence - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - checked 4 changesets with 1089 changes to 1088 files - - $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution - streaming all changes - remote: abort: server has obsolescence markers, but client cannot receive them via stream clone - abort: pull failed on remote - [100] - - $ killdaemons.py - -#endif
--- a/tests/test-clone.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-clone.t Tue Sep 28 09:40:57 2021 +0200 @@ -1125,7 +1125,7 @@ $ hg id -R remote -r 0 abort: repository remote not found [255] - $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote + $ hg --config share.pool=share -q clone a ssh://user@dummy/remote $ hg -R remote id -r 0 acb14030fe0a
--- a/tests/test-clonebundles.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-clonebundles.t Tue Sep 28 09:40:57 2021 +0200 @@ -208,7 +208,7 @@ Feature works over SSH - $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone + $ hg clone -U ssh://user@dummy/server ssh-full-clone applying clone bundle from http://localhost:$HGPORT1/full.hg adding changesets adding manifests
--- a/tests/test-commandserver.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-commandserver.t Tue Sep 28 09:40:57 2021 +0200 @@ -226,6 +226,7 @@ ui.detailed-exit-code=True ui.merge=internal:merge ui.mergemarkers=detailed + ui.ssh=* (glob) ui.timeout.warn=15 ui.foo=bar ui.nontty=true @@ -239,6 +240,7 @@ ui.detailed-exit-code=True ui.merge=internal:merge ui.mergemarkers=detailed + ui.ssh=* (glob) ui.timeout.warn=15 ui.nontty=true #endif
--- a/tests/test-completion.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-completion.t Tue Sep 28 09:40:57 2021 +0200 @@ -316,7 +316,7 @@ debugpushkey: debugpvec: debugrebuilddirstate: rev, minimal - debugrebuildfncache: + debugrebuildfncache: only-data debugrename: rev debugrequires: debugrevlog: changelog, manifest, dir, dump
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-config-parselist.py Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,52 @@ +""" +List-valued configuration keys have an ad-hoc microsyntax. From `hg help config`: + +> List values are separated by whitespace or comma, except when values are +> placed in double quotation marks: +> +> allow_read = "John Doe, PhD", brian, betty +> +> Quotation marks can be escaped by prefixing them with a backslash. Only +> quotation marks at the beginning of a word is counted as a quotation +> (e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``). + +That help documentation is fairly light on details, the actual parser has many +other edge cases. This test tries to cover them. +""" + +from mercurial.utils import stringutil + + +def assert_parselist(input, expected): + result = stringutil.parselist(input) + if result != expected: + raise AssertionError( + "parse_input(%r)\n got %r\nexpected %r" + % (input, result, expected) + ) + + +# Keep these Python tests in sync with the Rust ones in `rust/hg-core/src/config/values.rs` + +assert_parselist(b'', []) +assert_parselist(b',', []) +assert_parselist(b'A', [b'A']) +assert_parselist(b'B,B', [b'B', b'B']) +assert_parselist(b', C, ,C,', [b'C', b'C']) +assert_parselist(b'"', [b'"']) +assert_parselist(b'""', [b'', b'']) +assert_parselist(b'D,"', [b'D', b'"']) +assert_parselist(b'E,""', [b'E', b'', b'']) +assert_parselist(b'"F,F"', [b'F,F']) +assert_parselist(b'"G,G', [b'"G', b'G']) +assert_parselist(b'"H \\",\\"H', [b'"H', b',', b'H']) +assert_parselist(b'I,I"', [b'I', b'I"']) +assert_parselist(b'J,"J', [b'J', b'"J']) +assert_parselist(b'K K', [b'K', b'K']) +assert_parselist(b'"K" K', [b'K', b'K']) +assert_parselist(b'L\tL', [b'L', b'L']) +assert_parselist(b'"L"\tL', [b'L', b'', b'L']) +assert_parselist(b'M\x0bM', [b'M', b'M']) +assert_parselist(b'"M"\x0bM', [b'M', b'', b'M']) +assert_parselist(b'"N" , ,"', [b'N"']) +assert_parselist(b'" ,O, ', [b'"', b'O'])
--- a/tests/test-config.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-config.t Tue Sep 28 09:40:57 2021 +0200 @@ -413,7 +413,7 @@ The feature is experimental and behavior may varies. This test exists to make sure the code is run. We grep it to avoid too much variability in its current experimental state. - $ hg config --exp-all-known | grep commit + $ hg config --exp-all-known | grep commit | grep -v ssh commands.commit.interactive.git=False commands.commit.interactive.ignoreblanklines=False commands.commit.interactive.ignorews=False
--- a/tests/test-debugcommands.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-debugcommands.t Tue Sep 28 09:40:57 2021 +0200 @@ -644,14 +644,13 @@ Test debugpeer - $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog + $ hg debugpeer ssh://user@dummy/debugrevlog url: ssh://user@dummy/debugrevlog local: no pushable: yes - $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog - running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !) - running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !) + $ hg --debug debugpeer ssh://user@dummy/debugrevlog + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes sending hello command
--- a/tests/test-diff-change.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-diff-change.t Tue Sep 28 09:40:57 2021 +0200 @@ -119,7 +119,7 @@ +wdir $ hg diff -r "2 and 1" abort: empty revision range - [255] + [10] $ cd ..
--- a/tests/test-dirs.py Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-dirs.py Tue Sep 28 09:40:57 2021 +0200 @@ -13,13 +13,13 @@ (b'a/a/a', [b'a', b'a/a', b'']), (b'alpha/beta/gamma', [b'', b'alpha', b'alpha/beta']), ]: - d = pathutil.dirs({}) + d = pathutil.dirs([]) d.addpath(case) self.assertEqual(sorted(d), sorted(want)) def testinvalid(self): with self.assertRaises(ValueError): - d = pathutil.dirs({}) + d = pathutil.dirs([]) d.addpath(b'a//b')
--- a/tests/test-extdiff.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-extdiff.t Tue Sep 28 09:40:57 2021 +0200 @@ -87,7 +87,7 @@ $ hg extdiff -p diff --patch --rev 'ancestor()' --rev 1 abort: empty revision on one side of range - [255] + [10] Test diff during merge:
--- a/tests/test-extension.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-extension.t Tue Sep 28 09:40:57 2021 +0200 @@ -1692,6 +1692,26 @@ $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third' [1] +Don't explode on py3 with a bad version number (both str vs bytes, and not enough +parts) + + $ cat > minversion4.py << EOF + > from mercurial import util + > util.version = lambda: b'3.5' + > minimumhgversion = '3' + > EOF + $ hg --config extensions.minversion=minversion4.py version -v + Mercurial Distributed SCM (version 3.5) + (see https://mercurial-scm.org for more information) + + Copyright (C) 2005-* Olivia Mackall and others (glob) + This is free software; see the source for copying conditions. There is NO + warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + + Enabled extensions: + + minversion external + Restore HGRCPATH $ HGRCPATH=$ORGHGRCPATH
--- a/tests/test-fastannotate-hg.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-fastannotate-hg.t Tue Sep 28 09:40:57 2021 +0200 @@ -458,7 +458,7 @@ $ hg ann nosuchfile abort: nosuchfile: no such file in rev e9e6b4fa872f - [255] + [10] annotate file without '\n' on last line
--- a/tests/test-fastannotate-protocol.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-fastannotate-protocol.t Tue Sep 28 09:40:57 2021 +0200 @@ -1,6 +1,4 @@ $ cat >> $HGRCPATH << EOF - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > [extensions] > fastannotate= > [fastannotate]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-hgwebdir-gc.py Tue Sep 28 09:40:57 2021 +0200 @@ -0,0 +1,49 @@ +from __future__ import absolute_import + +import os +from mercurial.hgweb import hgwebdir_mod + +hgwebdir = hgwebdir_mod.hgwebdir + +os.mkdir(b'webdir') +os.chdir(b'webdir') + +webdir = os.path.realpath(b'.') + + +def trivial_response(req, res): + return [] + + +def make_hgwebdir(gc_rate=None): + config = os.path.join(webdir, b'hgwebdir.conf') + with open(config, 'wb') as configfile: + configfile.write(b'[experimental]\n') + if gc_rate is not None: + configfile.write(b'web.full-garbage-collection-rate=%d\n' % gc_rate) + hg_wd = hgwebdir(config) + hg_wd._runwsgi = trivial_response + return hg_wd + + +def process_requests(webdir_instance, number): + # we don't care for now about passing realistic arguments + for _ in range(number): + for chunk in webdir_instance.run_wsgi(None, None): + pass + + +without_gc = make_hgwebdir(gc_rate=0) +process_requests(without_gc, 5) +assert without_gc.requests_count == 5 +assert without_gc.gc_full_collections_done == 0 + +with_gc = make_hgwebdir(gc_rate=2) +process_requests(with_gc, 5) +assert with_gc.requests_count == 5 +assert with_gc.gc_full_collections_done == 2 + +with_systematic_gc = make_hgwebdir() # default value of the setting +process_requests(with_systematic_gc, 3) +assert with_systematic_gc.requests_count == 3 +assert with_systematic_gc.gc_full_collections_done == 3
--- a/tests/test-infinitepush-ci.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-infinitepush-ci.t Tue Sep 28 09:40:57 2021 +0200 @@ -9,8 +9,6 @@ $ . "$TESTDIR/library-infinitepush.sh" $ cat >> $HGRCPATH <<EOF - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > [alias] > glog = log -GT "{rev}:{node|short} {desc}\n{phase}" > EOF
--- a/tests/test-init.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-init.t Tue Sep 28 09:40:57 2021 +0200 @@ -123,7 +123,7 @@ init+push to remote2 - $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2 + $ hg init ssh://user@dummy/remote2 $ hg incoming -R remote2 local comparing with local changeset: 0:08b9e9f63b32 @@ -133,7 +133,7 @@ summary: init - $ hg push -R local -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote2 + $ hg push -R local ssh://user@dummy/remote2 pushing to ssh://user@dummy/remote2 searching for changes remote: adding changesets @@ -143,7 +143,7 @@ clone to remote1 - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1 + $ hg clone local ssh://user@dummy/remote1 searching for changes remote: adding changesets remote: adding manifests @@ -151,7 +151,7 @@ remote: added 1 changesets with 1 changes to 1 files The largefiles extension doesn't crash - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remotelf --config extensions.largefiles= + $ hg clone local ssh://user@dummy/remotelf --config extensions.largefiles= The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !) The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !) 
searching for changes @@ -162,14 +162,14 @@ init to existing repo - $ hg init -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote1 + $ hg init ssh://user@dummy/remote1 abort: repository remote1 already exists abort: could not create remote repo [255] clone to existing repo - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote1 + $ hg clone local ssh://user@dummy/remote1 abort: repository remote1 already exists abort: could not create remote repo [255] @@ -283,7 +283,7 @@ $ hg -R local bookmark test $ hg -R local bookmarks * test 0:08b9e9f63b32 - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" local ssh://user@dummy/remote-bookmarks + $ hg clone local ssh://user@dummy/remote-bookmarks searching for changes remote: adding changesets remote: adding manifests
--- a/tests/test-largefiles-wireproto.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-largefiles-wireproto.t Tue Sep 28 09:40:57 2021 +0200 @@ -124,7 +124,7 @@ #endif vanilla clients locked out from largefiles ssh repos - $ hg --config extensions.largefiles=! clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5 + $ hg --config extensions.largefiles=! clone ssh://user@dummy/r4 r5 remote: remote: This repository uses the largefiles extension. remote:
--- a/tests/test-log.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-log.t Tue Sep 28 09:40:57 2021 +0200 @@ -2516,10 +2516,9 @@ is global. So we shouldn't expect the namespace always exists. Using ssh:// makes sure a bundle repository is created from scratch. (issue6301) - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \ - > -qr0 "ssh://user@dummy/`pwd`/a" a-clone + $ hg clone -qr0 "ssh://user@dummy/`pwd`/a" a-clone $ hg incoming --config extensions.names=names.py -R a-clone \ - > -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -T '{bars}\n' -l1 + > -T '{bars}\n' -l1 comparing with ssh://user@dummy/$TESTTMP/a searching for changes
--- a/tests/test-logexchange.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-logexchange.t Tue Sep 28 09:40:57 2021 +0200 @@ -2,8 +2,6 @@ ============================================= $ cat >> $HGRCPATH << EOF - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > [alias] > glog = log -G -T '{rev}:{node|short} {desc}' > [extensions]
--- a/tests/test-merge-remove.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-merge-remove.t Tue Sep 28 09:40:57 2021 +0200 @@ -55,8 +55,8 @@ adding foo1 $ hg debugstate --no-dates - n 0 -2 unset bar - n 0 -2 unset foo1 + m 0 -2 unset bar + m 0 -2 unset foo1 copy: foo -> foo1 $ hg st -qC @@ -74,8 +74,8 @@ reverting foo1 $ hg debugstate --no-dates - n 0 -2 unset bar - n 0 -2 unset foo1 + m 0 -2 unset bar + m 0 -2 unset foo1 copy: foo -> foo1 $ hg st -qC
--- a/tests/test-missing-capability.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-missing-capability.t Tue Sep 28 09:40:57 2021 +0200 @@ -24,10 +24,6 @@ > [extensions] > disable-lookup = $TESTTMP/disable-lookup.py > EOF - $ cat >> .hg/hgrc <<EOF - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" - > EOF $ hg pull ssh://user@dummy/repo1 -r tip -B a pulling from ssh://user@dummy/repo1
--- a/tests/test-persistent-nodemap.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-persistent-nodemap.t Tue Sep 28 09:40:57 2021 +0200 @@ -1056,7 +1056,7 @@ No race condition - $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)' + $ hg clone -U --stream ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)' adding [s] 00manifest.n (62 bytes) adding [s] 00manifest-*.nd (118 KB) (glob) adding [s] 00changelog.n (62 bytes) @@ -1121,7 +1121,7 @@ Do a mix of clone and commit at the same time so that the file listed on disk differ at actual transfer time. - $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) & + $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) & $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1 $ hg -R test-repo/ commit -m foo $ touch $HG_TEST_STREAM_WALKED_FILE_2 @@ -1218,7 +1218,7 @@ Performe the mix of clone and full refresh of the nodemap, so that the files (and filenames) are different between listing time and actual transfer time. 
- $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) & + $ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) & $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1 $ rm test-repo/.hg/store/00changelog.n $ rm test-repo/.hg/store/00changelog-*.nd
--- a/tests/test-push-race.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-push-race.t Tue Sep 28 09:40:57 2021 +0200 @@ -102,7 +102,6 @@ $ cat >> $HGRCPATH << EOF > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > # simplify output > logtemplate = {node|short} {desc} ({branch}) > [phases]
--- a/tests/test-rebase-parameters.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-rebase-parameters.t Tue Sep 28 09:40:57 2021 +0200 @@ -132,7 +132,7 @@ $ hg rebase --dest '1 & !1' abort: empty revision set - [255] + [10] These work:
--- a/tests/test-rebuildstate.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-rebuildstate.t Tue Sep 28 09:40:57 2021 +0200 @@ -17,9 +17,16 @@ > try: > for file in pats: > if opts.get('normal_lookup'): - > repo.dirstate._normallookup(file) + > with repo.dirstate.parentchange(): + > repo.dirstate.update_file( + > file, + > p1_tracked=True, + > wc_tracked=True, + > possibly_dirty=True, + > ) > else: - > repo.dirstate._drop(file) + > repo.dirstate._map.reset_state(file) + > repo.dirstate._dirty = True > > repo.dirstate.write(repo.currenttransaction()) > finally:
--- a/tests/test-revset2.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-revset2.t Tue Sep 28 09:40:57 2021 +0200 @@ -840,7 +840,7 @@ $ hg diff -r 'author("babar") or author("celeste")' abort: empty revision range - [255] + [10] aliases:
--- a/tests/test-rhg.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-rhg.t Tue Sep 28 09:40:57 2021 +0200 @@ -126,6 +126,9 @@ [255] $ $NO_FALLBACK rhg cat -r d file-2 2 + $ $NO_FALLBACK rhg cat -r 0000 file-2 + abort: invalid revision identifier: 0000 + [255] Cat files $ cd $TESTTMP
--- a/tests/test-share.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-share.t Tue Sep 28 09:40:57 2021 +0200 @@ -160,7 +160,7 @@ Cloning a shared repo via bundle2 results in a non-shared clone $ cd .. - $ hg clone -q --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2 + $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2 $ cat ./cloned-via-bundle2/.hg/requires | grep "shared" [1] $ hg id --cwd cloned-via-bundle2 -r tip
--- a/tests/test-sparse-clone.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-sparse-clone.t Tue Sep 28 09:40:57 2021 +0200 @@ -2,7 +2,6 @@ $ cat >> $HGRCPATH << EOF > [ui] - > ssh = "$PYTHON" "$RUNTESTDIR/dummyssh" > username = nobody <no.reply@fb.com> > [extensions] > sparse=
--- a/tests/test-ssh-batch.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-ssh-batch.t Tue Sep 28 09:40:57 2021 +0200 @@ -9,7 +9,7 @@ fails (thus causing the sshpeer to be stopped), the errors from the further lookups don't result in tracebacks. - $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a + $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) ssh://user@dummy/$(pwd)/../a pulling from ssh://user@dummy/$TESTTMP/b/../a abort: unknown revision 'nosuchbookmark' [255]
--- a/tests/test-ssh-bundle1.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-ssh-bundle1.t Tue Sep 28 09:40:57 2021 +0200 @@ -52,7 +52,7 @@ repo not found error - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local + $ hg clone ssh://user@dummy/nonexistent local remote: abort: repository nonexistent not found abort: no suitable response from remote hg [255] @@ -60,7 +60,7 @@ non-existent absolute path #if no-msys - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local + $ hg clone ssh://user@dummy//`pwd`/nonexistent local remote: abort: repository /$TESTTMP/nonexistent not found abort: no suitable response from remote hg [255] @@ -70,7 +70,7 @@ #if no-reposimplestore - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream + $ hg clone --stream ssh://user@dummy/remote local-stream streaming all changes 4 files to transfer, 602 bytes of data (no-zstd !) transferred 602 bytes in * seconds (*) (glob) (no-zstd !) @@ -94,7 +94,7 @@ clone bookmarks via stream $ hg -R local-stream book mybook - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2 + $ hg clone --stream ssh://user@dummy/local-stream stream2 streaming all changes 4 files to transfer, 602 bytes of data (no-zstd !) transferred 602 bytes in * seconds (*) (glob) (no-zstd !) 
@@ -114,7 +114,7 @@ clone remote via pull - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local + $ hg clone ssh://user@dummy/remote local requesting all changes adding changesets adding manifests @@ -142,14 +142,14 @@ $ hg paths default = ssh://user@dummy/remote - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" + $ hg pull pulling from ssh://user@dummy/remote searching for changes no changes found pull from wrong ssh URL - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist + $ hg pull ssh://user@dummy/doesnotexist pulling from ssh://user@dummy/doesnotexist remote: abort: repository doesnotexist not found abort: no suitable response from remote hg @@ -163,8 +163,6 @@ updating rc $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc - $ echo "[ui]" >> .hg/hgrc - $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc find outgoing @@ -181,7 +179,7 @@ find incoming on the remote side - $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local + $ hg incoming -R ../remote ssh://user@dummy/local comparing with ssh://user@dummy/local searching for changes changeset: 3:a28a9d1a809c @@ -194,7 +192,7 @@ find incoming on the remote side (using absolute path) - $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`" + $ hg incoming -R ../remote "ssh://user@dummy/`pwd`" comparing with ssh://user@dummy/$TESTTMP/local searching for changes changeset: 3:a28a9d1a809c @@ -241,7 +239,7 @@ test pushkeys and bookmarks $ cd $TESTTMP/local - $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces + $ hg debugpushkey ssh://user@dummy/remote namespaces bookmarks namespaces phases @@ -256,7 +254,7 @@ no changes found exporting bookmark foo [1] - $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks + $ hg debugpushkey ssh://user@dummy/remote bookmarks foo 
1160648e36cec0054048a7edc4110c6f84fde594 $ hg book -f foo $ hg push --traceback @@ -328,7 +326,7 @@ $ hg -R ../remote bookmark test $ hg -R ../remote bookmarks * test 4:6c0482d977a3 - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks + $ hg clone ssh://user@dummy/remote local-bookmarks requesting all changes adding changesets adding manifests @@ -356,21 +354,21 @@ Test remote paths with spaces (issue2983): - $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo" + $ hg init "ssh://user@dummy/a repo" $ touch "$TESTTMP/a repo/test" $ hg -R 'a repo' commit -A -m "test" adding test $ hg -R 'a repo' tag tag - $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo" + $ hg id "ssh://user@dummy/a repo" 73649e48688a - $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO" + $ hg id "ssh://user@dummy/a repo#noNoNO" abort: unknown revision 'noNoNO' [255] Test (non-)escaping of remote paths with spaces when cloning (issue3145): - $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo" + $ hg clone "ssh://user@dummy/a repo" destination directory: a repo abort: destination 'a repo' is not empty [10] @@ -462,8 +460,6 @@ $ cat >> .hg/hgrc << EOF > [paths] > default-push = ssh://user@dummy/remote - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > [extensions] > localwrite = localwrite.py > EOF @@ -486,7 +482,7 @@ $ hg pull --debug ssh://user@dummy/remote pulling from ssh://user@dummy/remote - running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re) sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) 
sending hello command sending between command @@ -583,11 +579,11 @@ $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc - $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout + $ hg -q clone ssh://user@dummy/remote hookout $ cd hookout $ touch hookfailure $ hg -q commit -A -m 'remote hook failure' - $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push + $ hg push pushing to ssh://user@dummy/remote searching for changes remote: adding changesets @@ -607,7 +603,7 @@ > [extensions] > crash = ${TESTDIR}/crashgetbundler.py > EOF - $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull + $ hg pull pulling from ssh://user@dummy/remote searching for changes adding changesets
--- a/tests/test-ssh-clone-r.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-ssh-clone-r.t Tue Sep 28 09:40:57 2021 +0200 @@ -28,7 +28,7 @@ clone remote via stream $ for i in 0 1 2 3 4 5 6 7 8; do - > hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream -r "$i" ssh://user@dummy/remote test-"$i" + > hg clone --stream -r "$i" ssh://user@dummy/remote test-"$i" > if cd test-"$i"; then > hg verify > cd .. @@ -160,7 +160,7 @@ checked 9 changesets with 7 changes to 4 files $ cd .. $ cd test-1 - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 4 ssh://user@dummy/remote + $ hg pull -r 4 ssh://user@dummy/remote pulling from ssh://user@dummy/remote searching for changes adding changesets @@ -175,7 +175,7 @@ crosschecking files in changesets and manifests checking files checked 3 changesets with 2 changes to 1 files - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote + $ hg pull ssh://user@dummy/remote pulling from ssh://user@dummy/remote searching for changes adding changesets @@ -186,7 +186,7 @@ (run 'hg update' to get a working copy) $ cd .. $ cd test-2 - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 5 ssh://user@dummy/remote + $ hg pull -r 5 ssh://user@dummy/remote pulling from ssh://user@dummy/remote searching for changes adding changesets @@ -201,7 +201,7 @@ crosschecking files in changesets and manifests checking files checked 5 changesets with 3 changes to 1 files - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote + $ hg pull ssh://user@dummy/remote pulling from ssh://user@dummy/remote searching for changes adding changesets
--- a/tests/test-ssh-proto.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-ssh-proto.t Tue Sep 28 09:40:57 2021 +0200 @@ -28,8 +28,6 @@ > } $ cat >> $HGRCPATH << EOF - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > [devel] > debug.peer-request = true > [extensions] @@ -65,8 +63,7 @@ $ cd .. $ hg --debug debugpeer ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) - running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes sending hello command @@ -178,8 +175,7 @@ --debug will print the banner $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) - running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes sending hello command @@ -269,8 +265,7 @@ servers. $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) - running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes sending hello command @@ -315,8 +310,7 @@ o> 1\n $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) 
- running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) sending no-args command devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes @@ -385,8 +379,7 @@ o> \n $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) - running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) sending unknown1 command sending unknown2 command sending unknown3 command @@ -961,8 +954,7 @@ $ cd .. $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) - running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) sending upgrade request: * proto=exp-ssh-v2-0003 (glob) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes @@ -1019,8 +1011,7 @@ $ cd .. $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) - running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) 
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) sending upgrade request: * proto=exp-ssh-v2-0003 (glob) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes @@ -1038,8 +1029,7 @@ Verify the peer has capabilities $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server - running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) - running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re) sending upgrade request: * proto=exp-ssh-v2-0003 (glob) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes
--- a/tests/test-ssh-repoerror.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-ssh-repoerror.t Tue Sep 28 09:40:57 2021 +0200 @@ -4,13 +4,6 @@ `alias hg=rhg` by run-tests.py. With such alias removed, this test is revealed buggy. This need to be resolved sooner than later. -initial setup - - $ cat << EOF >> $HGRCPATH - > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" - > EOF - repository itself is non-readable ---------------------------------
--- a/tests/test-ssh.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-ssh.t Tue Sep 28 09:40:57 2021 +0200 @@ -42,18 +42,18 @@ repo not found error - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local + $ hg clone ssh://user@dummy/nonexistent local remote: abort: repository nonexistent not found abort: no suitable response from remote hg [255] - $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local + $ hg clone -q ssh://user@dummy/nonexistent local remote: abort: repository nonexistent not found abort: no suitable response from remote hg [255] non-existent absolute path - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/nonexistent local + $ hg clone ssh://user@dummy/`pwd`/nonexistent local remote: abort: repository $TESTTMP/nonexistent not found abort: no suitable response from remote hg [255] @@ -62,7 +62,7 @@ #if no-reposimplestore - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream + $ hg clone --stream ssh://user@dummy/remote local-stream streaming all changes 8 files to transfer, 827 bytes of data (no-zstd !) transferred 827 bytes in * seconds (*) (glob) (no-zstd !) 
@@ -84,7 +84,7 @@ clone bookmarks via stream $ hg -R local-stream book mybook - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2 + $ hg clone --stream ssh://user@dummy/local-stream stream2 streaming all changes 15 files to transfer, * of data (glob) transferred * in * seconds (*) (glob) @@ -100,7 +100,7 @@ clone remote via pull - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local + $ hg clone ssh://user@dummy/remote local requesting all changes adding changesets adding manifests @@ -128,14 +128,14 @@ $ hg paths default = ssh://user@dummy/remote - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" + $ hg pull pulling from ssh://user@dummy/remote searching for changes no changes found pull from wrong ssh URL - $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/doesnotexist + $ hg pull ssh://user@dummy/doesnotexist pulling from ssh://user@dummy/doesnotexist remote: abort: repository doesnotexist not found abort: no suitable response from remote hg @@ -149,8 +149,6 @@ updating rc $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc - $ echo "[ui]" >> .hg/hgrc - $ echo "ssh = \"$PYTHON\" \"$TESTDIR/dummyssh\"" >> .hg/hgrc find outgoing @@ -167,7 +165,7 @@ find incoming on the remote side - $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/local + $ hg incoming -R ../remote ssh://user@dummy/local comparing with ssh://user@dummy/local searching for changes changeset: 3:a28a9d1a809c @@ -180,7 +178,7 @@ find incoming on the remote side (using absolute path) - $ hg incoming -R ../remote -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`" + $ hg incoming -R ../remote "ssh://user@dummy/`pwd`" comparing with ssh://user@dummy/$TESTTMP/local searching for changes changeset: 3:a28a9d1a809c @@ -227,7 +225,7 @@ test pushkeys and bookmarks $ cd $TESTTMP/local - $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" 
ssh://user@dummy/remote namespaces + $ hg debugpushkey ssh://user@dummy/remote namespaces bookmarks namespaces phases @@ -242,7 +240,7 @@ no changes found exporting bookmark foo [1] - $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks + $ hg debugpushkey ssh://user@dummy/remote bookmarks foo 1160648e36cec0054048a7edc4110c6f84fde594 $ hg book -f foo $ hg push --traceback @@ -347,7 +345,7 @@ $ hg -R ../remote bookmark test $ hg -R ../remote bookmarks * test 4:6c0482d977a3 - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks + $ hg clone ssh://user@dummy/remote local-bookmarks requesting all changes adding changesets adding manifests @@ -375,21 +373,21 @@ Test remote paths with spaces (issue2983): - $ hg init --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo" + $ hg init "ssh://user@dummy/a repo" $ touch "$TESTTMP/a repo/test" $ hg -R 'a repo' commit -A -m "test" adding test $ hg -R 'a repo' tag tag - $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo" + $ hg id "ssh://user@dummy/a repo" 73649e48688a - $ hg id --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO" + $ hg id "ssh://user@dummy/a repo#noNoNO" abort: unknown revision 'noNoNO' [255] Test (non-)escaping of remote paths with spaces when cloning (issue3145): - $ hg clone --ssh "\"$PYTHON\" \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo" + $ hg clone "ssh://user@dummy/a repo" destination directory: a repo abort: destination 'a repo' is not empty [10] @@ -515,8 +513,6 @@ $ cat >> .hg/hgrc << EOF > [paths] > default-push = ssh://user@dummy/remote - > [ui] - > ssh = "$PYTHON" "$TESTDIR/dummyssh" > [extensions] > localwrite = localwrite.py > EOF @@ -540,7 +536,7 @@ $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes pulling from ssh://user@dummy/remote - running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") 
(re) + running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re) sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !) devel-peer-request: hello+between devel-peer-request: pairs: 81 bytes @@ -670,11 +666,11 @@ $ echo "pretxnchangegroup.fail = python:$TESTTMP/failhook:hook" >> remote/.hg/hgrc - $ hg -q --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" clone ssh://user@dummy/remote hookout + $ hg -q clone ssh://user@dummy/remote hookout $ cd hookout $ touch hookfailure $ hg -q commit -A -m 'remote hook failure' - $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" push + $ hg push pushing to ssh://user@dummy/remote searching for changes remote: adding changesets @@ -695,7 +691,7 @@ > [extensions] > crash = ${TESTDIR}/crashgetbundler.py > EOF - $ hg --config ui.ssh="\"$PYTHON\" $TESTDIR/dummyssh" pull + $ hg pull pulling from ssh://user@dummy/remote searching for changes remote: abort: this is an exercise @@ -704,14 +700,14 @@ abort with no error hint when there is a ssh problem when pulling - $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" + $ hg pull ssh://brokenrepository pulling from ssh://brokenrepository/ abort: no suitable response from remote hg [255] abort with configured error hint when there is a ssh problem when pulling - $ hg pull ssh://brokenrepository -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" \ + $ hg pull ssh://brokenrepository \ > --config ui.ssherrorhint="Please see http://company/internalwiki/ssh.html" pulling from ssh://brokenrepository/ abort: no suitable response from remote hg
--- a/tests/test-stream-bundle-v2.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-stream-bundle-v2.t Tue Sep 28 09:40:57 2021 +0200 @@ -14,7 +14,6 @@ > evolution.exchange=True > bundle2-output-capture=True > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline} > [web] > push_ssl = false
--- a/tests/test-subrepo-relative-path.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-subrepo-relative-path.t Tue Sep 28 09:40:57 2021 +0200 @@ -186,7 +186,7 @@ subrepo paths with ssh urls - $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/cloned sshclone + $ hg clone ssh://user@dummy/cloned sshclone requesting all changes adding changesets adding manifests @@ -203,7 +203,7 @@ new changesets 863c1745b441 3 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg -R sshclone push -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/`pwd`/cloned + $ hg -R sshclone push ssh://user@dummy/`pwd`/cloned pushing to ssh://user@dummy/$TESTTMP/cloned pushing subrepo sub to ssh://user@dummy/$TESTTMP/sub searching for changes
--- a/tests/test-transaction-rollback-on-revlog-split.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-transaction-rollback-on-revlog-split.t Tue Sep 28 09:40:57 2021 +0200 @@ -82,15 +82,14 @@ date: Thu Jan 01 00:00:00 1970 +0000 summary: _ - $ hg verify - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files + $ hg verify -q warning: revlog 'data/file.d' not in fncache! - checked 2 changesets with 2 changes to 1 files 1 warnings encountered! hint: run "hg debugrebuildfncache" to recover from corrupt fncache + $ hg debugrebuildfncache --only-data + adding data/file.d + 1 items added, 0 removed from fncache + $ hg verify -q $ cd .. @@ -133,12 +132,7 @@ date: Thu Jan 01 00:00:00 1970 +0000 summary: _ - $ hg verify - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - checked 2 changesets with 2 changes to 1 files + $ hg verify -q $ cd .. @@ -170,13 +164,8 @@ date: Thu Jan 01 00:00:00 1970 +0000 summary: _ - $ hg verify - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files + $ hg verify -q warning: revlog 'data/file.d' not in fncache! - checked 2 changesets with 2 changes to 1 files 1 warnings encountered! hint: run "hg debugrebuildfncache" to recover from corrupt fncache $ cd ..
--- a/tests/test-transaction-rollback-on-sigpipe.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-transaction-rollback-on-sigpipe.t Tue Sep 28 09:40:57 2021 +0200 @@ -2,7 +2,7 @@ the remote hg is able to successfully roll back the transaction. $ hg init -q remote - $ hg clone -e "\"$PYTHON\" \"$RUNTESTDIR/dummyssh\"" -q ssh://user@dummy/`pwd`/remote local + $ hg clone -q ssh://user@dummy/`pwd`/remote local $ SIGPIPE_REMOTE_DEBUG_FILE="$TESTTMP/DEBUGFILE" $ SYNCFILE1="$TESTTMP/SYNCFILE1" $ SYNCFILE2="$TESTTMP/SYNCFILE2" @@ -36,7 +36,7 @@ (use quiet to avoid flacky output from the server) - $ hg push --quiet -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --remotecmd "$remotecmd" + $ hg push --quiet --remotecmd "$remotecmd" abort: stream ended unexpectedly (got 0 bytes, expected 4) [255] $ cat $SIGPIPE_REMOTE_DEBUG_FILE
--- a/tests/test-treemanifest.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-treemanifest.t Tue Sep 28 09:40:57 2021 +0200 @@ -1,8 +1,3 @@ - $ cat << EOF >> $HGRCPATH - > [ui] - > ssh="$PYTHON" "$TESTDIR/dummyssh" - > EOF - Set up repo $ hg --config experimental.treemanifest=True init repo
--- a/tests/test-wireproto.py Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-wireproto.py Tue Sep 28 09:40:57 2021 +0200 @@ -75,9 +75,7 @@ @wireprotov1peer.batchable def greet(self, name): - f = wireprotov1peer.future() - yield {b'name': mangle(name)}, f - yield unmangle(f.value) + return {b'name': mangle(name)}, unmangle class serverrepo(object):
--- a/tests/test-wireproto.t Tue Sep 21 18:18:56 2021 +0200 +++ b/tests/test-wireproto.t Tue Sep 28 09:40:57 2021 +0200 @@ -142,13 +142,13 @@ SSH (try to exercise the ssh functionality with a dummy script): - $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo uno due tre quattro + $ hg debugwireargs ssh://user@dummy/repo uno due tre quattro uno due tre quattro None - $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --four vier + $ hg debugwireargs ssh://user@dummy/repo eins zwei --four vier eins zwei None vier None - $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei + $ hg debugwireargs ssh://user@dummy/repo eins zwei eins zwei None None None - $ hg debugwireargs --ssh "\"$PYTHON\" $TESTDIR/dummyssh" ssh://user@dummy/repo eins zwei --five fuenf + $ hg debugwireargs ssh://user@dummy/repo eins zwei --five fuenf eins zwei None None None Explicitly kill daemons to let the test exit on Windows