changeset 50404:64cdd80d6909

branching: merge stable into default
author Raphaël Gomès <rgomes@octobus.net>
date Fri, 24 Mar 2023 10:43:47 +0100
parents 4341c2271d67 (current diff) 1b3aa9762d3a (diff)
children c0fbd63d66a7
files tests/test-hook.t
diffstat 21 files changed, 596 insertions(+), 127 deletions(-)
--- a/.hgsigs	Wed Mar 22 15:19:02 2023 +0100
+++ b/.hgsigs	Fri Mar 24 10:43:47 2023 +0100
@@ -240,3 +240,4 @@
 59466b13a3ae0e29a5d4f485393e516cfbb057d0 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmO1XgoZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVn8nDACU04KbPloLl+if6DQYreESnF9LU8C+qnLC/j5RRuaFNh/ec6C3DzLWqWdmnWA/siV3nUR1bXHfTui95azxJfYvWoXH2R2yam+YhE256B4rDDYWS1LI9kNNM+A33xcPS2HxVowkByhjB5FPKR6I90dX42BYJpTS5s/VPx63wXLznjFWuD7XJ3P0VI7D72j/+6EQCmHaAUEE5bO00Ob2JxmzJlaP+02fYc814PAONE2/ocfR0aExAVS3VA+SJGXnXTVpoaHr7NJKC2sBLFsdnhIRwtCf3rtGEvIJ5v2U2xx0ZEz/mimtGzW5ovkthobV4mojk0DRz7xBtA96pOGSRTD8QndIsdMCUipo8zZ/AGAMByCtsQOX7OYhR6gp+I6+iPh8fTR5oCbkO7cizDDQtXcrR5OT/BDH9xkAF1ghNL8o23a09/wfZ9NPg5zrh/4T/dFfoe2COlkAJJ1ttDPYyQkCfMsoWm3OXk6xJ3ExVbwkZzUDQSzsxGS+oxbFDWJZ64Q=
 8830004967ad865ead89c28a410405a6e71e0796 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQAsOQZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVl7XC/0W+Wd4gzMUbaot+NVIZTpubNw3KHBDXrlMgwQgCDg7qcqJnVuT1NNEy5sRELjZOO0867k+pBchZaxdmAiFwY1W76+7nwiLBqfCkYgYY0iQe48JHTq9kCgohvx9PSEVbUsScmqAQImd5KzErjhsLj8D2FiFIrcMyqsCBq4ZPs0Ey7lVKu6q3z5eDjlrxUIr0up6yKvgBxhY0GxyTp6DGoinzlFMEadiJlsvlwO4C6UpzKiCGMeKNT5xHK/Hx3ChrOH2Yuu1fHaPLJ+ZpXjR33ileVYlkQrh1D6fWHXcP7ZuwsEKREtgsw1YjYczGFwmhBO362bNi5wy33mBtCvcIAqpsI0rMrExs66qqbfyG+Yp1dvkgzUfdhbYFHA+mvg3/YTSD9dLKzzsb69LM87+dvcLqhBJ0nEAuBmAzU5ECkoArbiwMT96NhhjLPRmJJdHNo0IDos/LBGTgkOZ6iqIx8Xm/tgjBjFJG8B+IVy3laNgun4AZ9Ejc3ahIfhJUIo2j8o=
 05de4896508e8ec387b33eb30d8aab78d1c8e9e4 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQBI2AZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVrRZC/wJyPOJoxpjEJZaRoBmWtkOlf0Y0TyEb6wd8tZIVALNDYZMSMqT7UBjFmaZijOYndUW7ZCj1hKShaIw80vY/hjJ3KZMODY9t91SOwmrVaGrCUeF1tXkuhEgwxfkekPWLxYYc688gLb6oc3FBm//lucNGrOWBXw6yhm1dUcndHXXpafjJslKAHwJN7vI5q69SxvS6SlJUzh/RFWYLnbZ2Qi35ixkU12FZiYVzxDl2i7XbhVoT5mit6VTU7Wh4BMSYuorAv937sF9Y6asE7sQUYHC2C2qjp8S5uFXV/IrhCPbJyWVc4ymPm58Eh6SmItC9zHDviFF9aFoZMK/lfK3Dqumu3T9x6ZYcxulpjNsM0/yv9OiiWbw33PnNb74A9uwrxZHB3XexXiigBUlUzO4lJQ5Oe1rhpPfPPRVyxaeZ8/cPmoJjCuwoiG0YtUeNH5PkHi05O0/hLR9PftDY8oMyzOBErSqjMjZ6OTkFFgk3dI9rHU72C1KL9Jh5uHwEQchBmg=
+f14864fffdcab725d9eac6d4f4c07be05a35f59a 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQc3KUZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVnYZDACh1Bcj8Yu3t8pO22SKWJnz8Ndw9Hvw+ifLaRxFUxKtqUYvy3CIl2qt8k7V13M25qw0061SKgcvNdjtkOhdmtFHNAbqryy0nK9oSZ2GfndmJfMxm9ixF/CcHrx+MmsklEz2woApViHW5PrmgKvZNsStQ5NM457Yx3B4nsT9b8t03NzdNiZRM+RZOkZ+4OdSbiB6hYuTqEFIi2YM+gfVM5Z7H8sEFBkUCtuwUjFGaWThZGGhAcqD5E7p/Lkjv4e4tzyHOzHDgdd+OCAkcbib6/E3Q1MlQ1x7CKpJ190T8R35CzAIMBVoTSI+Ov7OKw1OfGdeCvMVJsKUvqY3zrPawmJB6pG7GoVPEu5pU65H51U3Plq3GhsekUrKWY/BSHV9FOqpKZdnxOAllfWcjLYpbC/fM3l8uuQVcPAs89GvWKnDuE/NWCDYzDAYE++s/H4tP3Chv6yQbPSv/lbccst7OfLLDtXgRHIyEWLo392X3mWzhrkNtfJkBdi39uH9Aoh7pN0=
--- a/.hgtags	Wed Mar 22 15:19:02 2023 +0100
+++ b/.hgtags	Fri Mar 24 10:43:47 2023 +0100
@@ -256,3 +256,4 @@
 59466b13a3ae0e29a5d4f485393e516cfbb057d0 6.3.2
 8830004967ad865ead89c28a410405a6e71e0796 6.3.3
 05de4896508e8ec387b33eb30d8aab78d1c8e9e4 6.4rc0
+f14864fffdcab725d9eac6d4f4c07be05a35f59a 6.4
--- a/mercurial/changelog.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/changelog.py	Fri Mar 24 10:43:47 2023 +0100
@@ -481,7 +481,7 @@
             self._delaybuf = None
         self._divert = False
         # split when we're done
-        self._enforceinlinesize(tr)
+        self._enforceinlinesize(tr, side_write=False)
 
     def _writepending(self, tr):
         """create a file containing the unfinalized state for
@@ -512,9 +512,9 @@
 
         return False
 
-    def _enforceinlinesize(self, tr):
+    def _enforceinlinesize(self, tr, side_write=True):
         if not self._delayed:
-            revlog.revlog._enforceinlinesize(self, tr)
+            revlog.revlog._enforceinlinesize(self, tr, side_write=side_write)
 
     def read(self, nodeorrev):
         """Obtain data from a parsed changelog revision.
--- a/mercurial/debugcommands.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/debugcommands.py	Fri Mar 24 10:43:47 2023 +0100
@@ -803,11 +803,12 @@
     # security to avoid crash on corrupted revlogs
     total_revs = len(index)
 
+    chain_size_cache = {}
+
     def revinfo(rev):
         e = index[rev]
         compsize = e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
         uncompsize = e[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH]
-        chainsize = 0
 
         base = e[revlog_constants.ENTRY_DELTA_BASE]
         p1 = e[revlog_constants.ENTRY_PARENT_1]
@@ -870,11 +871,17 @@
                 deltatype = b'prev'
 
         chain = r._deltachain(rev)[0]
-        for iterrev in chain:
-            e = index[iterrev]
-            chainsize += e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
-
-        return p1, p2, compsize, uncompsize, deltatype, chain, chainsize
+        chain_size = 0
+        for iter_rev in reversed(chain):
+            cached = chain_size_cache.get(iter_rev)
+            if cached is not None:
+                chain_size += cached
+                break
+            e = index[iter_rev]
+            chain_size += e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
+        chain_size_cache[rev] = chain_size
+
+        return p1, p2, compsize, uncompsize, deltatype, chain, chain_size
 
     fm = ui.formatter(b'debugdeltachain', opts)
 
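
The debugdeltachain change above stops re-summing every revision's full delta
chain: each chain is walked in reverse and the walk stops as soon as it reaches
a revision whose total is already known. A minimal standalone sketch of that
caching pattern, using toy data rather than Mercurial's revlog API:

  # delta_parent[rev] is the revision this rev's delta builds on, or None for
  # a full snapshot; compressed_size[rev] is the on-disk size of that delta.
  delta_parent = {0: None, 1: 0, 2: 1, 3: 2, 4: None, 5: 4}
  compressed_size = {0: 100, 1: 10, 2: 12, 3: 9, 4: 120, 5: 15}

  chain_size_cache = {}

  def chain_size(rev):
      """Total compressed size of the delta chain ending at `rev`."""
      pending = []
      size = 0
      cur = rev
      while cur is not None:
          cached = chain_size_cache.get(cur)
          if cached is not None:
              size = cached          # reuse the already-summed prefix
              break
          pending.append(cur)
          cur = delta_parent[cur]
      # fill the cache on the way back so later queries stop early
      for r in reversed(pending):
          size += compressed_size[r]
          chain_size_cache[r] = size
      return size

  assert chain_size(3) == 100 + 10 + 12 + 9
  assert chain_size(2) == 122        # answered straight from the cache
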
--- a/mercurial/filelog.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/filelog.py	Fri Mar 24 10:43:47 2023 +0100
@@ -25,7 +25,7 @@
 
 @interfaceutil.implementer(repository.ifilestorage)
 class filelog:
-    def __init__(self, opener, path):
+    def __init__(self, opener, path, try_split=False):
         self._revlog = revlog.revlog(
             opener,
             # XXX should use the unencoded path
@@ -33,6 +33,7 @@
             radix=b'/'.join((b'data', path)),
             censorable=True,
             canonical_parent_order=False,  # see comment in revlog.py
+            try_split=try_split,
         )
         # Full name of the user visible file, relative to the repository root.
         # Used by LFS.
@@ -256,8 +257,8 @@
 class narrowfilelog(filelog):
     """Filelog variation to be used with narrow stores."""
 
-    def __init__(self, opener, path, narrowmatch):
-        super(narrowfilelog, self).__init__(opener, path)
+    def __init__(self, opener, path, narrowmatch, try_split=False):
+        super(narrowfilelog, self).__init__(opener, path, try_split=try_split)
         self._narrowmatch = narrowmatch
 
     def renamed(self, node):
--- a/mercurial/hook.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/hook.py	Fri Mar 24 10:43:47 2023 +0100
@@ -191,6 +191,11 @@
         cwd = encoding.getcwd()
     r = ui.system(cmd, environ=env, cwd=cwd, blockedtag=b'exthook-%s' % (name,))
 
+    if repo is not None and repo.currentwlock() is None:
+        repo.invalidatedirstate()
+    if repo is not None and repo.currentlock() is None:
+        repo.invalidate()
+
     duration = util.timer() - starttime
     ui.log(
         b'exthook',
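
The hook.py hunk above drops cached repository state after an external hook
runs, but only when the current process does not hold the corresponding lock
(with the lock held, nothing else can have changed the data behind our back).
A toy illustration of that rule, not Mercurial's API:

  import subprocess

  class CachedView:
      """A file whose content is loaded lazily and cached in memory."""

      def __init__(self, path):
          self.path = path
          self.lock_held = False
          self._cache = None

      def read(self):
          if self._cache is None:
              with open(self.path, 'rb') as fh:
                  self._cache = fh.read()
          return self._cache

      def invalidate(self):
          self._cache = None

  def run_hook(view, cmd):
      subprocess.call(cmd, shell=True)   # the hook may rewrite the file
      if not view.lock_held:
          # we cannot know what the hook did: drop the cache so the next
          # read() reloads from disk instead of serving stale data
          view.invalidate()
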
--- a/mercurial/interfaces/repository.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/interfaces/repository.py	Fri Mar 24 10:43:47 2023 +0100
@@ -1810,6 +1810,9 @@
     def lock(wait=True):
         """Lock the repository store and return a lock instance."""
 
+    def currentlock():
+        """Return the lock if it's held or None."""
+
     def wlock(wait=True):
         """Lock the non-store parts of the repository."""
 
--- a/mercurial/localrepo.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/localrepo.py	Fri Mar 24 10:43:47 2023 +0100
@@ -1240,7 +1240,12 @@
         if path.startswith(b'/'):
             path = path[1:]
 
-        return filelog.filelog(self.svfs, path)
+        try_split = (
+            self.currenttransaction() is not None
+            or txnutil.mayhavepending(self.root)
+        )
+
+        return filelog.filelog(self.svfs, path, try_split=try_split)
 
 
 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
@@ -1251,7 +1256,13 @@
         if path.startswith(b'/'):
             path = path[1:]
 
-        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
+        try_split = (
+            self.currenttransaction() is not None
+            or txnutil.mayhavepending(self.root)
+        )
+        return filelog.narrowfilelog(
+            self.svfs, path, self._storenarrowmatch, try_split=try_split
+        )
 
 
 def makefilestorage(requirements, features, **kwargs):
@@ -1794,10 +1805,29 @@
         )
 
     def _dirstatevalidate(self, node):
+        okay = True
         try:
             self.changelog.rev(node)
+        except error.LookupError:
+            # If the parents are unknown, it might just be because the
+            # changelog in memory is lagging behind the dirstate in memory,
+            # so try to refresh the changelog first.
+            #
+            # We only do so if we don't hold the lock; if we do hold the
+            # lock, the invalidation at that time should have taken care of
+            # this and something is very fishy.
+            if self.currentlock() is None:
+                self.invalidate()
+                try:
+                    self.changelog.rev(node)
+                except error.LookupError:
+                    okay = False
+            else:
+                # XXX we should consider raising an error here.
+                okay = False
+        if okay:
             return node
-        except error.LookupError:
+        else:
             if not self._dirstatevalidatewarned:
                 self._dirstatevalidatewarned = True
                 self.ui.warn(
@@ -3130,6 +3160,10 @@
         """Returns the wlock if it's held, or None if it's not."""
         return self._currentlock(self._wlockref)
 
+    def currentlock(self):
+        """Returns the lock if it's held, or None if it's not."""
+        return self._currentlock(self._lockref)
+
     def checkcommitpatterns(self, wctx, match, status, fail):
         """check for commit arguments that aren't committable"""
         if match.isexact() or match.prefix():
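
The `_dirstatevalidate` hunk above retries a failed parent lookup once after
invalidating the in-memory changelog, but only when the lock is not held. A
sketch of the shape of that logic, with hypothetical `refresh` and `lock_held`
arguments and `KeyError` standing in for `error.LookupError`:

  def validate_parent(node, changelog, refresh, lock_held):
      """Return True if `node` is known, refreshing the changelog if needed."""
      try:
          changelog.rev(node)
          return True
      except KeyError:
          if lock_held:
              # with the lock held, the in-memory changelog should already be
              # current, so an unknown parent is a real problem
              return False
          changelog = refresh()          # reload from disk and retry once
          try:
              changelog.rev(node)
              return True
          except KeyError:
              return False
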
--- a/mercurial/revlog.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/revlog.py	Fri Mar 24 10:43:47 2023 +0100
@@ -302,6 +302,7 @@
         persistentnodemap=False,
         concurrencychecker=None,
         trypending=False,
+        try_split=False,
         canonical_parent_order=True,
     ):
         """
@@ -328,6 +329,7 @@
         self._nodemap_file = None
         self.postfix = postfix
         self._trypending = trypending
+        self._try_split = try_split
         self.opener = opener
         if persistentnodemap:
             self._nodemap_file = nodemaputil.get_nodemap_file(self)
@@ -511,6 +513,8 @@
             entry_point = b'%s.i.%s' % (self.radix, self.postfix)
         elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
             entry_point = b'%s.i.a' % self.radix
+        elif self._try_split and self.opener.exists(b'%s.i.s' % self.radix):
+            entry_point = b'%s.i.s' % self.radix
         else:
             entry_point = b'%s.i' % self.radix
 
@@ -2015,7 +2019,7 @@
                 raise error.CensoredNodeError(self.display_id, node, text)
             raise
 
-    def _enforceinlinesize(self, tr):
+    def _enforceinlinesize(self, tr, side_write=True):
         """Check if the revlog is too big for inline and convert if so.
 
         This should be called after revisions are added to the revlog. If the
@@ -2032,7 +2036,8 @@
             raise error.RevlogError(
                 _(b"%s not found in the transaction") % self._indexfile
             )
-        trindex = None
+        if troffset:
+            tr.addbackup(self._indexfile, for_offset=True)
         tr.add(self._datafile, 0)
 
         existing_handles = False
@@ -2048,6 +2053,29 @@
             # No need to deal with sidedata writing handle as it is only
             # relevant with revlog-v2 which is never inline, not reaching
             # this code
+        if side_write:
+            old_index_file_path = self._indexfile
+            new_index_file_path = self._indexfile + b'.s'
+            opener = self.opener
+
+            fncache = getattr(opener, 'fncache', None)
+            if fncache is not None:
+                fncache.addignore(new_index_file_path)
+
+            # the "split" index replaces the real index when the transaction is finalized
+            def finalize_callback(tr):
+                opener.rename(
+                    new_index_file_path,
+                    old_index_file_path,
+                    checkambig=True,
+                )
+
+            tr.registertmp(new_index_file_path)
+            if self.target[1] is not None:
+                finalize_id = b'000-revlog-split-%d-%s' % self.target
+            else:
+                finalize_id = b'000-revlog-split-%d' % self.target[0]
+            tr.addfinalize(finalize_id, finalize_callback)
 
         new_dfh = self._datafp(b'w+')
         new_dfh.truncate(0)  # drop any potentially existing data
@@ -2055,17 +2083,10 @@
             with self._indexfp() as read_ifh:
                 for r in self:
                     new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
-                    if (
-                        trindex is None
-                        and troffset
-                        <= self.start(r) + r * self.index.entry_size
-                    ):
-                        trindex = r
                 new_dfh.flush()
 
-            if trindex is None:
-                trindex = 0
-
+            if side_write:
+                self._indexfile = new_index_file_path
             with self.__index_new_fp() as fp:
                 self._format_flags &= ~FLAG_INLINE_DATA
                 self._inline = False
@@ -2079,16 +2100,9 @@
                 if self._docket is not None:
                     self._docket.index_end = fp.tell()
 
-                # There is a small transactional race here. If the rename of
-                # the index fails, we should remove the datafile. It is more
-                # important to ensure that the data file is not truncated
-                # when the index is replaced as otherwise data is lost.
-                tr.replace(self._datafile, self.start(trindex))
-
-                # the temp file replace the real index when we exit the context
-                # manager
-
-            tr.replace(self._indexfile, trindex * self.index.entry_size)
+                # If we don't use side-write, the temp file replaces the real
+                # index when we exit the context manager
+
             nodemaputil.setup_persistent_nodemap(tr, self)
             self._segmentfile = randomaccessfile.randomaccessfile(
                 self.opener,
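
The revlog hunk above is the heart of the more robust split: instead of
renaming the rewritten index over `<radix>.i` right away, it is written to a
`<radix>.i.s` side file, registered as a temporary file, and only moved into
place in a transaction finalizer. A generic sketch of that scheme with a toy
transaction object (not Mercurial's transaction API):

  import os

  class ToyTransaction:
      def __init__(self):
          self._tmp_files = []
          self._finalizers = []

      def registertmp(self, path):
          self._tmp_files.append(path)

      def addfinalize(self, callback):
          self._finalizers.append(callback)

      def commit(self):
          for cb in self._finalizers:
              cb()                       # rename the side file into place
          self._tmp_files.clear()

      def abort(self):
          for path in self._tmp_files:   # drop side files, keep the original
              if os.path.exists(path):
                  os.unlink(path)

  def split_index(tr, index_path, new_index_data):
      side_path = index_path + '.s'
      with open(side_path, 'wb') as fh:  # the original index stays untouched
          fh.write(new_index_data)
      tr.registertmp(side_path)
      tr.addfinalize(lambda: os.replace(side_path, index_path))
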
--- a/mercurial/store.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/store.py	Fri Mar 24 10:43:47 2023 +0100
@@ -403,7 +403,7 @@
 # some exception to the above matching
 #
 # XXX This is currently not in use because of issue6542
-EXCLUDED = re.compile(b'.*undo\.[^/]+\.(nd?|i)$')
+EXCLUDED = re.compile(br'.*undo\.[^/]+\.(nd?|i)$')
 
 
 def is_revlog(f, kind, st):
@@ -603,6 +603,7 @@
     # hence the encodedir/decodedir dance
     def __init__(self, vfs):
         self.vfs = vfs
+        self._ignores = set()
         self.entries = None
         self._dirty = False
         # set of new additions to fncache
@@ -687,7 +688,12 @@
             self.entries = None
             self.addls = set()
 
+    def addignore(self, fn):
+        self._ignores.add(fn)
+
     def add(self, fn):
+        if fn in self._ignores:
+            return
         if self.entries is None:
             self._load()
         if fn not in self.entries:
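
The store.py hunk above teaches the fncache to skip files that were explicitly
registered as ignored, so the transient `.i.s` side index never gets recorded
on disk. A simplified illustration (not the real fncachestore):

  class ToyFnCache:
      def __init__(self):
          self.entries = set()
          self._ignores = set()

      def addignore(self, fn):
          self._ignores.add(fn)

      def add(self, fn):
          if fn in self._ignores:
              return                     # transient file: never record it
          self.entries.add(fn)

  cache = ToyFnCache()
  cache.addignore(b'data/file.i.s')
  cache.add(b'data/file.i.s')            # silently skipped
  cache.add(b'data/file.d')
  assert cache.entries == {b'data/file.d'}
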
--- a/mercurial/transaction.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/transaction.py	Fri Mar 24 10:43:47 2023 +0100
@@ -105,7 +105,48 @@
     unlink=True,
     checkambigfiles=None,
 ):
+    """roll back a transaction:
+    - truncate files that have been appended to
+    - restore file backups
+    - delete temporary files
+    """
+    backupfiles = []
+
+    def restore_one_backup(vfs, f, b, checkambig):
+        filepath = vfs.join(f)
+        backuppath = vfs.join(b)
+        try:
+            util.copyfile(backuppath, filepath, checkambig=checkambig)
+            backupfiles.append((vfs, b))
+        except IOError as exc:
+            e_msg = stringutil.forcebytestr(exc)
+            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
+            raise
+
+    # gather all backup files that impact the store
+    # (we need this to detect files that are both backed up and truncated)
+    store_backup = {}
+    for entry in backupentries:
+        location, file_path, backup_path, cache = entry
+        vfs = vfsmap[location]
+        is_store = vfs.join(b'') == opener.join(b'')
+        if is_store and file_path and backup_path:
+            store_backup[file_path] = entry
+    copy_done = set()
+
+    # truncate each file `f` to its offset `o`
     for f, o in sorted(dict(entries).items()):
+        # if we have a backup for `f`, we should restore it first and truncate
+        # the restored file
+        bck_entry = store_backup.get(f)
+        if bck_entry is not None:
+            location, file_path, backup_path, cache = bck_entry
+            checkambig = False
+            if checkambigfiles:
+                checkambig = (file_path, location) in checkambigfiles
+            restore_one_backup(opener, file_path, backup_path, checkambig)
+            copy_done.add(bck_entry)
+        # truncate the file to its pre-transaction size
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
@@ -124,45 +165,52 @@
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
+            # delete empty file
             try:
                 opener.unlink(f)
             except FileNotFoundError:
                 pass
-
-    backupfiles = []
-    for l, f, b, c in backupentries:
+    # restore backed up files and clean up temporary files
+    for entry in backupentries:
+        if entry in copy_done:
+            continue
+        l, f, b, c = entry
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
+            checkambig = checkambigfiles and (f, l) in checkambigfiles
             if f and b:
-                filepath = vfs.join(f)
-                backuppath = vfs.join(b)
-                checkambig = checkambigfiles and (f, l) in checkambigfiles
-                try:
-                    util.copyfile(backuppath, filepath, checkambig=checkambig)
-                    backupfiles.append(b)
-                except IOError as exc:
-                    e_msg = stringutil.forcebytestr(exc)
-                    report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
+                restore_one_backup(vfs, f, b, checkambig)
             else:
                 target = f or b
                 try:
                     vfs.unlink(target)
                 except FileNotFoundError:
+                    # This is fine because
+                    #
+                    # either we are trying to delete the main file, and it is
+                    # already deleted.
+                    #
+                    # or we are trying to delete a temporary file and it is
+                    # already deleted.
+                    #
+                    # in both cases, our target result (delete the file) is
+                    # already achieved.
                     pass
         except (IOError, OSError, error.Abort):
             if not c:
                 raise
 
+    # clean up the transaction state file and the backups file
     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
     try:
-        for f in backupfiles:
-            if opener.exists(f):
-                opener.unlink(f)
+        for vfs, f in backupfiles:
+            if vfs.exists(f):
+                vfs.unlink(f)
     except (IOError, OSError, error.Abort):
         # only pure backup files remain, it is safe to ignore any error
         pass
@@ -331,7 +379,7 @@
         self._file.flush()
 
     @active
-    def addbackup(self, file, hardlink=True, location=b''):
+    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
@@ -340,17 +388,25 @@
 
         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
+
+        If `for_offset` is set, we expect an offset for this file to have been previously recorded
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
-        if (
-            file in self._newfiles
-            or file in self._offsetmap
-            or file in self._backupmap
-        ):
+        if file in self._newfiles or file in self._backupmap:
+            return
+        elif file in self._offsetmap and not for_offset:
             return
+        elif for_offset and file not in self._offsetmap:
+            msg = (
+                'calling `addbackup` with `for_offset=True`, '
+                'but no offset recorded: [%r] %r'
+            )
+            msg %= (location, file)
+            raise error.ProgrammingError(msg)
+
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
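
The transaction.py hunk above changes rollback so that store files which are
both backed up and have a recorded offset get their backup restored before
being truncated; otherwise the truncation could run against a file that was
replaced mid-transaction (such as a split revlog index). A simplified sketch
of that ordering, using plain files instead of Mercurial's vfs layer:

  import shutil

  def rollback(entries, backups):
      """entries: {path: pre-transaction offset}; backups: {path: backup path}."""
      restored = set()
      for path, offset in sorted(entries.items()):
          backup = backups.get(path)
          if backup is not None:
              # the file may have been swapped out during the transaction;
              # put the backed-up version back before truncating it
              shutil.copyfile(backup, path)
              restored.add(path)
          with open(path, 'r+b') as fh:
              fh.truncate(offset)
      # restore the remaining backups that had no recorded offset
      for path, backup in backups.items():
          if path not in restored:
              shutil.copyfile(backup, path)
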
--- a/mercurial/url.py	Wed Mar 22 15:19:02 2023 +0100
+++ b/mercurial/url.py	Fri Mar 24 10:43:47 2023 +0100
@@ -327,7 +327,9 @@
             self.cert_file = cert_file
 
         def connect(self):
-            self.sock = socket.create_connection((self.host, self.port))
+            self.sock = socket.create_connection(
+                (self.host, self.port), self.timeout
+            )
 
             host = self.host
             realhostport = self.realhostport  # pytype: disable=attribute-error
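
The url.py fix above simply forwards the configured timeout; without the second
argument, `socket.create_connection()` falls back to the global default and an
HTTPS connection attempt can hang indefinitely. A minimal standalone
illustration:

  import socket

  def connect(host, port, timeout=30.0):
      # raises TimeoutError (an OSError subclass) if the TCP connection is
      # not established within `timeout` seconds
      return socket.create_connection((host, port), timeout)
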
--- a/relnotes/6.4	Wed Mar 22 15:19:02 2023 +0100
+++ b/relnotes/6.4	Fri Mar 24 10:43:47 2023 +0100
@@ -1,4 +1,4 @@
-= Mercurial 6.4rc0 =
+= Mercurial 6.4 =
 
 == New Features ==
 
@@ -90,6 +90,31 @@
  * bundlerepo: apply phase data stored in the bundle instead of assuming `draft`
  * config-item: declare undeclared path suboption
  * narrow: read pending file when applicable
+ * rust: fix building on macOS (issue6801)
+ * run-tests: fix a crash when using the coverage options
+ * undo-files: also remove the undo.backupfiles
+ * undo-files: cleanup backup when cleaning undos
+ * undo-files: clean existing files up before writing new one
+ * undo-files: cleanup legacy files when applicable
+ * dirstate-v2: fix an incorrect handling of readdir errors
+ * rust: update zstd dependency
+ * rust: upgrade `rayon` dependency
+ * dirstate: fix the bug in [status] dealing with committed&ignored directories
+ * dirstate: fix a potential traceback when in `copy` and `rename`
+ * histedit: fix diff colors
+ * cext: fix for PyLong refactoring in CPython 3.12
+ * py3: fix for Python 3.12 emitting SyntaxWarning on invalid escape sequences
+ * statprof: with Python 3.12, lineno is (more) often None
+ * transaction: properly clean up backup file outside of .hg/store/
+ * transaction: raise on backup restoration error
+ * revlog: improve the robustness of the splitting process
+ * debugdeltachain: stop summing the same chain over and over
+ * url: don't ignore timeout for https connections
+ * py3: fix for Python 3.12 emitting SyntaxWarning on invalid escape sequences
+ * tests: accept a test output change in [tests/test-serve.t]
+ * rust: fix thread cap (for real this time)
+ * dirstate: try refreshing the changelog when parents are unknown
+ * hooks: invalidate the repo after the hooks
 
 == Backwards Compatibility Changes ==
  * rust: upgrade supported Rust toolchain version
--- a/rust/hg-core/src/dirstate_tree/status.rs	Wed Mar 22 15:19:02 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/status.rs	Fri Mar 24 10:43:47 2023 +0100
@@ -47,16 +47,10 @@
     options: StatusOptions,
 ) -> Result<(DirstateStatus<'dirstate>, Vec<PatternFileWarning>), StatusError>
 {
-    // Force the global rayon threadpool to not exceed 16 concurrent threads.
-    // This is a stop-gap measure until we figure out why using more than 16
-    // threads makes `status` slower for each additional thread.
-    // We use `ok()` in case the global threadpool has already been
-    // instantiated in `rhg` or some other caller.
-    // TODO find the underlying cause and fix it, then remove this.
-    rayon::ThreadPoolBuilder::new()
-        .num_threads(16.min(rayon::current_num_threads()))
-        .build_global()
-        .ok();
+    // Also cap for a Python caller of this function, but don't complain if
+    // the global threadpool has already been set since this code path is also
+    // being used by `rhg`, which calls this early.
+    let _ = crate::utils::cap_default_rayon_threads();
 
     let (ignore_fn, warnings, patterns_changed): (IgnoreFnType, _, _) =
         if options.list_ignored || options.list_unknown {
--- a/rust/hg-core/src/utils.rs	Wed Mar 22 15:19:02 2023 +0100
+++ b/rust/hg-core/src/utils.rs	Fri Mar 24 10:43:47 2023 +0100
@@ -498,3 +498,35 @@
         Err(e) => Some(Err(e)),
     })
 }
+
+/// Force the global rayon threadpool to not exceed 16 concurrent threads
+/// unless the user has specified a value.
+/// This is a stop-gap measure until we figure out why using more than 16
+/// threads makes `status` slower for each additional thread.
+///
+/// TODO find the underlying cause and fix it, then remove this.
+///
+/// # Errors
+///
+/// Returns an error if the global threadpool has already been initialized
+/// when we try to initialize it.
+pub fn cap_default_rayon_threads() -> Result<(), rayon::ThreadPoolBuildError> {
+    const THREAD_CAP: usize = 16;
+
+    if std::env::var("RAYON_NUM_THREADS").is_err() {
+        let available_parallelism = std::thread::available_parallelism()
+            .map(usize::from)
+            .unwrap_or(1);
+        let new_thread_count = THREAD_CAP.min(available_parallelism);
+        let res = rayon::ThreadPoolBuilder::new()
+            .num_threads(new_thread_count)
+            .build_global();
+        if res.is_ok() {
+            log::trace!(
+                "Capped the rayon threadpool to {new_thread_count} threads",
+            );
+        }
+        return res;
+    }
+    Ok(())
+}
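
A Python analogue of `cap_default_rayon_threads()` above, to show the policy
rather than the Rust specifics: honor an explicit environment override,
otherwise cap the worker count at 16 or the machine's available parallelism,
whichever is smaller (the `HG_EXAMPLE_NUM_THREADS` name is hypothetical):

  import os
  from concurrent.futures import ThreadPoolExecutor

  THREAD_CAP = 16

  def make_default_pool():
      override = os.environ.get("HG_EXAMPLE_NUM_THREADS")  # hypothetical knob
      if override is not None:
          workers = int(override)
      else:
          workers = min(THREAD_CAP, os.cpu_count() or 1)
      return ThreadPoolExecutor(max_workers=workers)
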
--- a/rust/rhg/src/main.rs	Wed Mar 22 15:19:02 2023 +0100
+++ b/rust/rhg/src/main.rs	Fri Mar 24 10:43:47 2023 +0100
@@ -140,6 +140,13 @@
 
     env_logger::init();
 
+    // Make sure nothing in a future version of `rhg` sets the global
+    // threadpool before we can cap default threads. (This is also called
+    // in core because Python uses the same code path; the check here is
+    // redundant but harmless.)
+    hg::utils::cap_default_rayon_threads()
+        .expect("Rayon threadpool already initialized");
+
     let early_args = EarlyArgs::parse(&argv);
 
     let initial_current_dir = early_args.cwd.map(|cwd| {
--- a/tests/helper-killhook.py	Wed Mar 22 15:19:02 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,5 +0,0 @@
-import os
-
-
-def killme(ui, repo, hooktype, **wkargs):
-    os._exit(80)
--- a/tests/test-dirstate-read-race.t	Wed Mar 22 15:19:02 2023 +0100
+++ b/tests/test-dirstate-read-race.t	Fri Mar 24 10:43:47 2023 +0100
@@ -249,7 +249,7 @@
 
 The status process should return a consistent result and not crash.
 
-#if rust no-rhg dirstate-v2-append
+#if no-rhg
   $ cat $TESTTMP/status-race-lock.out
   A dir/o
   R dir/nested/m
@@ -258,7 +258,7 @@
   ? q
   $ cat $TESTTMP/status-race-lock.log
 #else
-#if rhg pre-some-read dirstate-v2-append
+#if pre-some-read dirstate-v2-append
   $ cat $TESTTMP/status-race-lock.out
   A dir/o
   R dir/nested/m
@@ -268,12 +268,10 @@
   $ cat $TESTTMP/status-race-lock.log
 #else
   $ cat $TESTTMP/status-race-lock.out
-  M dir/o (no-rhg known-bad-output !)
   ? dir/n
   ? p
   ? q
   $ cat $TESTTMP/status-race-lock.log
-  warning: ignoring unknown working parent 02a67a77ee9b! (no-rhg !)
 #endif
 #endif
 
--- a/tests/test-hook.t	Wed Mar 22 15:19:02 2023 +0100
+++ b/tests/test-hook.t	Fri Mar 24 10:43:47 2023 +0100
@@ -1423,3 +1423,41 @@
   ### no ######## plain: <unset>
   ### auto ###### plain: 1
   Mercurial Distributed SCM (*) (glob)
+
+Test hooks that change the underlying repo
+=========================================
+
+The blackbox extension accesses the dirstate afterward and can see a
+changelog / dirstate desync.
+
+
+  $ cd $TESTTMP
+  $ cat <<EOF >> $HGRCPATH
+  > [extensions]
+  > blackbox=
+  > [hooks]
+  > post-merge = hg commit -m "auto merge"
+  > EOF
+
+  $ hg init t
+  $ cd t
+  $ touch ".hgignore"
+  $ hg commit -Am "initial" -d'0 0'
+  adding .hgignore
+
+  $ echo This is file a1 > a
+  $ hg commit -Am "commit #1" -d'0 0'
+  adding a
+
+  $ hg update 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo This is file b1 > b
+  $ hg commit -Am "commit #2" -d'0 0'
+  adding b
+  created new head
+
+  $ hg merge 1
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ cd ..
--- a/tests/test-serve.t	Wed Mar 22 15:19:02 2023 +0100
+++ b/tests/test-serve.t	Fri Mar 24 10:43:47 2023 +0100
@@ -55,7 +55,7 @@
 #if no-windows
   $ KILLQUIETLY=Y
   $ hgserve -p daytime
-  abort: cannot start server at 'localhost:13': Permission denied (?)
+  abort: cannot start server at 'localhost:13': $EACCES$ (?)
   abort: child process failed to start (?)
   abort: no port number associated with service 'daytime' (?)
   listening at http://localhost/ (bound to $LOCALIP:13) (?)
--- a/tests/test-transaction-rollback-on-revlog-split.t	Wed Mar 22 15:19:02 2023 +0100
+++ b/tests/test-transaction-rollback-on-revlog-split.t	Fri Mar 24 10:43:47 2023 +0100
@@ -1,22 +1,103 @@
 Test correctness of revlog inline -> non-inline transition
 ----------------------------------------------------------
 
-Helper extension to intercept renames.
+Helper extensions to intercept renames and kill the process
 
-  $ cat > $TESTTMP/intercept_rename.py << EOF
+  $ cat > $TESTTMP/intercept_before_rename.py << EOF
   > import os
-  > import sys
+  > import signal
+  > from mercurial import extensions, util
+  > 
+  > def extsetup(ui):
+  >     def rename(orig, src, dest, *args, **kwargs):
+  >         path = util.normpath(dest)
+  >         if path.endswith(b'data/file.i'):
+  >             os.kill(os.getpid(), signal.SIGKILL)
+  >         return orig(src, dest, *args, **kwargs)
+  >     extensions.wrapfunction(util, 'rename', rename)
+  > EOF
+
+  $ cat > $TESTTMP/intercept_after_rename.py << EOF
+  > import os
+  > import signal
   > from mercurial import extensions, util
   > 
   > def extsetup(ui):
   >     def close(orig, *args, **kwargs):
   >         path = util.normpath(args[0]._atomictempfile__name)
+  >         r = orig(*args, **kwargs)
   >         if path.endswith(b'/.hg/store/data/file.i'):
-  >             os._exit(80)
-  >         return orig(*args, **kwargs)
+  >             os.kill(os.getpid(), signal.SIGKILL)
+  >         return r
   >     extensions.wrapfunction(util.atomictempfile, 'close', close)
+  > def extsetup(ui):
+  >     def rename(orig, src, dest, *args, **kwargs):
+  >         path = util.normpath(dest)
+  >         r = orig(src, dest, *args, **kwargs)
+  >         if path.endswith(b'data/file.i'):
+  >             os.kill(os.getpid(), signal.SIGKILL)
+  >         return r
+  >     extensions.wrapfunction(util, 'rename', rename)
   > EOF
 
+  $ cat > $TESTTMP/killme.py << EOF
+  > import os
+  > import signal
+  > 
+  > def killme(ui, repo, hooktype, **kwargs):
+  >     os.kill(os.getpid(), signal.SIGKILL)
+  > EOF
+
+  $ cat > $TESTTMP/reader_wait_split.py << EOF
+  > import os
+  > import signal
+  > from mercurial import extensions, revlog, testing
+  > def _wait_post_load(orig, self, *args, **kwargs):
+  >     wait = b'data/file' in self.radix
+  >     if wait:
+  >         testing.wait_file(b"$TESTTMP/writer-revlog-split")
+  >     r = orig(self, *args, **kwargs)
+  >     if wait:
+  >         testing.write_file(b"$TESTTMP/reader-index-read")
+  >         testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
+  >     return r
+  > 
+  > def extsetup(ui):
+  >     extensions.wrapfunction(revlog.revlog, '_loadindex', _wait_post_load)
+  > EOF
+
+Set up a repository for tests
+----------------------------
+
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > revlog-compression=none
+  > EOF
+
+  $ hg init troffset-computation
+  $ cd troffset-computation
+  $ printf '%20d' '1' > file
+  $ hg commit -Aqma
+  $ printf '%1024d' '1' > file
+  $ hg commit -Aqmb
+  $ printf '%20d' '1' > file
+  $ hg commit -Aqmc
+  $ dd if=/dev/zero of=file bs=1k count=128 > /dev/null 2>&1
+  $ hg commit -AqmD --traceback
+
+Reference size:
+  $ f -s file
+  file: size=131072
+  $ f -s .hg/store/data/file*
+  .hg/store/data/file.d: size=132139
+  .hg/store/data/file.i: size=256
+
+  $ cd ..
+
+
+Test a hard crash after the file was split but before the transaction was committed
+===================================================================================
+
 Test offset computation to correctly factor in the index entries themselves.
 Also test that the new data size has the correct size if the transaction is aborted
 after the index has been replaced.
@@ -28,30 +109,19 @@
 If the transaction adding c, D is rolled back, then we don't undo the revlog split,
 but truncate the index and the data to remove both c and D.
 
-  $ hg init troffset-computation --config format.revlog-compression=none
-  $ cd troffset-computation
-  $ printf '%20d' '1' > file
-  $ hg commit -Aqma
-  $ printf '%1024d' '1' > file
-  $ hg commit -Aqmb
-  $ printf '%20d' '1' > file
-  $ hg commit -Aqmc
-  $ dd if=/dev/zero of=file bs=1k count=128 > /dev/null 2>&1
-  $ hg commit -AqmD
 
-  $ cd ..
-
-  $ hg clone -r 1 troffset-computation troffset-computation-copy --config format.revlog-compression=none -q
+  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy
   $ cd troffset-computation-copy
 
 Reference size:
-
+  $ f -s file
+  file: size=1024
   $ f -s .hg/store/data/file*
   .hg/store/data/file.i: size=1174
 
   $ cat > .hg/hgrc <<EOF
   > [hooks]
-  > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
+  > pretxnchangegroup = python:$TESTTMP/killme.py:killme
   > EOF
 #if chg
   $ hg pull ../troffset-computation
@@ -60,27 +130,38 @@
 #else
   $ hg pull ../troffset-computation
   pulling from ../troffset-computation
-  [80]
+  Killed
+  [137]
 #endif
-  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file | tail -1
-  data/file.i 128
+
+
+The inline revlog still exists, but a split version exists next to it
+
+  $ f -s .hg/store/data/file*
+  .hg/store/data/file.d: size=132139
+  .hg/store/data/file.i: size=132395
+  .hg/store/data/file.i.s: size=256
+
 
 The first file.i entry should match the "Reference size" above.
 The first file.d entry is the temporary record during the split,
-the second entry after the split happened. The sum of the second file.d
-and the second file.i entry should match the first file.i entry.
+
+A "temporary file" entry exists for the split index.
 
   $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
   data/file.i 1174
   data/file.d 0
-  data/file.d 1046
-  data/file.i 128
+  $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep data/file
+   data/file.i data/journal.backup.file.i 0
+   data/file.i.s 0
+
+recover rolls the split back; the fncache is still valid
+
   $ hg recover
   rolling back interrupted transaction
   (verify step skipped, run `hg verify` to check your repository content)
   $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=1046
-  .hg/store/data/file.i: size=128
+  .hg/store/data/file.i: size=1174
   $ hg tip
   changeset:   1:cfa8d6e60429
   tag:         tip
@@ -89,47 +170,67 @@
   summary:     b
   
   $ hg verify -q
-   warning: revlog 'data/file.d' not in fncache!
-  1 warnings encountered!
-  hint: run "hg debugrebuildfncache" to recover from corrupt fncache
   $ hg debugrebuildfncache --only-data
-  adding data/file.d
-  1 items added, 0 removed from fncache
+  fncache already up to date
   $ hg verify -q
   $ cd ..
 
+Test a hard crash right before the index is moved into place
+===========================================================
 
 Now retry the procedure but intercept the rename of the index and check that
 the journal does not contain the new index size. This demonstrates the edge case
 where the data file is left as garbage.
 
-  $ hg clone -r 1 troffset-computation troffset-computation-copy2 --config format.revlog-compression=none -q
+  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy2
   $ cd troffset-computation-copy2
+
+Reference size:
+  $ f -s file
+  file: size=1024
+  $ f -s .hg/store/data/file*
+  .hg/store/data/file.i: size=1174
+
   $ cat > .hg/hgrc <<EOF
   > [extensions]
-  > intercept_rename = $TESTTMP/intercept_rename.py
-  > [hooks]
-  > pretxnchangegroup = python:$TESTDIR/helper-killhook.py:killme
+  > intercept_rename = $TESTTMP/intercept_before_rename.py
   > EOF
 #if chg
   $ hg pull ../troffset-computation
   pulling from ../troffset-computation
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
   [255]
 #else
   $ hg pull ../troffset-computation
   pulling from ../troffset-computation
-  [80]
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  Killed
+  [137]
 #endif
+
+The inline revlog still exists, but a split version exists next to it
+
+  $ f -s .hg/store/data/file*
+  .hg/store/data/file.d: size=132139
+  .hg/store/data/file.i: size=132395
+  .hg/store/data/file.i.s: size=256
+
   $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
   data/file.i 1174
   data/file.d 0
-  data/file.d 1046
+
+recover rolls the split back; the fncache is still valid
 
   $ hg recover
   rolling back interrupted transaction
   (verify step skipped, run `hg verify` to check your repository content)
   $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=1046
   .hg/store/data/file.i: size=1174
   $ hg tip
   changeset:   1:cfa8d6e60429
@@ -141,10 +242,77 @@
   $ hg verify -q
   $ cd ..
 
+Test a hard crash right after the index is moved into place
+===========================================================
+
+Now retry the procedure but intercept the rename of the index.
+
+  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-crash-after-rename
+  $ cd troffset-computation-crash-after-rename
+
+Reference size:
+  $ f -s file
+  file: size=1024
+  $ f -s .hg/store/data/file*
+  .hg/store/data/file.i: size=1174
+
+  $ cat > .hg/hgrc <<EOF
+  > [extensions]
+  > intercept_rename = $TESTTMP/intercept_after_rename.py
+  > EOF
+#if chg
+  $ hg pull ../troffset-computation
+  pulling from ../troffset-computation
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  [255]
+#else
+  $ hg pull ../troffset-computation
+  pulling from ../troffset-computation
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  Killed
+  [137]
+#endif
+
+The inline revlog was overwritten on disk
+
+  $ f -s .hg/store/data/file*
+  .hg/store/data/file.d: size=132139
+  .hg/store/data/file.i: size=256
+
+  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
+  data/file.i 1174
+  data/file.d 0
+
+recover rolls the split back; the fncache is still valid
+
+  $ hg recover
+  rolling back interrupted transaction
+  (verify step skipped, run `hg verify` to check your repository content)
+  $ f -s .hg/store/data/file*
+  .hg/store/data/file.i: size=1174
+  $ hg tip
+  changeset:   1:cfa8d6e60429
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     b
+  
+  $ hg verify -q
+  $ cd ..
+
+Have the transaction roll itself back without any hard crash
+===========================================================
+
 
 Repeat the original test but let hg rollback the transaction.
 
-  $ hg clone -r 1 troffset-computation troffset-computation-copy-rb --config format.revlog-compression=none -q
+  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy-rb
   $ cd troffset-computation-copy-rb
   $ cat > .hg/hgrc <<EOF
   > [hooks]
@@ -160,9 +328,14 @@
   rollback completed
   abort: pretxnchangegroup hook exited with status 1
   [40]
+
+The split was rolled back
+
   $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=1046
-  .hg/store/data/file.i: size=128
+  .hg/store/data/file.d: size=0
+  .hg/store/data/file.i: size=1174
+
+
   $ hg tip
   changeset:   1:cfa8d6e60429
   tag:         tip
@@ -171,8 +344,85 @@
   summary:     b
   
   $ hg verify -q
-   warning: revlog 'data/file.d' not in fncache!
-  1 warnings encountered!
-  hint: run "hg debugrebuildfncache" to recover from corrupt fncache
   $ cd ..
 
+Read race
+=========
+
+We check that a client that started reading a revlog (its index) after the
+split and finished reading (the data) after the rollback is fine
+
+  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-race
+  $ cd troffset-computation-race
+  $ cat > .hg/hgrc <<EOF
+  > [hooks]
+  > pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
+  > pretxnclose = false
+  > EOF
+
+start a reader
+
+  $ hg cat --rev 0 file \
+  > --config "extensions.wait_read=$TESTTMP/reader_wait_split.py" \
+  > 2> $TESTTMP/reader.stderr \
+  > > $TESTTMP/reader.stdout &
+
+Do a failed pull in parallel
+
+  $ hg pull ../troffset-computation
+  pulling from ../troffset-computation
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [40]
+  $ touch $TESTTMP/writer-revlog-unsplit
+  $ wait
+
+The reader should be fine
+  $ cat $TESTTMP/reader.stderr
+  $ cat $TESTTMP/reader.stdout
+                     1 (no-eol)
+  $ cd ..
+
+pending hooks
+=============
+
+We check that hooks properly see the inside of the transaction, while other processes don't.
+
+  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-hooks
+  $ cd troffset-computation-hooks
+  $ cat > .hg/hgrc <<EOF
+  > [hooks]
+  > pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
+  > pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
+  > pretxnclose.03-abort = false
+  > EOF
+
+  $ (
+  >   $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-tr-ready;\
+  >   hg cat -r 'max(all())' file | f --size;\
+  >   touch $TESTTMP/hook-done
+  > ) >stdout 2>stderr &
+
+  $ hg pull ../troffset-computation
+  pulling from ../troffset-computation
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  size=131072
+  transaction abort!
+  rollback completed
+  abort: pretxnclose.03-abort hook exited with status 1
+  [40]
+
+  $ cat stdout
+  size=1024
+  $ cat stderr
+
+
+  $ cd ..