changeset 45411:a42999f611ff

merge with stable
author Augie Fackler <augie@google.com>
date Mon, 07 Sep 2020 15:20:31 -0400
parents 2d08dcf8fd9e (diff) d58a205d0672 (current diff)
children 8ddbb75bad09
files
diffstat 97 files changed, 2591 insertions(+), 1570 deletions(-) [+]
line wrap: on
line diff
--- a/.editorconfig	Wed Sep 02 12:31:37 2020 +0200
+++ b/.editorconfig	Mon Sep 07 15:20:31 2020 -0400
@@ -6,13 +6,16 @@
 indent_size = 4
 indent_style = space
 trim_trailing_whitespace = true
+end_of_line = lf
 
 [*.{c,h}]
 indent_size = 8
 indent_style = tab
 trim_trailing_whitespace = true
+end_of_line = lf
 
 [*.t]
 indent_size = 2
 indent_style = space
 trim_trailing_whitespace = false
+end_of_line = lf
--- a/Makefile	Wed Sep 02 12:31:37 2020 +0200
+++ b/Makefile	Mon Sep 07 15:20:31 2020 -0400
@@ -234,7 +234,6 @@
 	make -C contrib/chg \
 	  HGPATH=/usr/local/bin/hg \
 	  PYTHON=/usr/bin/python2.7 \
-	  HGEXTDIR=/Library/Python/2.7/site-packages/hgext \
 	  DESTDIR=../../build/mercurial \
 	  PREFIX=/usr/local \
 	  clean install
--- a/contrib/check-py3-compat.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/contrib/check-py3-compat.py	Mon Sep 07 15:20:31 2020 -0400
@@ -97,6 +97,15 @@
     if sys.version_info[0] == 2:
         fn = check_compat_py2
     else:
+        # check_compat_py3 will import every filename we specify as long as it
+        # starts with one of a few prefixes. It does this by converting
+        # specified filenames like 'mercurial/foo.py' to 'mercurial.foo' and
+        # importing that. When running standalone (not as part of a test), this
+        # means we actually import the installed versions, not the files we just
+        # specified. When running as test-check-py3-compat.t, we technically
+        # would import the correct paths, but it's cleaner to have both cases
+        # use the same import logic.
+        sys.path.insert(0, '.')
         fn = check_compat_py3
 
     for f in sys.argv[1:]:
--- a/hgext/convert/hg.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/convert/hg.py	Mon Sep 07 15:20:31 2020 -0400
@@ -217,7 +217,8 @@
         """
         anc = [p1ctx.ancestor(p2ctx)]
         # Calculate what files are coming from p2
-        actions, diverge, rename = mergemod.calculateupdates(
+        # TODO: mresult.commitinfo might be able to get that info
+        mresult = mergemod.calculateupdates(
             self.repo,
             p1ctx,
             p2ctx,
@@ -228,7 +229,7 @@
             followcopies=False,
         )
 
-        for file, (action, info, msg) in pycompat.iteritems(actions):
+        for file, (action, info, msg) in mresult.filemap():
             if source.targetfilebelongstosource(file):
                 # If the file belongs to the source repo, ignore the p2
                 # since it will be covered by the existing fileset.
--- a/hgext/extdiff.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/extdiff.py	Mon Sep 07 15:20:31 2020 -0400
@@ -255,7 +255,6 @@
     tmproot,
     dir1a,
     dir1b,
-    dir2root,
     dir2,
     rev1a,
     rev1b,
@@ -267,7 +266,7 @@
     waitprocs = []
     totalfiles = len(commonfiles)
     for idx, commonfile in enumerate(sorted(commonfiles)):
-        path1a = os.path.join(tmproot, dir1a, commonfile)
+        path1a = os.path.join(dir1a, commonfile)
         label1a = commonfile + rev1a
         if not os.path.isfile(path1a):
             path1a = pycompat.osdevnull
@@ -275,12 +274,12 @@
         path1b = b''
         label1b = b''
         if do3way:
-            path1b = os.path.join(tmproot, dir1b, commonfile)
+            path1b = os.path.join(dir1b, commonfile)
             label1b = commonfile + rev1b
             if not os.path.isfile(path1b):
                 path1b = pycompat.osdevnull
 
-        path2 = os.path.join(dir2root, dir2, commonfile)
+        path2 = os.path.join(dir2, commonfile)
         label2 = commonfile + rev2
 
         if confirm:
@@ -457,23 +456,23 @@
     label1b = rev1b
     label2 = rev2
 
-    # If only one change, diff the files instead of the directories
-    # Handle bogus modifies correctly by checking if the files exist
-    if len(common) == 1:
-        common_file = util.localpath(common.pop())
-        dir1a = os.path.join(tmproot, dir1a, common_file)
-        label1a = common_file + rev1a
-        if not os.path.isfile(dir1a):
-            dir1a = pycompat.osdevnull
-        if do3way:
-            dir1b = os.path.join(tmproot, dir1b, common_file)
-            label1b = common_file + rev1b
-            if not os.path.isfile(dir1b):
-                dir1b = pycompat.osdevnull
-        dir2 = os.path.join(dir2root, dir2, common_file)
-        label2 = common_file + rev2
+    if not opts.get(b'per_file'):
+        # If only one change, diff the files instead of the directories
+        # Handle bogus modifies correctly by checking if the files exist
+        if len(common) == 1:
+            common_file = util.localpath(common.pop())
+            dir1a = os.path.join(tmproot, dir1a, common_file)
+            label1a = common_file + rev1a
+            if not os.path.isfile(dir1a):
+                dir1a = pycompat.osdevnull
+            if do3way:
+                dir1b = os.path.join(tmproot, dir1b, common_file)
+                label1b = common_file + rev1b
+                if not os.path.isfile(dir1b):
+                    dir1b = pycompat.osdevnull
+            dir2 = os.path.join(dir2root, dir2, common_file)
+            label2 = common_file + rev2
 
-    if not opts.get(b'per_file'):
         # Run the external tool on the 2 temp directories or the patches
         cmdline = formatcmdline(
             cmdline,
@@ -499,10 +498,9 @@
             confirm=opts.get(b'confirm'),
             commonfiles=common,
             tmproot=tmproot,
-            dir1a=dir1a,
-            dir1b=dir1b,
-            dir2root=dir2root,
-            dir2=dir2,
+            dir1a=os.path.join(tmproot, dir1a),
+            dir1b=os.path.join(tmproot, dir1b) if do3way else None,
+            dir2=os.path.join(dir2root, dir2),
             rev1a=rev1a,
             rev1b=rev1b,
             rev2=rev2,
@@ -711,45 +709,67 @@
         )
 
 
+def _gettooldetails(ui, cmd, path):
+    """
+    returns the following things for a
+    ```
+    [extdiff]
+    <cmd> = <path>
+    ```
+    entry:
+
+    cmd: command/tool name
+    path: path to the tool
+    cmdline: the command which should be run
+    isgui: whether the tool uses GUI or not
+
+    Reads all external tools related configs, whether it be extdiff section,
+    diff-tools or merge-tools section, or it's specified in an old format or
+    the latest format.
+    """
+    path = util.expandpath(path)
+    if cmd.startswith(b'cmd.'):
+        cmd = cmd[4:]
+        if not path:
+            path = procutil.findexe(cmd)
+            if path is None:
+                path = filemerge.findexternaltool(ui, cmd) or cmd
+        diffopts = ui.config(b'extdiff', b'opts.' + cmd)
+        cmdline = procutil.shellquote(path)
+        if diffopts:
+            cmdline += b' ' + diffopts
+        isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
+    else:
+        if path:
+            # case "cmd = path opts"
+            cmdline = path
+            diffopts = len(pycompat.shlexsplit(cmdline)) > 1
+        else:
+            # case "cmd ="
+            path = procutil.findexe(cmd)
+            if path is None:
+                path = filemerge.findexternaltool(ui, cmd) or cmd
+            cmdline = procutil.shellquote(path)
+            diffopts = False
+        isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
+    # look for diff arguments in [diff-tools] then [merge-tools]
+    if not diffopts:
+        key = cmd + b'.diffargs'
+        for section in (b'diff-tools', b'merge-tools'):
+            args = ui.config(section, key)
+            if args:
+                cmdline += b' ' + args
+                if isgui is None:
+                    isgui = ui.configbool(section, cmd + b'.gui') or False
+                break
+    return cmd, path, cmdline, isgui
+
+
 def uisetup(ui):
     for cmd, path in ui.configitems(b'extdiff'):
-        path = util.expandpath(path)
-        if cmd.startswith(b'cmd.'):
-            cmd = cmd[4:]
-            if not path:
-                path = procutil.findexe(cmd)
-                if path is None:
-                    path = filemerge.findexternaltool(ui, cmd) or cmd
-            diffopts = ui.config(b'extdiff', b'opts.' + cmd)
-            cmdline = procutil.shellquote(path)
-            if diffopts:
-                cmdline += b' ' + diffopts
-            isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
-        elif cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
+        if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
             continue
-        else:
-            if path:
-                # case "cmd = path opts"
-                cmdline = path
-                diffopts = len(pycompat.shlexsplit(cmdline)) > 1
-            else:
-                # case "cmd ="
-                path = procutil.findexe(cmd)
-                if path is None:
-                    path = filemerge.findexternaltool(ui, cmd) or cmd
-                cmdline = procutil.shellquote(path)
-                diffopts = False
-            isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
-        # look for diff arguments in [diff-tools] then [merge-tools]
-        if not diffopts:
-            key = cmd + b'.diffargs'
-            for section in (b'diff-tools', b'merge-tools'):
-                args = ui.config(section, key)
-                if args:
-                    cmdline += b' ' + args
-                    if isgui is None:
-                        isgui = ui.configbool(section, cmd + b'.gui') or False
-                    break
+        cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path)
         command(
             cmd,
             extdiffopts[:],
--- a/hgext/fix.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/fix.py	Mon Sep 07 15:20:31 2020 -0400
@@ -241,15 +241,15 @@
     of files, unless the --whole flag is used. Some tools may always affect the
     whole file regardless of --whole.
 
-    If revisions are specified with --rev, those revisions will be checked, and
-    they may be replaced with new revisions that have fixed file content.  It is
-    desirable to specify all descendants of each specified revision, so that the
-    fixes propagate to the descendants. If all descendants are fixed at the same
-    time, no merging, rebasing, or evolution will be required.
+    If --working-dir is used, files with uncommitted changes in the working copy
+    will be fixed. Note that no backups are made.
 
-    If --working-dir is used, files with uncommitted changes in the working copy
-    will be fixed. If the checked-out revision is also fixed, the working
-    directory will update to the replacement revision.
+    If revisions are specified with --source, those revisions and their
+    descendants will be checked, and they may be replaced with new revisions
+    that have fixed file content. By automatically including the descendants,
+    no merging, rebasing, or evolution will be required. If an ancestor of the
+    working copy is included, then the working copy itself will also be fixed,
+    and the working copy will be updated to the fixed parent.
 
     When determining what lines of each file to fix at each revision, the whole
     set of revisions being fixed is considered, so that fixes to earlier
--- a/hgext/histedit.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/histedit.py	Mon Sep 07 15:20:31 2020 -0400
@@ -635,12 +635,11 @@
 
 def applychanges(ui, repo, ctx, opts):
     """Merge changeset from ctx (only) in the current working directory"""
-    wcpar = repo.dirstate.p1()
-    if ctx.p1().node() == wcpar:
+    if ctx.p1().node() == repo.dirstate.p1():
         # edits are "in place" we do not need to make any merge,
         # just applies changes on parent for editing
         ui.pushbuffer()
-        cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
+        cmdutil.revert(ui, repo, ctx, all=True)
         stats = mergemod.updateresult(0, 0, 0, 0)
         ui.popbuffer()
     else:
--- a/hgext/hooklib/changeset_obsoleted.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/hooklib/changeset_obsoleted.py	Mon Sep 07 15:20:31 2020 -0400
@@ -13,7 +13,7 @@
   messageidseed = myseed
 
   [hooks]
-  pretxnclose.changeset_obsoleted = \
+  txnclose.changeset_obsoleted = \
     python:hgext.hooklib.changeset_obsoleted.hook
 """
 
@@ -26,6 +26,7 @@
 from mercurial import (
     encoding,
     error,
+    formatter,
     logcmdutil,
     mail,
     obsutil,
@@ -62,7 +63,7 @@
         b'notify_obsoleted', b'messageidseed'
     ) or ui.config(b'notify', b'messageidseed')
     template = ui.config(b'notify_obsoleted', b'template')
-    spec = logcmdutil.templatespec(template, None)
+    spec = formatter.literal_templatespec(template)
     templater = logcmdutil.changesettemplater(ui, repo, spec)
     ui.pushbuffer()
     n = notify.notifier(ui, repo, b'incoming')
--- a/hgext/hooklib/changeset_published.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/hooklib/changeset_published.py	Mon Sep 07 15:20:31 2020 -0400
@@ -26,6 +26,7 @@
 from mercurial import (
     encoding,
     error,
+    formatter,
     logcmdutil,
     mail,
     pycompat,
@@ -61,7 +62,7 @@
         b'notify_published', b'messageidseed'
     ) or ui.config(b'notify', b'messageidseed')
     template = ui.config(b'notify_published', b'template')
-    spec = logcmdutil.templatespec(template, None)
+    spec = formatter.literal_templatespec(template)
     templater = logcmdutil.changesettemplater(ui, repo, spec)
     ui.pushbuffer()
     n = notify.notifier(ui, repo, b'incoming')
--- a/hgext/largefiles/overrides.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/largefiles/overrides.py	Mon Sep 07 15:20:31 2020 -0400
@@ -52,6 +52,8 @@
 
 lfstatus = lfutil.lfstatus
 
+MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
+
 # -- Utility functions: commonly/repeatedly needed functionality ---------------
 
 
@@ -543,16 +545,16 @@
     origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
 ):
     overwrite = force and not branchmerge
-    actions, diverge, renamedelete = origfn(
+    mresult = origfn(
         repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
     )
 
     if overwrite:
-        return actions, diverge, renamedelete
+        return mresult
 
     # Convert to dictionary with filename as key and action as value.
     lfiles = set()
-    for f in actions:
+    for f in mresult.files():
         splitstandin = lfutil.splitstandin(f)
         if splitstandin is not None and splitstandin in p1:
             lfiles.add(splitstandin)
@@ -561,8 +563,8 @@
 
     for lfile in sorted(lfiles):
         standin = lfutil.standin(lfile)
-        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
-        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
+        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
+        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
         if sm in (b'g', b'dc') and lm != b'r':
             if sm == b'dc':
                 f1, f2, fa, move, anc = sargs
@@ -578,14 +580,18 @@
                 % lfile
             )
             if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
-                actions[lfile] = (b'r', None, b'replaced by standin')
-                actions[standin] = (b'g', sargs, b'replaces standin')
+                mresult.addfile(lfile, b'r', None, b'replaced by standin')
+                mresult.addfile(standin, b'g', sargs, b'replaces standin')
             else:  # keep local normal file
-                actions[lfile] = (b'k', None, b'replaces standin')
+                mresult.addfile(lfile, b'k', None, b'replaces standin')
                 if branchmerge:
-                    actions[standin] = (b'k', None, b'replaced by non-standin')
+                    mresult.addfile(
+                        standin, b'k', None, b'replaced by non-standin',
+                    )
                 else:
-                    actions[standin] = (b'r', None, b'replaced by non-standin')
+                    mresult.addfile(
+                        standin, b'r', None, b'replaced by non-standin',
+                    )
         elif lm in (b'g', b'dc') and sm != b'r':
             if lm == b'dc':
                 f1, f2, fa, move, anc = largs
@@ -603,31 +609,36 @@
             if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                 if branchmerge:
                     # largefile can be restored from standin safely
-                    actions[lfile] = (b'k', None, b'replaced by standin')
-                    actions[standin] = (b'k', None, b'replaces standin')
+                    mresult.addfile(
+                        lfile, b'k', None, b'replaced by standin',
+                    )
+                    mresult.addfile(standin, b'k', None, b'replaces standin')
                 else:
                     # "lfile" should be marked as "removed" without
                     # removal of itself
-                    actions[lfile] = (
-                        b'lfmr',
+                    mresult.addfile(
+                        lfile,
+                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                         None,
                         b'forget non-standin largefile',
                     )
 
                     # linear-merge should treat this largefile as 're-added'
-                    actions[standin] = (b'a', None, b'keep standin')
+                    mresult.addfile(standin, b'a', None, b'keep standin')
             else:  # pick remote normal file
-                actions[lfile] = (b'g', largs, b'replaces standin')
-                actions[standin] = (b'r', None, b'replaced by non-standin')
+                mresult.addfile(lfile, b'g', largs, b'replaces standin')
+                mresult.addfile(
+                    standin, b'r', None, b'replaced by non-standin',
+                )
 
-    return actions, diverge, renamedelete
+    return mresult
 
 
 @eh.wrapfunction(mergestatemod, b'recordupdates')
 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
-    if b'lfmr' in actions:
+    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        for lfile, args, msg in actions[b'lfmr']:
+        for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
             # this should be executed before 'orig', to execute 'remove'
             # before all other actions
             repo.dirstate.remove(lfile)
@@ -863,7 +874,7 @@
 # the matcher to hit standins instead of largefiles. Based on the
 # resulting standins update the largefiles.
 @eh.wrapfunction(cmdutil, b'revert')
-def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
+def overriderevert(orig, ui, repo, ctx, *pats, **opts):
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
     # prevent others from changing them in their incorrect state.
@@ -926,7 +937,7 @@
             return m
 
         with extensions.wrappedfunction(scmutil, b'match', overridematch):
-            orig(ui, repo, ctx, parents, *pats, **opts)
+            orig(ui, repo, ctx, *pats, **opts)
 
         newstandins = lfutil.getstandinsstate(repo)
         filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
--- a/hgext/mq.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/mq.py	Mon Sep 07 15:20:31 2020 -0400
@@ -1717,11 +1717,7 @@
             except:  # re-raises
                 self.ui.warn(_(b'cleaning up working directory...\n'))
                 cmdutil.revert(
-                    self.ui,
-                    repo,
-                    repo[b'.'],
-                    repo.dirstate.parents(),
-                    no_backup=True,
+                    self.ui, repo, repo[b'.'], no_backup=True,
                 )
                 # only remove unknown files that we know we touched or
                 # created while patching
--- a/hgext/narrow/__init__.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/narrow/__init__.py	Mon Sep 07 15:20:31 2020 -0400
@@ -11,9 +11,9 @@
 from mercurial import (
     localrepo,
     registrar,
+    requirements,
 )
 
-from mercurial.interfaces import repository
 
 from . import (
     narrowbundle2,
@@ -52,7 +52,7 @@
 
 
 def featuresetup(ui, features):
-    features.add(repository.NARROW_REQUIREMENT)
+    features.add(requirements.NARROW_REQUIREMENT)
 
 
 def uisetup(ui):
@@ -69,7 +69,7 @@
         return
 
     repo.ui.setconfig(b'experimental', b'narrow', True, b'narrow-ext')
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowwirepeer.reposetup(repo)
 
--- a/hgext/narrow/narrowbundle2.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/narrow/narrowbundle2.py	Mon Sep 07 15:20:31 2020 -0400
@@ -20,11 +20,11 @@
     localrepo,
     narrowspec,
     repair,
+    requirements,
     scmutil,
     util,
     wireprototypes,
 )
-from mercurial.interfaces import repository
 from mercurial.utils import stringutil
 
 _NARROWACL_SECTION = b'narrowacl'
@@ -108,7 +108,7 @@
 
         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', version)
-        if b'treemanifest' in repo.requirements:
+        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             part.addparam(b'treemanifest', b'1')
 
 
@@ -163,7 +163,7 @@
 
         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', version)
-        if b'treemanifest' in repo.requirements:
+        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             part.addparam(b'treemanifest', b'1')
 
 
@@ -178,8 +178,8 @@
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
-    if not repository.NARROW_REQUIREMENT in op.repo.requirements:
-        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
+    if not requirements.NARROW_REQUIREMENT in op.repo.requirements:
+        op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
         scmutil.writereporequirements(op.repo)
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)
@@ -194,8 +194,8 @@
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
-    if repository.NARROW_REQUIREMENT not in op.repo.requirements:
-        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
+    if requirements.NARROW_REQUIREMENT not in op.repo.requirements:
+        op.repo.requirements.add(requirements.NARROW_REQUIREMENT)
         scmutil.writereporequirements(op.repo)
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)
--- a/hgext/narrow/narrowcommands.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/narrow/narrowcommands.py	Mon Sep 07 15:20:31 2020 -0400
@@ -27,11 +27,11 @@
     registrar,
     repair,
     repoview,
+    requirements,
     sparse,
     util,
     wireprototypes,
 )
-from mercurial.interfaces import repository
 
 table = {}
 command = registrar.command(table)
@@ -133,7 +133,7 @@
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
@@ -150,7 +150,7 @@
 
 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
         includes = set(opts.get('include', []))
         excludes = set(opts.get('exclude', []))
@@ -166,7 +166,7 @@
 
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
     if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
@@ -482,7 +482,7 @@
     exclude switches, the changes are applied immediately.
     """
     opts = pycompat.byteskwargs(opts)
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         raise error.Abort(
             _(
                 b'the tracked command is only supported on '
--- a/hgext/patchbomb.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/patchbomb.py	Mon Sep 07 15:20:31 2020 -0400
@@ -207,7 +207,7 @@
     if not tmpl:
         return b' '.join(flags)
     out = util.stringio()
-    spec = formatter.templatespec(b'', templater.unquotestring(tmpl), None)
+    spec = formatter.literal_templatespec(templater.unquotestring(tmpl))
     with formatter.templateformatter(ui, out, b'patchbombflag', {}, spec) as fm:
         fm.startitem()
         fm.context(ctx=repo[rev])
--- a/hgext/phabricator.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/phabricator.py	Mon Sep 07 15:20:31 2020 -0400
@@ -238,8 +238,9 @@
 
     def decorate(fn):
         def inner(*args, **kwargs):
-            if kwargs.get('test_vcr'):
-                cassette = pycompat.fsdecode(kwargs.pop('test_vcr'))
+            vcr = kwargs.pop('test_vcr')
+            if vcr:
+                cassette = pycompat.fsdecode(vcr)
                 import hgdemandimport
 
                 with hgdemandimport.deactivated():
--- a/hgext/remotefilelog/__init__.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/remotefilelog/__init__.py	Mon Sep 07 15:20:31 2020 -0400
@@ -150,6 +150,7 @@
     localrepo,
     match as matchmod,
     merge,
+    mergestate as mergestatemod,
     node as nodemod,
     patch,
     pycompat,
@@ -479,36 +480,38 @@
 
 # prefetch files before update
 def applyupdates(
-    orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
+    orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts
 ):
     if isenabled(repo):
         manifest = mctx.manifest()
         files = []
-        for f, args, msg in actions[b'g']:
+        for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]):
             files.append((f, hex(manifest[f])))
         # batch fetch the needed files from the server
         repo.fileservice.prefetch(files)
-    return orig(
-        repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels
-    )
+    return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts)
 
 
 # Prefetch merge checkunknownfiles
-def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs):
+def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs):
     if isenabled(repo):
         files = []
         sparsematch = repo.maybesparsematch(mctx.rev())
-        for f, (m, actionargs, msg) in pycompat.iteritems(actions):
+        for f, (m, actionargs, msg) in mresult.filemap():
             if sparsematch and not sparsematch(f):
                 continue
-            if m in (b'c', b'dc', b'cm'):
+            if m in (
+                mergestatemod.ACTION_CREATED,
+                mergestatemod.ACTION_DELETED_CHANGED,
+                mergestatemod.ACTION_CREATED_MERGE,
+            ):
                 files.append((f, hex(mctx.filenode(f))))
-            elif m == b'dg':
+            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
                 f2 = actionargs[0]
                 files.append((f2, hex(mctx.filenode(f2))))
         # batch fetch the needed files from the server
         repo.fileservice.prefetch(files)
-    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
+    return orig(repo, wctx, mctx, force, mresult, *args, **kwargs)
 
 
 # Prefetch files before status attempts to look at their size and contents
--- a/hgext/remotefilelog/remotefilelogserver.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/remotefilelog/remotefilelogserver.py	Mon Sep 07 15:20:31 2020 -0400
@@ -23,6 +23,7 @@
     extensions,
     match,
     pycompat,
+    requirements,
     store,
     streamclone,
     util,
@@ -169,7 +170,7 @@
                         if kind == stat.S_IFDIR:
                             visit.append(fp)
 
-            if b'treemanifest' in repo.requirements:
+            if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
                 for (u, e, s) in repo.store.datafiles():
                     if u.startswith(b'meta/') and (
                         u.endswith(b'.i') or u.endswith(b'.d')
--- a/hgext/sqlitestore.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/hgext/sqlitestore.py	Mon Sep 07 15:20:31 2020 -0400
@@ -67,6 +67,7 @@
     mdiff,
     pycompat,
     registrar,
+    requirements,
     util,
     verify,
 )
@@ -1151,7 +1152,7 @@
     supported.add(REQUIREMENT_ZLIB)
     supported.add(REQUIREMENT_NONE)
     supported.add(REQUIREMENT_SHALLOW_FILES)
-    supported.add(repository.NARROW_REQUIREMENT)
+    supported.add(requirements.NARROW_REQUIREMENT)
 
 
 def newreporequirements(orig, ui, createopts):
--- a/mercurial/bundle2.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/bundle2.py	Mon Sep 07 15:20:31 2020 -0400
@@ -166,6 +166,7 @@
     phases,
     pushkey,
     pycompat,
+    requirements,
     scmutil,
     streamclone,
     tags,
@@ -1965,7 +1966,7 @@
         nbchangesets = int(inpart.params.get(b'nbchanges'))
     if (
         b'treemanifest' in inpart.params
-        and b'treemanifest' not in op.repo.requirements
+        and requirements.TREEMANIFEST_REQUIREMENT not in op.repo.requirements
     ):
         if len(op.repo.changelog) != 0:
             raise error.Abort(
@@ -1974,7 +1975,7 @@
                     b"non-empty and does not use tree manifests"
                 )
             )
-        op.repo.requirements.add(b'treemanifest')
+        op.repo.requirements.add(requirements.TREEMANIFEST_REQUIREMENT)
         op.repo.svfs.options = localrepo.resolvestorevfsoptions(
             op.repo.ui, op.repo.requirements, op.repo.features
         )
@@ -2576,7 +2577,7 @@
 
         part = bundler.newpart(b'changegroup', data=cgdata)
         part.addparam(b'version', cgversion)
-        if b'treemanifest' in repo.requirements:
+        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             part.addparam(b'treemanifest', b'1')
         if b'exp-sidedata-flag' in repo.requirements:
             part.addparam(b'exp-sidedata', b'1')
--- a/mercurial/changegroup.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/changegroup.py	Mon Sep 07 15:20:31 2020 -0400
@@ -26,6 +26,7 @@
     mdiff,
     phases,
     pycompat,
+    requirements,
     util,
 )
 
@@ -949,7 +950,7 @@
         # either, because we don't discover which directory nodes to
         # send along with files. This could probably be fixed.
         fastpathlinkrev = fastpathlinkrev and (
-            b'treemanifest' not in repo.requirements
+            requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements
         )
 
         fnodes = {}  # needed file nodes
@@ -1467,7 +1468,7 @@
     if (
         repo.ui.configbool(b'experimental', b'changegroup3')
         or repo.ui.configbool(b'experimental', b'treemanifest')
-        or b'treemanifest' in repo.requirements
+        or requirements.TREEMANIFEST_REQUIREMENT in repo.requirements
     ):
         # we keep version 03 because we need to to exchange treemanifest data
         #
@@ -1495,7 +1496,7 @@
 # Changegroup versions that can be created from the repo
 def supportedoutgoingversions(repo):
     versions = allsupportedversions(repo)
-    if b'treemanifest' in repo.requirements:
+    if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 support only flat manifests and it's just too
         # expensive to convert between the flat manifest and tree manifest on
         # the fly. Since tree manifests are hashed differently, all of history
@@ -1503,7 +1504,7 @@
         # support versions 01 and 02.
         versions.discard(b'01')
         versions.discard(b'02')
-    if repository.NARROW_REQUIREMENT in repo.requirements:
+    if requirements.NARROW_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # support that for stripping and unbundling to work.
         versions.discard(b'01')
--- a/mercurial/changelog.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/changelog.py	Mon Sep 07 15:20:31 2020 -0400
@@ -524,10 +524,6 @@
         user,
         date=None,
         extra=None,
-        p1copies=None,
-        p2copies=None,
-        filesadded=None,
-        filesremoved=None,
     ):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
@@ -559,48 +555,10 @@
                 raise error.StorageError(
                     _(b'the name \'%s\' is reserved') % branch
                 )
-        sortedfiles = sorted(files)
+        sortedfiles = sorted(files.touched)
         sidedata = None
-        if extra is not None:
-            for name in (
-                b'p1copies',
-                b'p2copies',
-                b'filesadded',
-                b'filesremoved',
-            ):
-                extra.pop(name, None)
-        if p1copies is not None:
-            p1copies = metadata.encodecopies(sortedfiles, p1copies)
-        if p2copies is not None:
-            p2copies = metadata.encodecopies(sortedfiles, p2copies)
-        if filesadded is not None:
-            filesadded = metadata.encodefileindices(sortedfiles, filesadded)
-        if filesremoved is not None:
-            filesremoved = metadata.encodefileindices(sortedfiles, filesremoved)
-        if self._copiesstorage == b'extra':
-            extrasentries = p1copies, p2copies, filesadded, filesremoved
-            if extra is None and any(x is not None for x in extrasentries):
-                extra = {}
-            if p1copies is not None:
-                extra[b'p1copies'] = p1copies
-            if p2copies is not None:
-                extra[b'p2copies'] = p2copies
-            if filesadded is not None:
-                extra[b'filesadded'] = filesadded
-            if filesremoved is not None:
-                extra[b'filesremoved'] = filesremoved
-        elif self._copiesstorage == b'changeset-sidedata':
-            sidedata = {}
-            if p1copies:
-                sidedata[sidedatamod.SD_P1COPIES] = p1copies
-            if p2copies:
-                sidedata[sidedatamod.SD_P2COPIES] = p2copies
-            if filesadded:
-                sidedata[sidedatamod.SD_FILESADDED] = filesadded
-            if filesremoved:
-                sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
-            if not sidedata:
-                sidedata = None
+        if self._copiesstorage == b'changeset-sidedata':
+            sidedata = metadata.encode_copies_sidedata(files)
 
         if extra:
             extra = encodeextra(extra)
--- a/mercurial/cmdutil.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/cmdutil.py	Mon Sep 07 15:20:31 2020 -0400
@@ -46,6 +46,7 @@
     phases,
     pycompat,
     repair,
+    requirements,
     revlog,
     rewriteutil,
     scmutil,
@@ -1358,7 +1359,7 @@
         if cl:
             r = repo.unfiltered().changelog
         elif dir:
-            if b'treemanifest' not in repo.requirements:
+            if requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements:
                 raise error.Abort(
                     _(
                         b"--dir can only be used on repos with "
@@ -3258,6 +3259,7 @@
         if opts.get(b'secret'):
             commitphase = phases.secret
         newid = repo.commitctx(new)
+        ms.reset()
 
         # Reroute the working copy parent to the new changeset
         repo.setparents(newid, nullid)
@@ -3375,7 +3377,7 @@
 
 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
     ui = repo.ui
-    spec = formatter.templatespec(ref, None, None)
+    spec = formatter.reference_templatespec(ref)
     t = logcmdutil.changesettemplater(ui, repo, spec)
     t.t.cache.update(
         (k, templater.unquotestring(v))
@@ -3492,9 +3494,9 @@
     return repo.status(match=scmutil.match(repo[None], pats, opts))
 
 
-def revert(ui, repo, ctx, parents, *pats, **opts):
+def revert(ui, repo, ctx, *pats, **opts):
     opts = pycompat.byteskwargs(opts)
-    parent, p2 = parents
+    parent, p2 = repo.dirstate.parents()
     node = ctx.node()
 
     mf = ctx.manifest()
@@ -3780,7 +3782,6 @@
             match = scmutil.match(repo[None], pats)
             _performrevert(
                 repo,
-                parents,
                 ctx,
                 names,
                 uipathfn,
@@ -3806,7 +3807,6 @@
 
 def _performrevert(
     repo,
-    parents,
     ctx,
     names,
     uipathfn,
@@ -3822,7 +3822,7 @@
 
     Make sure you have the working directory locked when calling this function.
     """
-    parent, p2 = parents
+    parent, p2 = repo.dirstate.parents()
     node = ctx.node()
     excluded_files = []
 
--- a/mercurial/commands.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/commands.py	Mon Sep 07 15:20:31 2020 -0400
@@ -837,7 +837,7 @@
     else:
         hg.clean(repo, node, show_stats=False)
         repo.dirstate.setbranch(branch)
-        cmdutil.revert(ui, repo, rctx, repo.dirstate.parents())
+        cmdutil.revert(ui, repo, rctx)
 
     if opts.get(b'no_commit'):
         msg = _(b"changeset %s backed out, don't forget to commit.\n")
@@ -5781,6 +5781,13 @@
     [
         (b'A', b'after', None, _(b'record a rename that has already occurred')),
         (
+            b'',
+            b'at-rev',
+            b'',
+            _(b'(un)mark renames in the given revision (EXPERIMENTAL)'),
+            _(b'REV'),
+        ),
+        (
             b'f',
             b'force',
             None,
@@ -5962,8 +5969,6 @@
             if not m(f):
                 continue
 
-            if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
-                continue
             label, key = mergestateinfo[ms[f]]
             fm.startitem()
             fm.context(ctx=wctx)
@@ -6011,9 +6016,6 @@
 
             didwork = True
 
-            if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
-                continue
-
             # don't let driver-resolved files be marked, and run the conclude
             # step if asked to resolve
             if ms[f] == mergestatemod.MERGE_RECORD_DRIVER_RESOLVED:
@@ -6294,9 +6296,7 @@
             hint = _(b"use --all to revert all files")
         raise error.Abort(msg, hint=hint)
 
-    return cmdutil.revert(
-        ui, repo, ctx, (parent, p2), *pats, **pycompat.strkwargs(opts)
-    )
+    return cmdutil.revert(ui, repo, ctx, *pats, **pycompat.strkwargs(opts))
 
 
 @command(
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/commit.py	Mon Sep 07 15:20:31 2020 -0400
@@ -0,0 +1,438 @@
+# commit.py - function to perform commit
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+
+from .i18n import _
+from .node import (
+    hex,
+    nullid,
+    nullrev,
+)
+
+from . import (
+    context,
+    mergestate,
+    metadata,
+    phases,
+    scmutil,
+    subrepoutil,
+)
+
+
+def _write_copy_meta(repo):
+    """return a (changelog, filelog) boolean tuple
+
+    changelog: copy related information should be stored in the changeset
+    filelog:   copy related information should be written in the file revision
+    """
+    if repo.filecopiesmode == b'changeset-sidedata':
+        writechangesetcopy = True
+        writefilecopymeta = True
+    else:
+        writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
+        writefilecopymeta = writecopiesto != b'changeset-only'
+        writechangesetcopy = writecopiesto in (
+            b'changeset-only',
+            b'compatibility',
+        )
+    return writechangesetcopy, writefilecopymeta
+
+
+def commitctx(repo, ctx, error=False, origctx=None):
+    """Add a new revision to the target repository.
+    Revision information is passed via the context argument.
+
+    ctx.files() should list all files involved in this commit, i.e.
+    modified/added/removed files. On merge, it may be wider than the
+    ctx.files() to be committed, since any file nodes derived directly
+    from p1 or p2 are excluded from the committed ctx.files().
+
+    origctx is for convert to work around the problem that bug
+    fixes to the files list in changesets change hashes. For
+    convert to be the identity, it can pass an origctx and this
+    function will use the same files list when it makes sense to
+    do so.
+    """
+    repo = repo.unfiltered()
+
+    p1, p2 = ctx.p1(), ctx.p2()
+    user = ctx.user()
+
+    with repo.lock(), repo.transaction(b"commit") as tr:
+        mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)
+
+        extra = ctx.extra().copy()
+
+        if extra is not None:
+            for name in (
+                b'p1copies',
+                b'p2copies',
+                b'filesadded',
+                b'filesremoved',
+            ):
+                extra.pop(name, None)
+        if repo.changelog._copiesstorage == b'extra':
+            extra = _extra_with_copies(repo, extra, files)
+
+        # update changelog
+        repo.ui.note(_(b"committing changelog\n"))
+        repo.changelog.delayupdate(tr)
+        n = repo.changelog.add(
+            mn,
+            files,
+            ctx.description(),
+            tr,
+            p1.node(),
+            p2.node(),
+            user,
+            ctx.date(),
+            extra,
+        )
+        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
+        repo.hook(
+            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
+        )
+        # set the new commit in its proper phase
+        targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
+        if targetphase:
+            # retract boundary does not alter parent changesets.
+            # if a parent has a higher phase, the resulting phase
+            # will be compliant anyway
+            #
+            # if minimal phase was 0 we don't need to retract anything
+            phases.registernew(repo, tr, targetphase, [n])
+        return n
+
+
+def _prepare_files(tr, ctx, error=False, origctx=None):
+    repo = ctx.repo()
+    p1 = ctx.p1()
+
+    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
+
+    if ctx.manifestnode():
+        # reuse an existing manifest revision
+        repo.ui.debug(b'reusing known manifest\n')
+        mn = ctx.manifestnode()
+        files = metadata.ChangingFiles()
+        files.update_touched(ctx.files())
+        if writechangesetcopy:
+            files.update_added(ctx.filesadded())
+            files.update_removed(ctx.filesremoved())
+    elif not ctx.files():
+        repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
+        mn = p1.manifestnode()
+        files = metadata.ChangingFiles()
+    else:
+        mn, files = _process_files(tr, ctx, error=error)
+
+    if origctx and origctx.manifestnode() == mn:
+        origfiles = origctx.files()
+        assert files.touched.issubset(origfiles)
+        files.update_touched(origfiles)
+
+    if writechangesetcopy:
+        files.update_copies_from_p1(ctx.p1copies())
+        files.update_copies_from_p2(ctx.p2copies())
+
+    return mn, files
+
+
+def _process_files(tr, ctx, error=False):
+    repo = ctx.repo()
+    p1 = ctx.p1()
+    p2 = ctx.p2()
+
+    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
+
+    m1ctx = p1.manifestctx()
+    m2ctx = p2.manifestctx()
+    mctx = m1ctx.copy()
+
+    m = mctx.read()
+    m1 = m1ctx.read()
+    m2 = m2ctx.read()
+
+    files = metadata.ChangingFiles()
+
+    # check in files
+    added = []
+    removed = list(ctx.removed())
+    linkrev = len(repo)
+    repo.ui.note(_(b"committing files:\n"))
+    uipathfn = scmutil.getuipathfn(repo)
+    for f in sorted(ctx.modified() + ctx.added()):
+        repo.ui.note(uipathfn(f) + b"\n")
+        try:
+            fctx = ctx[f]
+            if fctx is None:
+                removed.append(f)
+            else:
+                added.append(f)
+                m[f], is_touched = _filecommit(
+                    repo, fctx, m1, m2, linkrev, tr, writefilecopymeta,
+                )
+                if is_touched:
+                    if is_touched == 'added':
+                        files.mark_added(f)
+                    else:
+                        files.mark_touched(f)
+                m.setflag(f, fctx.flags())
+        except OSError:
+            repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
+            raise
+        except IOError as inst:
+            errcode = getattr(inst, 'errno', errno.ENOENT)
+            if error or errcode and errcode != errno.ENOENT:
+                repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
+            raise
+
+    # update manifest
+    removed = [f for f in removed if f in m1 or f in m2]
+    drop = sorted([f for f in removed if f in m])
+    for f in drop:
+        del m[f]
+    if p2.rev() == nullrev:
+        files.update_removed(removed)
+    else:
+        rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
+        for f in removed:
+            if not rf(f):
+                files.mark_removed(f)
+
+    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)
+
+    return mn, files
+
+
+def _filecommit(
+    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
+):
+    """
+    commit an individual file as part of a larger transaction
+
+    input:
+
+        fctx:       a file context with the content we are trying to commit
+        manifest1:  manifest of changeset first parent
+        manifest2:  manifest of changeset second parent
+        linkrev:    revision number of the changeset being created
+        tr:         current transaction
+        includecopymeta: boolean, set to False to skip storing the copy
+                    data (only used by the Google specific feature of
+                    using changeset extra as copy source of truth).
+
+    output: (filenode, touched)
+
+        filenode: the filenode that should be used by this changeset
+        touched:  one of: None (meaning untouched), 'added' or 'modified'
+    """
+
+    fname = fctx.path()
+    fparent1 = manifest1.get(fname, nullid)
+    fparent2 = manifest2.get(fname, nullid)
+    touched = None
+    if fparent1 == fparent2 == nullid:
+        touched = 'added'
+
+    if isinstance(fctx, context.filectx):
+        # This block fast-paths the most common comparisons. It
+        # assumes that bare filectx is used and no merge happened, hence no
+        # need to create a new file revision in this case.
+        node = fctx.filenode()
+        if node in [fparent1, fparent2]:
+            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
+            if (
+                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+            ) or (
+                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+            ):
+                touched = 'modified'
+            return node, touched
+
+    flog = repo.file(fname)
+    meta = {}
+    cfname = fctx.copysource()
+    fnode = None
+
+    if cfname and cfname != fname:
+        # Mark the new revision of this file as a copy of another
+        # file.  This copy data will effectively act as a parent
+        # of this new revision.  If this is a merge, the first
+        # parent will be the nullid (meaning "look up the copy data")
+        # and the second one will be the other parent.  For example:
+        #
+        # 0 --- 1 --- 3   rev1 changes file foo
+        #   \       /     rev2 renames foo to bar and changes it
+        #    \- 2 -/      rev3 should have bar with all changes and
+        #                      should record that bar descends from
+        #                      bar in rev2 and foo in rev1
+        #
+        # this allows this merge to succeed:
+        #
+        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
+        #   \       /     merging rev3 and rev4 should use bar@rev2
+        #    \- 2 --- 4        as the merge base
+        #
+
+        cnode = manifest1.get(cfname)
+        newfparent = fparent2
+
+        if manifest2:  # branch merge
+            if fparent2 == nullid or cnode is None:  # copied on remote side
+                if cfname in manifest2:
+                    cnode = manifest2[cfname]
+                    newfparent = fparent1
+
+        # Here, we used to search backwards through history to try to find
+        # where the file copy came from if the source of a copy was not in
+        # the parent directory. However, this doesn't actually make sense to
+        # do (what does a copy from something not in your working copy even
+        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
+        # the user that copy information was dropped, so if they didn't
+        # expect this outcome it can be fixed, but this is the correct
+        # behavior in this circumstance.
+
+        if cnode:
+            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
+            if includecopymeta:
+                meta[b"copy"] = cfname
+                meta[b"copyrev"] = hex(cnode)
+            fparent1, fparent2 = nullid, newfparent
+        else:
+            repo.ui.warn(
+                _(
+                    b"warning: can't find ancestor for '%s' "
+                    b"copied from '%s'!\n"
+                )
+                % (fname, cfname)
+            )
+
+    elif fparent1 == nullid:
+        fparent1, fparent2 = fparent2, nullid
+    elif fparent2 != nullid:
+        # is one parent an ancestor of the other?
+        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
+        if fparent1 in fparentancestors:
+            fparent1, fparent2 = fparent2, nullid
+        elif fparent2 in fparentancestors:
+            fparent2 = nullid
+        elif not fparentancestors:
+            # TODO: this whole if-else might be simplified much more
+            ms = mergestate.mergestate.read(repo)
+            if ms.extras(fname).get(b'filenode-source') == b'other':
+                fparent1, fparent2 = fparent2, nullid
+
+    # is the file changed?
+    text = fctx.data()
+    if fparent2 != nullid or meta or flog.cmp(fparent1, text):
+        if touched is None:  # do not overwrite added
+            touched = 'modified'
+        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
+    # are just the flags changed during merge?
+    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
+        touched = 'modified'
+        fnode = fparent1
+    else:
+        fnode = fparent1
+    return fnode, touched
+
+
+def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
+    """make a new manifest entry (or reuse an existing one)
+
+    given an initialised manifest context and precomputed list of
+    - files: files affected by the commit
+    - added: new entries in the manifest
+    - drop:  entries present in parents but absent of this one
+
+    Create a new manifest revision, reuse existing ones if possible.
+
+    Return the nodeid of the manifest revision.
+    """
+    repo = ctx.repo()
+
+    md = None
+
+    # all this is cached, so it is fine to get them all from the ctx.
+    p1 = ctx.p1()
+    p2 = ctx.p2()
+    m1ctx = p1.manifestctx()
+
+    m1 = m1ctx.read()
+
+    if not files:
+        # if no "files" actually changed in terms of the changelog,
+        # try hard to detect unmodified manifest entry so that the
+        # exact same commit can be reproduced later on convert.
+        md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
+    if not files and md:
+        repo.ui.debug(
+            b'not reusing manifest (no file change in '
+            b'changelog, but manifest differs)\n'
+        )
+    if files or md:
+        repo.ui.note(_(b"committing manifest\n"))
+        # we're using narrowmatch here since it's already applied at
+        # other stages (such as dirstate.walk), so we're already
+        # ignoring things outside of narrowspec in most cases. The
+        # one case where we might have files outside the narrowspec
+        # at this point is merges, and we already error out in the
+        # case where the merge has files outside of the narrowspec,
+        # so this is safe.
+        mn = mctx.write(
+            tr,
+            linkrev,
+            p1.manifestnode(),
+            p2.manifestnode(),
+            added,
+            drop,
+            match=repo.narrowmatch(),
+        )
+    else:
+        repo.ui.debug(
+            b'reusing manifest from p1 (listed files ' b'actually unchanged)\n'
+        )
+        mn = p1.manifestnode()
+
+    return mn
+
+
+def _extra_with_copies(repo, extra, files):
+    """encode copy information into an `extra` dictionary"""
+    p1copies = files.copied_from_p1
+    p2copies = files.copied_from_p2
+    filesadded = files.added
+    filesremoved = files.removed
+    files = sorted(files.touched)
+    if not _write_copy_meta(repo)[1]:
+        # If writing only to changeset extras, use None to indicate that
+        # no entry should be written. If writing to both, write an empty
+        # entry to prevent the reader from falling back to reading
+        # filelogs.
+        p1copies = p1copies or None
+        p2copies = p2copies or None
+        filesadded = filesadded or None
+        filesremoved = filesremoved or None
+
+    extrasentries = p1copies, p2copies, filesadded, filesremoved
+    if extra is None and any(x is not None for x in extrasentries):
+        extra = {}
+    if p1copies is not None:
+        p1copies = metadata.encodecopies(files, p1copies)
+        extra[b'p1copies'] = p1copies
+    if p2copies is not None:
+        p2copies = metadata.encodecopies(files, p2copies)
+        extra[b'p2copies'] = p2copies
+    if filesadded is not None:
+        filesadded = metadata.encodefileindices(files, filesadded)
+        extra[b'filesadded'] = filesadded
+    if filesremoved is not None:
+        filesremoved = metadata.encodefileindices(files, filesremoved)
+        extra[b'filesremoved'] = filesremoved
+    return extra
--- a/mercurial/config.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/config.py	Mon Sep 07 15:20:31 2020 -0400
@@ -21,10 +21,9 @@
 
 
 class config(object):
-    def __init__(self, data=None, includepaths=None):
+    def __init__(self, data=None):
         self._data = {}
         self._unset = []
-        self._includepaths = includepaths or []
         if data:
             for k in data._data:
                 self._data[k] = data[k].copy()
@@ -162,21 +161,15 @@
 
             if m and include:
                 expanded = util.expandpath(m.group(1))
-                includepaths = [os.path.dirname(src)] + self._includepaths
-
-                for base in includepaths:
-                    inc = os.path.normpath(os.path.join(base, expanded))
-
-                    try:
-                        include(inc, remap=remap, sections=sections)
-                        break
-                    except IOError as inst:
-                        if inst.errno != errno.ENOENT:
-                            raise error.ParseError(
-                                _(b"cannot include %s (%s)")
-                                % (inc, encoding.strtolocal(inst.strerror)),
-                                b"%s:%d" % (src, line),
-                            )
+                try:
+                    include(expanded, remap=remap, sections=sections)
+                except IOError as inst:
+                    if inst.errno != errno.ENOENT:
+                        raise error.ParseError(
+                            _(b"cannot include %s (%s)")
+                            % (expanded, encoding.strtolocal(inst.strerror)),
+                            b"%s:%d" % (src, line),
+                        )
                 continue
             if emptyre.match(l):
                 continue
@@ -216,8 +209,15 @@
             b'config files must be opened in binary mode, got fp=%r mode=%r'
             % (fp, fp.mode,)
         )
+
+        dir = os.path.dirname(path)
+
+        def include(rel, remap, sections):
+            abs = os.path.normpath(os.path.join(dir, rel))
+            self.read(abs, remap=remap, sections=sections)
+
         self.parse(
-            path, fp.read(), sections=sections, remap=remap, include=self.read
+            path, fp.read(), sections=sections, remap=remap, include=include
         )
 
 
--- a/mercurial/debugcommands.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/debugcommands.py	Mon Sep 07 15:20:31 2020 -0400
@@ -1668,11 +1668,11 @@
     fm.data(re2=bool(util._re2))
 
     # templates
-    p = templater.templatepaths()
-    fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
+    p = templater.templatedir()
+    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
     fm.condwrite(not p, b'', _(b" no template directories found\n"))
     if p:
-        m = templater.templatepath(b"map-cmdline.default")
+        (m, fp) = templater.try_open_template(b"map-cmdline.default")
         if m:
             # template found, check if it is working
             err = None
--- a/mercurial/dirstate.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/dirstate.py	Mon Sep 07 15:20:31 2020 -0400
@@ -1425,6 +1425,7 @@
         self._opener = opener
         self._root = root
         self._filename = b'dirstate'
+        self._nodelen = 20
 
         self._parents = None
         self._dirtyparents = False
@@ -1609,7 +1610,7 @@
         if not self._parents:
             try:
                 fp = self._opendirstatefile()
-                st = fp.read(40)
+                st = fp.read(2 * self._nodelen)
                 fp.close()
             except IOError as err:
                 if err.errno != errno.ENOENT:
@@ -1618,8 +1619,11 @@
                 st = b''
 
             l = len(st)
-            if l == 40:
-                self._parents = (st[:20], st[20:40])
+            if l == self._nodelen * 2:
+                self._parents = (
+                    st[: self._nodelen],
+                    st[self._nodelen : 2 * self._nodelen],
+                )
             elif l == 0:
                 self._parents = (nullid, nullid)
             else:
@@ -1654,15 +1658,11 @@
 
         if util.safehasattr(parsers, b'dict_new_presized'):
             # Make an estimate of the number of files in the dirstate based on
-            # its size. From a linear regression on a set of real-world repos,
-            # all over 10,000 files, the size of a dirstate entry is 85
-            # bytes. The cost of resizing is significantly higher than the cost
-            # of filling in a larger presized dict, so subtract 20% from the
-            # size.
-            #
-            # This heuristic is imperfect in many ways, so in a future dirstate
-            # format update it makes sense to just record the number of entries
-            # on write.
+            # its size. This trades wasting some memory for avoiding costly
+            # resizes. Each entry has a prefix of 17 bytes followed by one or
+            # two path names. Studies on various large-scale real-world repositories
+            # found 54 bytes a reasonable upper limit for the average path names.
+            # Copy entries are ignored for the sake of this estimate.
             self._map = parsers.dict_new_presized(len(st) // 71)
 
         # Python's garbage collector triggers a GC each time a certain number
--- a/mercurial/exchange.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/exchange.py	Mon Sep 07 15:20:31 2020 -0400
@@ -32,6 +32,7 @@
     phases,
     pushkey,
     pycompat,
+    requirements,
     scmutil,
     sslutil,
     streamclone,
@@ -39,7 +40,6 @@
     util,
     wireprototypes,
 )
-from .interfaces import repository
 from .utils import (
     hashutil,
     stringutil,
@@ -1068,7 +1068,7 @@
     cgpart = bundler.newpart(b'changegroup', data=cgstream)
     if cgversions:
         cgpart.addparam(b'version', version)
-    if b'treemanifest' in pushop.repo.requirements:
+    if requirements.TREEMANIFEST_REQUIREMENT in pushop.repo.requirements:
         cgpart.addparam(b'treemanifest', b'1')
     if b'exp-sidedata-flag' in pushop.repo.requirements:
         cgpart.addparam(b'exp-sidedata', b'1')
@@ -1691,7 +1691,7 @@
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
-        if repository.NARROW_REQUIREMENT in repo.requirements:
+        if requirements.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
@@ -2557,7 +2557,7 @@
 
     part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
 
-    if b'treemanifest' in repo.requirements:
+    if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
         part.addparam(b'treemanifest', b'1')
 
     if b'exp-sidedata-flag' in repo.requirements:
--- a/mercurial/formatter.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/formatter.py	Mon Sep 07 15:20:31 2020 -0400
@@ -540,6 +540,25 @@
     tmpl = attr.ib()
     mapfile = attr.ib()
     refargs = attr.ib(default=None)
+    fp = attr.ib(default=None)
+
+
+def empty_templatespec():
+    return templatespec(None, None, None)
+
+
+def reference_templatespec(ref, refargs=None):
+    return templatespec(ref, None, None, refargs)
+
+
+def literal_templatespec(tmpl):
+    if pycompat.ispy3:
+        assert not isinstance(tmpl, str), b'tmpl must not be a str'
+    return templatespec(b'', tmpl, None)
+
+
+def mapfile_templatespec(topic, mapfile, fp=None):
+    return templatespec(topic, None, mapfile, fp=fp)
 
 
 def lookuptemplate(ui, topic, tmpl):
@@ -563,33 +582,33 @@
     """
 
     if not tmpl:
-        return templatespec(None, None, None)
+        return empty_templatespec()
 
     # looks like a literal template?
     if b'{' in tmpl:
-        return templatespec(b'', tmpl, None)
+        return literal_templatespec(tmpl)
 
     # a reference to built-in (formatter) template
     if tmpl in {b'cbor', b'json', b'pickle', b'debug'}:
-        return templatespec(tmpl, None, None)
+        return reference_templatespec(tmpl)
 
     # a function-style reference to built-in template
     func, fsep, ftail = tmpl.partition(b'(')
     if func in {b'cbor', b'json'} and fsep and ftail.endswith(b')'):
         templater.parseexpr(tmpl)  # make sure syntax errors are confined
-        return templatespec(func, None, None, refargs=ftail[:-1])
+        return reference_templatespec(func, refargs=ftail[:-1])
 
     # perhaps a stock style?
     if not os.path.split(tmpl)[0]:
-        mapname = templater.templatepath(
+        (mapname, fp) = templater.try_open_template(
             b'map-cmdline.' + tmpl
-        ) or templater.templatepath(tmpl)
-        if mapname and os.path.isfile(mapname):
-            return templatespec(topic, None, mapname)
+        ) or templater.try_open_template(tmpl)
+        if mapname:
+            return mapfile_templatespec(topic, mapname, fp)
 
     # perhaps it's a reference to [templates]
     if ui.config(b'templates', tmpl):
-        return templatespec(tmpl, None, None)
+        return reference_templatespec(tmpl)
 
     if tmpl == b'list':
         ui.write(_(b"available styles: %s\n") % templater.stylelist())
@@ -599,13 +618,13 @@
     if (b'/' in tmpl or b'\\' in tmpl) and os.path.isfile(tmpl):
         # is it a mapfile for a style?
         if os.path.basename(tmpl).startswith(b"map-"):
-            return templatespec(topic, None, os.path.realpath(tmpl))
+            return mapfile_templatespec(topic, os.path.realpath(tmpl))
         with util.posixfile(tmpl, b'rb') as f:
             tmpl = f.read()
-        return templatespec(b'', tmpl, None)
+        return literal_templatespec(tmpl)
 
     # constant string?
-    return templatespec(b'', tmpl, None)
+    return literal_templatespec(tmpl)
 
 
 def templatepartsmap(spec, t, partnames):
@@ -626,9 +645,12 @@
     a map file"""
     assert not (spec.tmpl and spec.mapfile)
     if spec.mapfile:
-        frommapfile = templater.templater.frommapfile
-        return frommapfile(
-            spec.mapfile, defaults=defaults, resources=resources, cache=cache
+        return templater.templater.frommapfile(
+            spec.mapfile,
+            spec.fp,
+            defaults=defaults,
+            resources=resources,
+            cache=cache,
         )
     return maketemplater(
         ui, spec.tmpl, defaults=defaults, resources=resources, cache=cache
--- a/mercurial/helptext/internals/revlogs.txt	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/helptext/internals/revlogs.txt	Mon Sep 07 15:20:31 2020 -0400
@@ -215,14 +215,16 @@
 Revision entries consist of an optional 1 byte header followed by an
 encoding of the revision data. The headers are as follows:
 
-\0 (0x00)
-   Revision data is the entirety of the entry, including this header.
-u (0x75)
-   Raw revision data follows.
-x (0x78)
-   zlib (RFC 1950) data.
+\0  (0x00)
+    Revision data is the entirety of the entry, including this header.
+(   (0x28)
+    zstd https://github.com/facebook/zstd
+u   (0x75)
+    Raw revision data follows.
+x   (0x78)
+    zlib (RFC 1950) data.
 
-   The 0x78 value is actually the first byte of the zlib header (CMF byte).
+    The 0x78 value is actually the first byte of the zlib header (CMF byte).
 
 Hash Computation
 ================
--- a/mercurial/hg.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/hg.py	Mon Sep 07 15:20:31 2020 -0400
@@ -38,6 +38,7 @@
     node,
     phases,
     pycompat,
+    requirements,
     scmutil,
     sshpeer,
     statichttprepo,
@@ -49,7 +50,6 @@
     vfs as vfsmod,
 )
 from .utils import hashutil
-from .interfaces import repository as repositorymod
 
 release = lock.release
 
@@ -354,8 +354,8 @@
             sharefile = repo.vfs.join(b'sharedpath')
             util.rename(sharefile, sharefile + b'.old')
 
-            repo.requirements.discard(b'shared')
-            repo.requirements.discard(b'relshared')
+            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
+            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
             scmutil.writereporequirements(repo)
 
     # Removing share changes some fundamental properties of the repo instance.
@@ -388,7 +388,7 @@
     if default:
         template = b'[paths]\ndefault = %s\n'
         destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
-    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
+    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
         with destrepo.wlock():
             narrowspec.copytoworkingcopy(destrepo)
 
--- a/mercurial/hgweb/common.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/hgweb/common.py	Mon Sep 07 15:20:31 2020 -0400
@@ -21,6 +21,7 @@
 from .. import (
     encoding,
     pycompat,
+    templater,
     util,
 )
 
@@ -178,7 +179,7 @@
     return True
 
 
-def staticfile(directory, fname, res):
+def staticfile(templatepath, directory, fname, res):
     """return a file inside directory with guessed Content-Type header
 
     fname always uses '/' as directory separator and isn't allowed to
@@ -190,24 +191,20 @@
     if not ispathsafe(fname):
         return
 
+    if not directory:
+        tp = templatepath or templater.templatedir()
+        if tp is not None:
+            directory = os.path.join(tp, b'static')
+
     fpath = os.path.join(*fname.split(b'/'))
-    if isinstance(directory, bytes):
-        directory = [directory]
-    for d in directory:
-        path = os.path.join(d, fpath)
-        if os.path.exists(path):
-            break
+    ct = pycompat.sysbytes(
+        mimetypes.guess_type(pycompat.fsdecode(fpath))[0] or r"text/plain"
+    )
+    path = os.path.join(directory, fpath)
     try:
         os.stat(path)
-        ct = pycompat.sysbytes(
-            mimetypes.guess_type(pycompat.fsdecode(path))[0] or r"text/plain"
-        )
         with open(path, b'rb') as fh:
             data = fh.read()
-
-        res.headers[b'Content-Type'] = ct
-        res.setbodybytes(data)
-        return res
     except TypeError:
         raise ErrorResponse(HTTP_SERVER_ERROR, b'illegal filename')
     except OSError as err:
@@ -218,6 +215,10 @@
                 HTTP_SERVER_ERROR, encoding.strtolocal(err.strerror)
             )
 
+    res.headers[b'Content-Type'] = ct
+    res.setbodybytes(data)
+    return res
+
 
 def paritygen(stripecount, offset=0):
     """count parity of horizontal stripes for easier reading"""
--- a/mercurial/hgweb/hgweb_mod.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/hgweb/hgweb_mod.py	Mon Sep 07 15:20:31 2020 -0400
@@ -53,7 +53,36 @@
         configfn(b'web', b'style'),
         b'paper',
     )
-    return styles, templater.stylemap(styles, templatepath)
+    return styles, _stylemap(styles, templatepath)
+
+
+def _stylemap(styles, path=None):
+    """Return path to mapfile for a given style.
+
+    Searches mapfile in the following locations:
+    1. templatepath/style/map
+    2. templatepath/map-style
+    3. templatepath/map
+    """
+
+    for style in styles:
+        # only plain name is allowed to honor template paths
+        if (
+            not style
+            or style in (pycompat.oscurdir, pycompat.ospardir)
+            or pycompat.ossep in style
+            or pycompat.osaltsep
+            and pycompat.osaltsep in style
+        ):
+            continue
+        locations = (os.path.join(style, b'map'), b'map-' + style, b'map')
+
+        for location in locations:
+            mapfile, fp = templater.try_open_template(location, path)
+            if mapfile:
+                return style, mapfile, fp
+
+    raise RuntimeError(b"No hgweb templates found in %r" % path)
 
 
 def makebreadcrumb(url, prefix=b''):
@@ -117,23 +146,21 @@
         self.csp, self.nonce = cspvalues(self.repo.ui)
 
     # Trust the settings from the .hg/hgrc files by default.
-    def config(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.config(section, name, default, untrusted=untrusted)
+    def config(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.config(*args, **kwargs)
 
-    def configbool(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configbool(
-            section, name, default, untrusted=untrusted
-        )
+    def configbool(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.configbool(*args, **kwargs)
 
-    def configint(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configint(
-            section, name, default, untrusted=untrusted
-        )
+    def configint(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.configint(*args, **kwargs)
 
-    def configlist(self, section, name, default=uimod._unset, untrusted=True):
-        return self.repo.ui.configlist(
-            section, name, default, untrusted=untrusted
-        )
+    def configlist(self, *args, **kwargs):
+        kwargs.setdefault('untrusted', True)
+        return self.repo.ui.configlist(*args, **kwargs)
 
     def archivelist(self, nodeid):
         return webutil.archivelist(self.repo.ui, nodeid)
@@ -153,7 +180,9 @@
         # figure out which style to use
 
         vars = {}
-        styles, (style, mapfile) = getstyle(req, self.config, self.templatepath)
+        styles, (style, mapfile, fp) = getstyle(
+            req, self.config, self.templatepath
+        )
         if style == styles[0]:
             vars[b'style'] = style
 
@@ -196,10 +225,9 @@
             yield self.config(b'web', b'motd')
 
         tres = formatter.templateresources(self.repo.ui, self.repo)
-        tmpl = templater.templater.frommapfile(
-            mapfile, filters=filters, defaults=defaults, resources=tres
+        return templater.templater.frommapfile(
+            mapfile, fp=fp, filters=filters, defaults=defaults, resources=tres
         )
-        return tmpl
 
     def sendtemplate(self, name, **kwargs):
         """Helper function to send a response generated from a template."""
--- a/mercurial/hgweb/hgwebdir_mod.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/hgweb/hgwebdir_mod.py	Mon Sep 07 15:20:31 2020 -0400
@@ -413,13 +413,7 @@
                 else:
                     fname = req.qsparams[b'static']
                 static = self.ui.config(b"web", b"static", untrusted=False)
-                if not static:
-                    tp = self.templatepath or templater.templatepaths()
-                    if isinstance(tp, bytes):
-                        tp = [tp]
-                    static = [os.path.join(p, b'static') for p in tp]
-
-                staticfile(static, fname, res)
+                staticfile(self.templatepath, static, fname, res)
                 return res.sendresponse()
 
             # top-level index
@@ -538,11 +532,12 @@
         return res.sendresponse()
 
     def templater(self, req, nonce):
-        def config(section, name, default=uimod._unset, untrusted=True):
-            return self.ui.config(section, name, default, untrusted)
+        def config(*args, **kwargs):
+            kwargs.setdefault('untrusted', True)
+            return self.ui.config(*args, **kwargs)
 
         vars = {}
-        styles, (style, mapfile) = hgweb_mod.getstyle(
+        styles, (style, mapfile, fp) = hgweb_mod.getstyle(
             req, config, self.templatepath
         )
         if style == styles[0]:
@@ -577,5 +572,6 @@
             else:
                 yield config(b'web', b'motd')
 
-        tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
-        return tmpl
+        return templater.templater.frommapfile(
+            mapfile, fp=fp, defaults=defaults
+        )
--- a/mercurial/hgweb/webcommands.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/hgweb/webcommands.py	Mon Sep 07 15:20:31 2020 -0400
@@ -36,7 +36,6 @@
     revsetlang,
     scmutil,
     smartset,
-    templater,
     templateutil,
 )
 
@@ -1318,13 +1317,7 @@
     # a repo owner may set web.static in .hg/hgrc to get any file
     # readable by the user running the CGI script
     static = web.config(b"web", b"static", untrusted=False)
-    if not static:
-        tp = web.templatepath or templater.templatepaths()
-        if isinstance(tp, bytes):
-            tp = [tp]
-        static = [os.path.join(p, b'static') for p in tp]
-
-    staticfile(static, fname, web.res)
+    staticfile(web.templatepath, static, fname, web.res)
     return web.res.sendresponse()
 
 
--- a/mercurial/interfaces/repository.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/interfaces/repository.py	Mon Sep 07 15:20:31 2020 -0400
@@ -11,10 +11,6 @@
 from .. import error
 from . import util as interfaceutil
 
-# When narrowing is finalized and no longer subject to format changes,
-# we should move this to just "narrow" or similar.
-NARROW_REQUIREMENT = b'narrowhg-experimental'
-
 # Local repository feature string.
 
 # Revlogs are being used for file storage.
--- a/mercurial/localrepo.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/localrepo.py	Mon Sep 07 15:20:31 2020 -0400
@@ -32,6 +32,7 @@
     bundle2,
     changegroup,
     color,
+    commit,
     context,
     dirstate,
     dirstateguard,
@@ -46,7 +47,6 @@
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
-    metadata,
     namespaces,
     narrowspec,
     obsolete,
@@ -56,6 +56,7 @@
     pycompat,
     rcutil,
     repoview,
+    requirements as requirementsmod,
     revset,
     revsetlang,
     scmutil,
@@ -425,30 +426,6 @@
     # End of baselegacywirecommands interface.
 
 
-# Increment the sub-version when the revlog v2 format changes to lock out old
-# clients.
-REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
-
-# A repository with the sparserevlog feature will have delta chains that
-# can spread over a larger span. Sparse reading cuts these large spans into
-# pieces, so that each piece isn't too big.
-# Without the sparserevlog capability, reading from the repository could use
-# huge amounts of memory, because the whole span would be read at once,
-# including all the intermediate revisions that aren't pertinent for the chain.
-# This is why once a repository has enabled sparse-read, it becomes required.
-SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
-
-# A repository with the sidedataflag requirement will allow to store extra
-# information for revision without altering their original hashes.
-SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
-
-# A repository with the the copies-sidedata-changeset requirement will store
-# copies related information in changeset's sidedata.
-COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
-
-# The repository use persistent nodemap for the changelog and the manifest.
-NODEMAP_REQUIREMENT = b'persistent-nodemap'
-
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
@@ -459,6 +436,50 @@
 featuresetupfuncs = set()
 
 
+def _getsharedvfs(hgvfs, requirements):
+    """Return the vfs object pointing to the root of the shared source
+    repo for a shared repository.
+
+    hgvfs is vfs pointing at .hg/ of current repo (shared one)
+    requirements is a set of requirements of current repo (shared one)
+    """
+    # The ``shared`` or ``relshared`` requirements indicate the
+    # store lives in the path contained in the ``.hg/sharedpath`` file.
+    # This is an absolute path for ``shared`` and relative to
+    # ``.hg/`` for ``relshared``.
+    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
+    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
+        sharedpath = hgvfs.join(sharedpath)
+
+    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
+
+    if not sharedvfs.exists():
+        raise error.RepoError(
+            _(b'.hg/sharedpath points to nonexistent directory %s')
+            % sharedvfs.base
+        )
+    return sharedvfs
+
+
+def _readrequires(vfs, allowmissing):
+    """Read the ``requires`` file present at the root of this vfs
+    and return a set of requirements.
+
+    If allowmissing is True, we suppress ENOENT if raised"""
+    # requires file contains a newline-delimited list of
+    # features/capabilities the opener (us) must have in order to use
+    # the repository. This file was introduced in Mercurial 0.9.2,
+    # which means very old repositories may not have one. We assume
+    # a missing file translates to no requirements.
+    try:
+        requirements = set(vfs.read(b'requires').splitlines())
+    except IOError as e:
+        if not (allowmissing and e.errno == errno.ENOENT):
+            raise
+        requirements = set()
+    return requirements
+
+
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.
 
@@ -500,6 +521,10 @@
     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
+    # Whether this repository is a shared one or not
+    shared = False
+    # If this repository is shared, the vfs pointing to the shared repo
+    sharedvfs = None
 
     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
@@ -517,17 +542,7 @@
 
         raise error.RepoError(_(b'repository %s not found') % path)
 
-    # .hg/requires file contains a newline-delimited list of
-    # features/capabilities the opener (us) must have in order to use
-    # the repository. This file was introduced in Mercurial 0.9.2,
-    # which means very old repositories may not have one. We assume
-    # a missing file translates to no requirements.
-    try:
-        requirements = set(hgvfs.read(b'requires').splitlines())
-    except IOError as e:
-        if e.errno != errno.ENOENT:
-            raise
-        requirements = set()
+    requirements = _readrequires(hgvfs, True)
 
     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
@@ -567,27 +582,18 @@
     features = set()
 
     # The "store" part of the repository holds versioned data. How it is
-    # accessed is determined by various requirements. The ``shared`` or
-    # ``relshared`` requirements indicate the store lives in the path contained
-    # in the ``.hg/sharedpath`` file. This is an absolute path for
-    # ``shared`` and relative to ``.hg/`` for ``relshared``.
-    if b'shared' in requirements or b'relshared' in requirements:
-        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
-        if b'relshared' in requirements:
-            sharedpath = hgvfs.join(sharedpath)
-
-        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
-
-        if not sharedvfs.exists():
-            raise error.RepoError(
-                _(b'.hg/sharedpath points to nonexistent directory %s')
-                % sharedvfs.base
-            )
-
-        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
-
+    # accessed is determined by various requirements. If `shared` or
+    # `relshared` requirements are present, this indicates current repository
+    # is a share and store exists in path mentioned in `.hg/sharedpath`
+    shared = (
+        requirementsmod.SHARED_REQUIREMENT in requirements
+        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
+    )
+    if shared:
+        sharedvfs = _getsharedvfs(hgvfs, requirements)
         storebasepath = sharedvfs.base
         cachepath = sharedvfs.join(b'cache')
+        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
     else:
         storebasepath = hgvfs.base
         cachepath = hgvfs.join(b'cache')
@@ -790,7 +796,10 @@
 
     ``error.RepoError`` should be raised on failure.
     """
-    if b'exp-sparse' in requirements and not sparse.enabled:
+    if (
+        requirementsmod.SPARSE_REQUIREMENT in requirements
+        and not sparse.enabled
+    ):
         raise error.RepoError(
             _(
                 b'repository is using sparse feature but '
@@ -820,7 +829,7 @@
     """
     options = {}
 
-    if b'treemanifest' in requirements:
+    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
         options[b'treemanifest'] = True
 
     # experimental config: format.manifestcachesize
@@ -833,12 +842,15 @@
     # This revlog format is super old and we don't bother trying to parse
     # opener options for it because those options wouldn't do anything
     # meaningful on such old repos.
-    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
+    if (
+        b'revlogv1' in requirements
+        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
+    ):
         options.update(resolverevlogstorevfsoptions(ui, requirements, features))
     else:  # explicitly mark repo as using revlogv0
         options[b'revlogv0'] = True
 
-    if COPIESSDC_REQUIREMENT in requirements:
+    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
         options[b'copies-storage'] = b'changeset-sidedata'
     else:
         writecopiesto = ui.config(b'experimental', b'copies.write-to')
@@ -857,7 +869,7 @@
 
     if b'revlogv1' in requirements:
         options[b'revlogv1'] = True
-    if REVLOGV2_REQUIREMENT in requirements:
+    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
         options[b'revlogv2'] = True
 
     if b'generaldelta' in requirements:
@@ -901,12 +913,12 @@
     options[b'sparse-read-density-threshold'] = srdensitythres
     options[b'sparse-read-min-gap-size'] = srmingapsize
 
-    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
+    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
     options[b'sparse-revlog'] = sparserevlog
     if sparserevlog:
         options[b'generaldelta'] = True
 
-    sidedata = SIDEDATA_REQUIREMENT in requirements
+    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
     options[b'side-data'] = sidedata
 
     maxchainlen = None
@@ -937,12 +949,12 @@
             msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
             raise error.Abort(msg % options[b'zstd.level'])
 
-    if repository.NARROW_REQUIREMENT in requirements:
+    if requirementsmod.NARROW_REQUIREMENT in requirements:
         options[b'enableellipsis'] = True
 
     if ui.configbool(b'experimental', b'rust.index'):
         options[b'rust.index'] = True
-    if NODEMAP_REQUIREMENT in requirements:
+    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
         options[b'persistent-nodemap'] = True
     if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
         options[b'persistent-nodemap.mmap'] = True
@@ -986,7 +998,7 @@
     features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
     features.add(repository.REPO_FEATURE_STREAM_CLONE)
 
-    if repository.NARROW_REQUIREMENT in requirements:
+    if requirementsmod.NARROW_REQUIREMENT in requirements:
         return revlognarrowfilestorage
     else:
         return revlogfilestorage
@@ -1027,22 +1039,22 @@
     supportedformats = {
         b'revlogv1',
         b'generaldelta',
-        b'treemanifest',
-        COPIESSDC_REQUIREMENT,
-        REVLOGV2_REQUIREMENT,
-        SIDEDATA_REQUIREMENT,
-        SPARSEREVLOG_REQUIREMENT,
-        NODEMAP_REQUIREMENT,
+        requirementsmod.TREEMANIFEST_REQUIREMENT,
+        requirementsmod.COPIESSDC_REQUIREMENT,
+        requirementsmod.REVLOGV2_REQUIREMENT,
+        requirementsmod.SIDEDATA_REQUIREMENT,
+        requirementsmod.SPARSEREVLOG_REQUIREMENT,
+        requirementsmod.NODEMAP_REQUIREMENT,
         bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         b'store',
         b'fncache',
-        b'shared',
-        b'relshared',
+        requirementsmod.SHARED_REQUIREMENT,
+        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
         b'dotencode',
-        b'exp-sparse',
-        b'internal-phase',
+        requirementsmod.SPARSE_REQUIREMENT,
+        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
     }
 
     # list of prefix for file which can be written without 'wlock'
@@ -1211,7 +1223,7 @@
         self._extrafilterid = repoview.extrafilter(ui)
 
         self.filecopiesmode = None
-        if COPIESSDC_REQUIREMENT in self.requirements:
+        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
             self.filecopiesmode = b'changeset-sidedata'
 
     def _getvfsward(self, origfunc):
@@ -1503,14 +1515,14 @@
 
     @storecache(narrowspec.FILENAME)
     def _storenarrowmatch(self):
-        if repository.NARROW_REQUIREMENT not in self.requirements:
+        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
             return matchmod.always()
         include, exclude = self.narrowpats
         return narrowspec.match(self.root, include=include, exclude=exclude)
 
     @storecache(narrowspec.FILENAME)
     def _narrowmatch(self):
-        if repository.NARROW_REQUIREMENT not in self.requirements:
+        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
             return matchmod.always()
         narrowspec.checkworkingcopynarrowspec(self)
         include, exclude = self.narrowpats
@@ -2771,140 +2783,6 @@
         """Returns the wlock if it's held, or None if it's not."""
         return self._currentlock(self._wlockref)
 
-    def _filecommit(
-        self,
-        fctx,
-        manifest1,
-        manifest2,
-        linkrev,
-        tr,
-        changelist,
-        includecopymeta,
-    ):
-        """
-        commit an individual file as part of a larger transaction
-
-        input:
-
-            fctx:       a file context with the content we are trying to commit
-            manifest1:  manifest of changeset first parent
-            manifest2:  manifest of changeset second parent
-            linkrev:    revision number of the changeset being created
-            tr:         current transation
-            changelist: list of file being changed (modified inplace)
-            individual: boolean, set to False to skip storing the copy data
-                        (only used by the Google specific feature of using
-                        changeset extra as copy source of truth).
-
-        output:
-
-            The resulting filenode
-        """
-
-        fname = fctx.path()
-        fparent1 = manifest1.get(fname, nullid)
-        fparent2 = manifest2.get(fname, nullid)
-        if isinstance(fctx, context.filectx):
-            node = fctx.filenode()
-            if node in [fparent1, fparent2]:
-                self.ui.debug(b'reusing %s filelog entry\n' % fname)
-                if (
-                    fparent1 != nullid
-                    and manifest1.flags(fname) != fctx.flags()
-                ) or (
-                    fparent2 != nullid
-                    and manifest2.flags(fname) != fctx.flags()
-                ):
-                    changelist.append(fname)
-                return node
-
-        flog = self.file(fname)
-        meta = {}
-        cfname = fctx.copysource()
-        if cfname and cfname != fname:
-            # Mark the new revision of this file as a copy of another
-            # file.  This copy data will effectively act as a parent
-            # of this new revision.  If this is a merge, the first
-            # parent will be the nullid (meaning "look up the copy data")
-            # and the second one will be the other parent.  For example:
-            #
-            # 0 --- 1 --- 3   rev1 changes file foo
-            #   \       /     rev2 renames foo to bar and changes it
-            #    \- 2 -/      rev3 should have bar with all changes and
-            #                      should record that bar descends from
-            #                      bar in rev2 and foo in rev1
-            #
-            # this allows this merge to succeed:
-            #
-            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
-            #   \       /     merging rev3 and rev4 should use bar@rev2
-            #    \- 2 --- 4        as the merge base
-            #
-
-            cnode = manifest1.get(cfname)
-            newfparent = fparent2
-
-            if manifest2:  # branch merge
-                if fparent2 == nullid or cnode is None:  # copied on remote side
-                    if cfname in manifest2:
-                        cnode = manifest2[cfname]
-                        newfparent = fparent1
-
-            # Here, we used to search backwards through history to try to find
-            # where the file copy came from if the source of a copy was not in
-            # the parent directory. However, this doesn't actually make sense to
-            # do (what does a copy from something not in your working copy even
-            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
-            # the user that copy information was dropped, so if they didn't
-            # expect this outcome it can be fixed, but this is the correct
-            # behavior in this circumstance.
-
-            if cnode:
-                self.ui.debug(
-                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
-                )
-                if includecopymeta:
-                    meta[b"copy"] = cfname
-                    meta[b"copyrev"] = hex(cnode)
-                fparent1, fparent2 = nullid, newfparent
-            else:
-                self.ui.warn(
-                    _(
-                        b"warning: can't find ancestor for '%s' "
-                        b"copied from '%s'!\n"
-                    )
-                    % (fname, cfname)
-                )
-
-        elif fparent1 == nullid:
-            fparent1, fparent2 = fparent2, nullid
-        elif fparent2 != nullid:
-            # is one parent an ancestor of the other?
-            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
-            if fparent1 in fparentancestors:
-                fparent1, fparent2 = fparent2, nullid
-            elif fparent2 in fparentancestors:
-                fparent2 = nullid
-            elif not fparentancestors:
-                # TODO: this whole if-else might be simplified much more
-                ms = mergestatemod.mergestate.read(self)
-                if (
-                    fname in ms
-                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
-                ):
-                    fparent1, fparent2 = fparent2, nullid
-
-        # is the file changed?
-        text = fctx.data()
-        if fparent2 != nullid or meta or flog.cmp(fparent1, text):
-            changelist.append(fname)
-            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
-        # are just the flags changed during merge?
-        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
-            changelist.append(fname)
-
-        return fparent1
-
     def checkcommitpatterns(self, wctx, match, status, fail):
         """check for commit arguments that aren't committable"""
         if match.isexact() or match.prefix():
@@ -3062,203 +2940,7 @@
 
     @unfilteredmethod
     def commitctx(self, ctx, error=False, origctx=None):
-        """Add a new revision to current repository.
-        Revision information is passed via the context argument.
-
-        ctx.files() should list all files involved in this commit, i.e.
-        modified/added/removed files. On merge, it may be wider than the
-        ctx.files() to be committed, since any file nodes derived directly
-        from p1 or p2 are excluded from the committed ctx.files().
-
-        origctx is for convert to work around the problem that bug
-        fixes to the files list in changesets change hashes. For
-        convert to be the identity, it can pass an origctx and this
-        function will use the same files list when it makes sense to
-        do so.
-        """
-
-        p1, p2 = ctx.p1(), ctx.p2()
-        user = ctx.user()
-
-        if self.filecopiesmode == b'changeset-sidedata':
-            writechangesetcopy = True
-            writefilecopymeta = True
-            writecopiesto = None
-        else:
-            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
-            writefilecopymeta = writecopiesto != b'changeset-only'
-            writechangesetcopy = writecopiesto in (
-                b'changeset-only',
-                b'compatibility',
-            )
-        p1copies, p2copies = None, None
-        if writechangesetcopy:
-            p1copies = ctx.p1copies()
-            p2copies = ctx.p2copies()
-        filesadded, filesremoved = None, None
-        with self.lock(), self.transaction(b"commit") as tr:
-            trp = weakref.proxy(tr)
-
-            if ctx.manifestnode():
-                # reuse an existing manifest revision
-                self.ui.debug(b'reusing known manifest\n')
-                mn = ctx.manifestnode()
-                files = ctx.files()
-                if writechangesetcopy:
-                    filesadded = ctx.filesadded()
-                    filesremoved = ctx.filesremoved()
-            elif ctx.files():
-                m1ctx = p1.manifestctx()
-                m2ctx = p2.manifestctx()
-                mctx = m1ctx.copy()
-
-                m = mctx.read()
-                m1 = m1ctx.read()
-                m2 = m2ctx.read()
-
-                # check in files
-                added = []
-                changed = []
-                removed = list(ctx.removed())
-                linkrev = len(self)
-                self.ui.note(_(b"committing files:\n"))
-                uipathfn = scmutil.getuipathfn(self)
-                for f in sorted(ctx.modified() + ctx.added()):
-                    self.ui.note(uipathfn(f) + b"\n")
-                    try:
-                        fctx = ctx[f]
-                        if fctx is None:
-                            removed.append(f)
-                        else:
-                            added.append(f)
-                            m[f] = self._filecommit(
-                                fctx,
-                                m1,
-                                m2,
-                                linkrev,
-                                trp,
-                                changed,
-                                writefilecopymeta,
-                            )
-                            m.setflag(f, fctx.flags())
-                    except OSError:
-                        self.ui.warn(
-                            _(b"trouble committing %s!\n") % uipathfn(f)
-                        )
-                        raise
-                    except IOError as inst:
-                        errcode = getattr(inst, 'errno', errno.ENOENT)
-                        if error or errcode and errcode != errno.ENOENT:
-                            self.ui.warn(
-                                _(b"trouble committing %s!\n") % uipathfn(f)
-                            )
-                        raise
-
-                # update manifest
-                removed = [f for f in removed if f in m1 or f in m2]
-                drop = sorted([f for f in removed if f in m])
-                for f in drop:
-                    del m[f]
-                if p2.rev() != nullrev:
-                    rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
-                    removed = [f for f in removed if not rf(f)]
-
-                files = changed + removed
-                md = None
-                if not files:
-                    # if no "files" actually changed in terms of the changelog,
-                    # try hard to detect unmodified manifest entry so that the
-                    # exact same commit can be reproduced later on convert.
-                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
-                if not files and md:
-                    self.ui.debug(
-                        b'not reusing manifest (no file change in '
-                        b'changelog, but manifest differs)\n'
-                    )
-                if files or md:
-                    self.ui.note(_(b"committing manifest\n"))
-                    # we're using narrowmatch here since it's already applied at
-                    # other stages (such as dirstate.walk), so we're already
-                    # ignoring things outside of narrowspec in most cases. The
-                    # one case where we might have files outside the narrowspec
-                    # at this point is merges, and we already error out in the
-                    # case where the merge has files outside of the narrowspec,
-                    # so this is safe.
-                    mn = mctx.write(
-                        trp,
-                        linkrev,
-                        p1.manifestnode(),
-                        p2.manifestnode(),
-                        added,
-                        drop,
-                        match=self.narrowmatch(),
-                    )
-
-                    if writechangesetcopy:
-                        filesadded = [
-                            f for f in changed if not (f in m1 or f in m2)
-                        ]
-                        filesremoved = removed
-                else:
-                    self.ui.debug(
-                        b'reusing manifest from p1 (listed files '
-                        b'actually unchanged)\n'
-                    )
-                    mn = p1.manifestnode()
-            else:
-                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
-                mn = p1.manifestnode()
-                files = []
-
-            if writecopiesto == b'changeset-only':
-                # If writing only to changeset extras, use None to indicate that
-                # no entry should be written. If writing to both, write an empty
-                # entry to prevent the reader from falling back to reading
-                # filelogs.
-                p1copies = p1copies or None
-                p2copies = p2copies or None
-                filesadded = filesadded or None
-                filesremoved = filesremoved or None
-
-            if origctx and origctx.manifestnode() == mn:
-                files = origctx.files()
-
-            # update changelog
-            self.ui.note(_(b"committing changelog\n"))
-            self.changelog.delayupdate(tr)
-            n = self.changelog.add(
-                mn,
-                files,
-                ctx.description(),
-                trp,
-                p1.node(),
-                p2.node(),
-                user,
-                ctx.date(),
-                ctx.extra().copy(),
-                p1copies,
-                p2copies,
-                filesadded,
-                filesremoved,
-            )
-            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
-            self.hook(
-                b'pretxncommit',
-                throw=True,
-                node=hex(n),
-                parent1=xp1,
-                parent2=xp2,
-            )
-            # set the new commit is proper phase
-            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
-            if targetphase:
-                # retract boundary do not alter parent changeset.
-                # if a parent have higher the resulting phase will
-                # be compliant anyway
-                #
-                # if minimal phase was 0 we don't need to retract anything
-                phases.registernew(self, tr, targetphase, [n])
-            return n
+        return commit.commitctx(self, ctx, error=error, origctx=origctx)
 
     @unfilteredmethod
     def destroying(self):
@@ -3553,9 +3235,9 @@
     if b'sharedrepo' in createopts:
         requirements = set(createopts[b'sharedrepo'].requirements)
         if createopts.get(b'sharedrelative'):
-            requirements.add(b'relshared')
+            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
         else:
-            requirements.add(b'shared')
+            requirements.add(requirementsmod.SHARED_REQUIREMENT)
 
         return requirements
 
@@ -3608,30 +3290,30 @@
     if scmutil.gdinitconfig(ui):
         requirements.add(b'generaldelta')
         if ui.configbool(b'format', b'sparse-revlog'):
-            requirements.add(SPARSEREVLOG_REQUIREMENT)
+            requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
 
     # experimental config: format.exp-use-side-data
     if ui.configbool(b'format', b'exp-use-side-data'):
-        requirements.add(SIDEDATA_REQUIREMENT)
+        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
     # experimental config: format.exp-use-copies-side-data-changeset
     if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
-        requirements.add(SIDEDATA_REQUIREMENT)
-        requirements.add(COPIESSDC_REQUIREMENT)
+        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
+        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
     if ui.configbool(b'experimental', b'treemanifest'):
-        requirements.add(b'treemanifest')
+        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
 
     revlogv2 = ui.config(b'experimental', b'revlogv2')
     if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
         requirements.remove(b'revlogv1')
         # generaldelta is implied by revlogv2.
         requirements.discard(b'generaldelta')
-        requirements.add(REVLOGV2_REQUIREMENT)
+        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
     # experimental config: format.internal-phase
     if ui.configbool(b'format', b'internal-phase'):
-        requirements.add(b'internal-phase')
+        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
 
     if createopts.get(b'narrowfiles'):
-        requirements.add(repository.NARROW_REQUIREMENT)
+        requirements.add(requirementsmod.NARROW_REQUIREMENT)
 
     if createopts.get(b'lfs'):
         requirements.add(b'lfs')
@@ -3640,11 +3322,44 @@
         requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
 
     if ui.configbool(b'format', b'use-persistent-nodemap'):
-        requirements.add(NODEMAP_REQUIREMENT)
+        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
 
     return requirements
 
 
+def checkrequirementscompat(ui, requirements):
+    """ Checks compatibility of repository requirements enabled and disabled.
+
+    Returns a set of requirements which needs to be dropped because dependend
+    requirements are not enabled. Also warns users about it """
+
+    dropped = set()
+
+    if b'store' not in requirements:
+        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
+            ui.warn(
+                _(
+                    b'ignoring enabled \'format.bookmarks-in-store\' config '
+                    b'because it is incompatible with disabled '
+                    b'\'format.usestore\' config\n'
+                )
+            )
+            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
+
+        if (
+            requirementsmod.SHARED_REQUIREMENT in requirements
+            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
+        ):
+            raise error.Abort(
+                _(
+                    b"cannot create shared repository as source was created"
+                    b" with 'format.usestore' config disabled"
+                )
+            )
+
+    return dropped
+
+
 def filterknowncreateopts(ui, createopts):
     """Filters a dict of repo creation options against options that are known.
 
@@ -3719,6 +3434,7 @@
         )
 
     requirements = newreporequirements(ui, createopts=createopts)
+    requirements -= checkrequirementscompat(ui, requirements)
 
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
 
--- a/mercurial/logcmdutil.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/logcmdutil.py	Mon Sep 07 15:20:31 2020 -0400
@@ -603,12 +603,11 @@
 
 
 def templatespec(tmpl, mapfile):
-    if pycompat.ispy3:
-        assert not isinstance(tmpl, str), b'tmpl must not be a str'
+    assert not (tmpl and mapfile)
     if mapfile:
-        return formatter.templatespec(b'changeset', tmpl, mapfile)
+        return formatter.mapfile_templatespec(b'changeset', mapfile)
     else:
-        return formatter.templatespec(b'', tmpl, None)
+        return formatter.literal_templatespec(tmpl)
 
 
 def _lookuptemplate(ui, tmpl, style):
@@ -621,19 +620,20 @@
     if not tmpl and not style:  # template are stronger than style
         tmpl = ui.config(b'ui', b'logtemplate')
         if tmpl:
-            return templatespec(templater.unquotestring(tmpl), None)
+            return formatter.literal_templatespec(templater.unquotestring(tmpl))
         else:
             style = util.expandpath(ui.config(b'ui', b'style'))
 
     if not tmpl and style:
         mapfile = style
+        fp = None
         if not os.path.split(mapfile)[0]:
-            mapname = templater.templatepath(
+            (mapname, fp) = templater.try_open_template(
                 b'map-cmdline.' + mapfile
-            ) or templater.templatepath(mapfile)
+            ) or templater.try_open_template(mapfile)
             if mapname:
                 mapfile = mapname
-        return templatespec(None, mapfile)
+        return formatter.mapfile_templatespec(b'changeset', mapfile, fp)
 
     return formatter.lookuptemplate(ui, b'changeset', tmpl)
 
@@ -641,7 +641,7 @@
 def maketemplater(ui, repo, tmpl, buffered=False):
     """Create a changesettemplater from a literal template 'tmpl'
     byte-string."""
-    spec = templatespec(tmpl, None)
+    spec = formatter.literal_templatespec(tmpl)
     return changesettemplater(ui, repo, spec, buffered=buffered)
 
 
--- a/mercurial/manifest.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/manifest.py	Mon Sep 07 15:20:31 2020 -0400
@@ -315,16 +315,9 @@
                 b"Manifest values must be a tuple of (node, flags)."
             )
         hashval = value[0]
-        # hashes are either 20 or 32 bytes (sha1 or its replacement),
-        # and allow one extra byte taht won't be persisted to disk but
-        # is sometimes used in memory.
-        if not isinstance(hashval, bytes) or not (
-            20 <= len(hashval) <= 22 or 32 <= len(hashval) <= 34
-        ):
+        if not isinstance(hashval, bytes) or len(hashval) not in (20, 32):
             raise TypeError(b"node must be a 20-byte or 32-byte byte string")
         flags = value[1]
-        if len(hashval) == 22:
-            hashval = hashval[:-1]
         if not isinstance(flags, bytes) or len(flags) > 1:
             raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
         needle, found = self.bsearch2(key)
--- a/mercurial/merge.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/merge.py	Mon Sep 07 15:20:31 2020 -0400
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+import collections
 import errno
 import stat
 import struct
@@ -126,7 +127,7 @@
         return None
 
 
-def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
+def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
     """
     Considers any actions that care about the presence of conflicting unknown
     files. For some actions, the result is to abort; for others, it is to
@@ -150,20 +151,23 @@
                 warnconflicts.update(conflicts)
 
         checkunknowndirs = _unknowndirschecker()
-        for f, (m, args, msg) in pycompat.iteritems(actions):
-            if m in (
+        for f in mresult.files(
+            (
                 mergestatemod.ACTION_CREATED,
                 mergestatemod.ACTION_DELETED_CHANGED,
-            ):
-                if _checkunknownfile(repo, wctx, mctx, f):
-                    fileconflicts.add(f)
-                elif pathconfig and f not in wctx:
-                    path = checkunknowndirs(repo, wctx, f)
-                    if path is not None:
-                        pathconflicts.add(path)
-            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
-                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
-                    fileconflicts.add(f)
+            )
+        ):
+            if _checkunknownfile(repo, wctx, mctx, f):
+                fileconflicts.add(f)
+            elif pathconfig and f not in wctx:
+                path = checkunknowndirs(repo, wctx, f)
+                if path is not None:
+                    pathconflicts.add(path)
+        for f, args, msg in mresult.getactions(
+            [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
+        ):
+            if _checkunknownfile(repo, wctx, mctx, f, args[0]):
+                fileconflicts.add(f)
 
         allconflicts = fileconflicts | pathconflicts
         ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
@@ -171,49 +175,50 @@
         collectconflicts(ignoredconflicts, ignoredconfig)
         collectconflicts(unknownconflicts, unknownconfig)
     else:
-        for f, (m, args, msg) in pycompat.iteritems(actions):
-            if m == mergestatemod.ACTION_CREATED_MERGE:
-                fl2, anc = args
-                different = _checkunknownfile(repo, wctx, mctx, f)
-                if repo.dirstate._ignore(f):
-                    config = ignoredconfig
-                else:
-                    config = unknownconfig
+        for f, args, msg in list(
+            mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
+        ):
+            fl2, anc = args
+            different = _checkunknownfile(repo, wctx, mctx, f)
+            if repo.dirstate._ignore(f):
+                config = ignoredconfig
+            else:
+                config = unknownconfig
 
-                # The behavior when force is True is described by this table:
-                #  config  different  mergeforce  |    action    backup
-                #    *         n          *       |      get        n
-                #    *         y          y       |     merge       -
-                #   abort      y          n       |     merge       -   (1)
-                #   warn       y          n       |  warn + get     y
-                #  ignore      y          n       |      get        y
-                #
-                # (1) this is probably the wrong behavior here -- we should
-                #     probably abort, but some actions like rebases currently
-                #     don't like an abort happening in the middle of
-                #     merge.update.
-                if not different:
-                    actions[f] = (
-                        mergestatemod.ACTION_GET,
-                        (fl2, False),
-                        b'remote created',
-                    )
-                elif mergeforce or config == b'abort':
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f, f, None, False, anc),
-                        b'remote differs from untracked local',
-                    )
-                elif config == b'abort':
-                    abortconflicts.add(f)
-                else:
-                    if config == b'warn':
-                        warnconflicts.add(f)
-                    actions[f] = (
-                        mergestatemod.ACTION_GET,
-                        (fl2, True),
-                        b'remote created',
-                    )
+            # The behavior when force is True is described by this table:
+            #  config  different  mergeforce  |    action    backup
+            #    *         n          *       |      get        n
+            #    *         y          y       |     merge       -
+            #   abort      y          n       |     merge       -   (1)
+            #   warn       y          n       |  warn + get     y
+            #  ignore      y          n       |      get        y
+            #
+            # (1) this is probably the wrong behavior here -- we should
+            #     probably abort, but some actions like rebases currently
+            #     don't like an abort happening in the middle of
+            #     merge.update.
+            if not different:
+                mresult.addfile(
+                    f,
+                    mergestatemod.ACTION_GET,
+                    (fl2, False),
+                    b'remote created',
+                )
+            elif mergeforce or config == b'abort':
+                mresult.addfile(
+                    f,
+                    mergestatemod.ACTION_MERGE,
+                    (f, f, None, False, anc),
+                    b'remote differs from untracked local',
+                )
+            elif config == b'abort':
+                abortconflicts.add(f)
+            else:
+                if config == b'warn':
+                    warnconflicts.add(f)
+                mresult.addfile(
+                    f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
+                )
 
     for f in sorted(abortconflicts):
         warn = repo.ui.warn
@@ -238,18 +243,19 @@
         else:
             repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
 
-    for f, (m, args, msg) in pycompat.iteritems(actions):
-        if m == mergestatemod.ACTION_CREATED:
-            backup = (
-                f in fileconflicts
-                or f in pathconflicts
-                or any(p in pathconflicts for p in pathutil.finddirs(f))
-            )
-            (flags,) = args
-            actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
+    for f, args, msg in list(
+        mresult.getactions([mergestatemod.ACTION_CREATED])
+    ):
+        backup = (
+            f in fileconflicts
+            or f in pathconflicts
+            or any(p in pathconflicts for p in pathutil.finddirs(f))
+        )
+        (flags,) = args
+        mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
 
 
-def _forgetremoved(wctx, mctx, branchmerge):
+def _forgetremoved(wctx, mctx, branchmerge, mresult):
     """
     Forget removed files
 
@@ -264,27 +270,22 @@
     as removed.
     """
 
-    actions = {}
     m = mergestatemod.ACTION_FORGET
     if branchmerge:
         m = mergestatemod.ACTION_REMOVE
     for f in wctx.deleted():
         if f not in mctx:
-            actions[f] = m, None, b"forget deleted"
+            mresult.addfile(f, m, None, b"forget deleted")
 
     if not branchmerge:
         for f in wctx.removed():
             if f not in mctx:
-                actions[f] = (
-                    mergestatemod.ACTION_FORGET,
-                    None,
-                    b"forget removed",
+                mresult.addfile(
+                    f, mergestatemod.ACTION_FORGET, None, b"forget removed",
                 )
 
-    return actions
 
-
-def _checkcollision(repo, wmf, actions):
+def _checkcollision(repo, wmf, mresult):
     """
     Check for case-folding collisions.
     """
@@ -292,39 +293,38 @@
     narrowmatch = repo.narrowmatch()
     if not narrowmatch.always():
         pmmf = set(wmf.walk(narrowmatch))
-        if actions:
-            narrowactions = {}
-            for m, actionsfortype in pycompat.iteritems(actions):
-                narrowactions[m] = []
-                for (f, args, msg) in actionsfortype:
-                    if narrowmatch(f):
-                        narrowactions[m].append((f, args, msg))
-            actions = narrowactions
+        if mresult:
+            for f in list(mresult.files()):
+                if not narrowmatch(f):
+                    mresult.removefile(f)
     else:
         # build provisional merged manifest up
         pmmf = set(wmf)
 
-    if actions:
+    if mresult:
         # KEEP and EXEC are no-op
-        for m in (
-            mergestatemod.ACTION_ADD,
-            mergestatemod.ACTION_ADD_MODIFIED,
-            mergestatemod.ACTION_FORGET,
-            mergestatemod.ACTION_GET,
-            mergestatemod.ACTION_CHANGED_DELETED,
-            mergestatemod.ACTION_DELETED_CHANGED,
+        for f in mresult.files(
+            (
+                mergestatemod.ACTION_ADD,
+                mergestatemod.ACTION_ADD_MODIFIED,
+                mergestatemod.ACTION_FORGET,
+                mergestatemod.ACTION_GET,
+                mergestatemod.ACTION_CHANGED_DELETED,
+                mergestatemod.ACTION_DELETED_CHANGED,
+            )
         ):
-            for f, args, msg in actions[m]:
-                pmmf.add(f)
-        for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
+            pmmf.add(f)
+        for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
             pmmf.discard(f)
-        for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
+        for f, args, msg in mresult.getactions(
+            [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
+        ):
             f2, flags = args
             pmmf.discard(f2)
             pmmf.add(f)
-        for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
+        for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
             pmmf.add(f)
-        for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
+        for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
             f1, f2, fa, move, anc = args
             if move:
                 pmmf.discard(f1)
@@ -382,7 +382,7 @@
                 break
 
 
-def checkpathconflicts(repo, wctx, mctx, actions):
+def checkpathconflicts(repo, wctx, mctx, mresult):
     """
     Check if any actions introduce path conflicts in the repository, updating
     actions to record or handle the path conflict accordingly.
@@ -407,30 +407,33 @@
     # The set of files deleted by all the actions.
     deletedfiles = set()
 
-    for f, (m, args, msg) in actions.items():
-        if m in (
+    for f in mresult.files(
+        (
             mergestatemod.ACTION_CREATED,
             mergestatemod.ACTION_DELETED_CHANGED,
             mergestatemod.ACTION_MERGE,
             mergestatemod.ACTION_CREATED_MERGE,
-        ):
-            # This action may create a new local file.
-            createdfiledirs.update(pathutil.finddirs(f))
-            if mf.hasdir(f):
-                # The file aliases a local directory.  This might be ok if all
-                # the files in the local directory are being deleted.  This
-                # will be checked once we know what all the deleted files are.
-                remoteconflicts.add(f)
-        # Track the names of all deleted files.
-        if m == mergestatemod.ACTION_REMOVE:
-            deletedfiles.add(f)
-        if m == mergestatemod.ACTION_MERGE:
-            f1, f2, fa, move, anc = args
-            if move:
-                deletedfiles.add(f1)
-        if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
-            f2, flags = args
-            deletedfiles.add(f2)
+        )
+    ):
+        # This action may create a new local file.
+        createdfiledirs.update(pathutil.finddirs(f))
+        if mf.hasdir(f):
+            # The file aliases a local directory.  This might be ok if all
+            # the files in the local directory are being deleted.  This
+            # will be checked once we know what all the deleted files are.
+            remoteconflicts.add(f)
+    # Track the names of all deleted files.
+    for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
+        deletedfiles.add(f)
+    for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
+        f1, f2, fa, move, anc = args
+        if move:
+            deletedfiles.add(f1)
+    for (f, args, msg) in mresult.getactions(
+        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
+    ):
+        f2, flags = args
+        deletedfiles.add(f2)
 
     # Check all directories that contain created files for path conflicts.
     for p in createdfiledirs:
@@ -444,7 +447,8 @@
                 # A file is in a directory which aliases a local file.
                 # We will need to rename the local file.
                 localconflicts.add(p)
-        if p in actions and actions[p][0] in (
+        pd = mresult.getfile(p)
+        if pd and pd[0] in (
             mergestatemod.ACTION_CREATED,
             mergestatemod.ACTION_DELETED_CHANGED,
             mergestatemod.ACTION_MERGE,
@@ -459,14 +463,16 @@
     for p in localconflicts:
         if p not in deletedfiles:
             ctxname = bytes(wctx).rstrip(b'+')
-            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
+            pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
             porig = wctx[p].copysource() or p
-            actions[pnew] = (
+            mresult.addfile(
+                pnew,
                 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                 (p, porig),
                 b'local path conflict',
             )
-            actions[p] = (
+            mresult.addfile(
+                p,
                 mergestatemod.ACTION_PATH_CONFLICT,
                 (pnew, b'l'),
                 b'path conflict',
@@ -477,23 +483,25 @@
         ctxname = bytes(mctx).rstrip(b'+')
         for f, p in _filesindirs(repo, mf, remoteconflicts):
             if f not in deletedfiles:
-                m, args, msg = actions[p]
-                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
+                m, args, msg = mresult.getfile(p)
+                pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
                 if m in (
                     mergestatemod.ACTION_DELETED_CHANGED,
                     mergestatemod.ACTION_MERGE,
                 ):
                     # Action was merge, just update target.
-                    actions[pnew] = (m, args, msg)
+                    mresult.addfile(pnew, m, args, msg)
                 else:
                     # Action was create, change to renamed get action.
                     fl = args[0]
-                    actions[pnew] = (
+                    mresult.addfile(
+                        pnew,
                         mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                         (p, fl),
                         b'remote path conflict',
                     )
-                actions[p] = (
+                mresult.addfile(
+                    p,
                     mergestatemod.ACTION_PATH_CONFLICT,
                     (pnew, mergestatemod.ACTION_REMOVE),
                     b'path conflict',
@@ -507,24 +515,34 @@
         raise error.Abort(_(b"destination manifest contains path conflicts"))
 
 
-def _filternarrowactions(narrowmatch, branchmerge, actions):
+def _filternarrowactions(narrowmatch, branchmerge, mresult):
     """
     Filters out actions that can ignored because the repo is narrowed.
 
     Raise an exception if the merge cannot be completed because the repo is
     narrowed.
     """
-    nooptypes = {b'k'}  # TODO: handle with nonconflicttypes
-    nonconflicttypes = set(b'a am c cm f g gs r e'.split())
+    # TODO: handle with nonconflicttypes
+    nooptypes = {mergestatemod.ACTION_KEEP}
+    nonconflicttypes = {
+        mergestatemod.ACTION_ADD,
+        mergestatemod.ACTION_ADD_MODIFIED,
+        mergestatemod.ACTION_CREATED,
+        mergestatemod.ACTION_CREATED_MERGE,
+        mergestatemod.ACTION_FORGET,
+        mergestatemod.ACTION_GET,
+        mergestatemod.ACTION_REMOVE,
+        mergestatemod.ACTION_EXEC,
+    }
     # We mutate the items in the dict during iteration, so iterate
     # over a copy.
-    for f, action in list(actions.items()):
+    for f, action in mresult.filemap():
         if narrowmatch(f):
             pass
         elif not branchmerge:
-            del actions[f]  # just updating, ignore changes outside clone
+            mresult.removefile(f)  # just updating, ignore changes outside clone
         elif action[0] in nooptypes:
-            del actions[f]  # merge does not affect file
+            mresult.removefile(f)  # merge does not affect file
         elif action[0] in nonconflicttypes:
             raise error.Abort(
                 _(
@@ -540,6 +558,171 @@
             )
 
 
+class mergeresult(object):
+    '''An object representing result of merging manifests.
+
+    It has information about what actions need to be performed on dirstate
+    mapping of divergent renames and other such cases. '''
+
+    def __init__(self):
+        """
+        filemapping: dict of filename as keys and action related info as values
+        diverge: mapping of source name -> list of dest name for
+                 divergent renames
+        renamedelete: mapping of source name -> list of destinations for files
+                      deleted on one side and renamed on other.
+        commitinfo: dict containing data which should be used on commit
+                    contains a filename -> info mapping
+        actionmapping: dict of action names as keys and values are dict of
+                       filename as key and related data as values
+        """
+        self._filemapping = {}
+        self._diverge = {}
+        self._renamedelete = {}
+        self._commitinfo = collections.defaultdict(dict)
+        self._actionmapping = collections.defaultdict(dict)
+
+    def updatevalues(self, diverge, renamedelete):
+        """ stores divergent-rename and rename-delete information computed
+        while merging """
+        self._diverge = diverge
+        self._renamedelete = renamedelete
+
+    def addfile(self, filename, action, data, message):
+        """ adds a new file to the mergeresult object
+
+        filename: file which we are adding
+        action: one of mergestatemod.ACTION_*
+        data: a tuple of information like fctx and ctx related to this merge
+        message: a message about the merge
+        """
+        # if the file already existed, we need to delete its old
+        # entry from _actionmapping too
+        if filename in self._filemapping:
+            a, d, m = self._filemapping[filename]
+            del self._actionmapping[a][filename]
+
+        self._filemapping[filename] = (action, data, message)
+        self._actionmapping[action][filename] = (data, message)
+
+    def getfile(self, filename, default_return=None):
+        """ returns (action, args, msg) about this file
+
+        returns default_return if the file is not present """
+        if filename in self._filemapping:
+            return self._filemapping[filename]
+        return default_return
+
+    def files(self, actions=None):
+        """ returns files on which provided actions need to be performed
+
+        If actions is None, all files are returned
+        """
+        # TODO: think whether we should return renamedelete and
+        # diverge filenames also
+        if actions is None:
+            for f in self._filemapping:
+                yield f
+
+        else:
+            for a in actions:
+                for f in self._actionmapping[a]:
+                    yield f
+
+    def removefile(self, filename):
+        """ removes a file from the mergeresult object as the file might
+        not be merging anymore """
+        action, data, message = self._filemapping[filename]
+        del self._filemapping[filename]
+        del self._actionmapping[action][filename]
+
+    def getactions(self, actions, sort=False):
+        """ get list of files which are marked with these actions
+        if sort is true, files for each action are sorted and then added
+
+        Returns a list of tuple of form (filename, data, message)
+        """
+        for a in actions:
+            if sort:
+                for f in sorted(self._actionmapping[a]):
+                    args, msg = self._actionmapping[a][f]
+                    yield f, args, msg
+            else:
+                for f, (args, msg) in pycompat.iteritems(
+                    self._actionmapping[a]
+                ):
+                    yield f, args, msg
+
+    def len(self, actions=None):
+        """ returns number of files which need actions
+
+        if actions is passed, only the total number of files in those
+        actions is returned """
+
+        if actions is None:
+            return len(self._filemapping)
+
+        return sum(len(self._actionmapping[a]) for a in actions)
+
+    def filemap(self, sort=False):
+        # test the `sort` argument, not the `sorted` builtin: a function
+        # object is always truthy, which made the unsorted branch dead code
+        if sort:
+            for key, val in sorted(pycompat.iteritems(self._filemapping)):
+                yield key, val
+        else:
+            for key, val in pycompat.iteritems(self._filemapping):
+                yield key, val
+
+    def addcommitinfo(self, filename, key, value):
+        """ adds key-value information about filename which will be required
+        while committing this merge """
+        self._commitinfo[filename][key] = value
+
+    @property
+    def diverge(self):
+        return self._diverge
+
+    @property
+    def renamedelete(self):
+        return self._renamedelete
+
+    @property
+    def commitinfo(self):
+        return self._commitinfo
+
+    @property
+    def actionsdict(self):
+        """ returns a dictionary of actions to be performed with action as key
+        and a list of files and related arguments as values """
+        res = collections.defaultdict(list)
+        for a, d in pycompat.iteritems(self._actionmapping):
+            for f, (args, msg) in pycompat.iteritems(d):
+                res[a].append((f, args, msg))
+        return res
+
+    def setactions(self, actions):
+        """ replaces the file -> (action, data, message) mapping wholesale
+        and rebuilds the per-action index from it """
+        self._filemapping = actions
+        self._actionmapping = collections.defaultdict(dict)
+        for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
+            self._actionmapping[act][f] = data, msg
+
+    def hasconflicts(self):
+        """ tells whether this merge resulted in some actions which can
+        result in conflicts or not """
+        for a in self._actionmapping.keys():
+            if (
+                a
+                not in (
+                    mergestatemod.ACTION_GET,
+                    mergestatemod.ACTION_KEEP,
+                    mergestatemod.ACTION_EXEC,
+                    mergestatemod.ACTION_REMOVE,
+                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+                )
+                and self._actionmapping[a]
+            ):
+                return True
+
+        return False
+
+
 def manifestmerge(
     repo,
     wctx,
@@ -559,13 +742,9 @@
     matcher = matcher to filter file lists
     acceptremote = accept the incoming changes without prompting
 
-    Returns:
-
-    actions: dict of filename as keys and action related info as values
-    diverge: mapping of source name -> list of dest name for divergent renames
-    renamedelete: mapping of source name -> list of destinations for files
-                  deleted on one side and renamed on other.
+    Returns an object of mergeresult class
     """
+    mresult = mergeresult()
     if matcher is not None and matcher.always():
         matcher = None
 
@@ -578,6 +757,9 @@
     branch_copies1 = copies.branch_copies()
     branch_copies2 = copies.branch_copies()
     diverge = {}
+    # information from merge which is needed at commit time
+    # for example choosing filelog of which parent to commit
+    # TODO: use specific constants in future for this mapping
     if followcopies:
         branch_copies1, branch_copies2, diverge = copies.mergecopies(
             repo, wctx, p2, pa
@@ -626,7 +808,6 @@
 
     diff = m1.diff(m2, match=matcher)
 
-    actions = {}
     for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
         if n1 and n2:  # file exists on both local and remote side
             if f not in ma:
@@ -634,59 +815,60 @@
                 fa = branch_copies1.copy.get(
                     f, None
                 ) or branch_copies2.copy.get(f, None)
+                args, msg = None, None
                 if fa is not None:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f, f, fa, False, pa.node()),
-                        b'both renamed from %s' % fa,
-                    )
+                    args = (f, f, fa, False, pa.node())
+                    msg = b'both renamed from %s' % fa
                 else:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f, f, None, False, pa.node()),
-                        b'both created',
-                    )
+                    args = (f, f, None, False, pa.node())
+                    msg = b'both created'
+                mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
             else:
                 a = ma[f]
                 fla = ma.flags(f)
                 nol = b'l' not in fl1 + fl2 + fla
                 if n2 == a and fl2 == fla:
-                    actions[f] = (
-                        mergestatemod.ACTION_KEEP,
-                        (),
-                        b'remote unchanged',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
                     )
                 elif n1 == a and fl1 == fla:  # local unchanged - use remote
                     if n1 == n2:  # optimization: keep local content
-                        actions[f] = (
+                        mresult.addfile(
+                            f,
                             mergestatemod.ACTION_EXEC,
                             (fl2,),
                             b'update permissions',
                         )
                     else:
-                        actions[f] = (
-                            mergestatemod.ACTION_GET_OTHER_AND_STORE
-                            if branchmerge
-                            else mergestatemod.ACTION_GET,
+                        mresult.addfile(
+                            f,
+                            mergestatemod.ACTION_GET,
                             (fl2, False),
                             b'remote is newer',
                         )
+                        if branchmerge:
+                            mresult.addcommitinfo(
+                                f, b'filenode-source', b'other'
+                            )
                 elif nol and n2 == a:  # remote only changed 'x'
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_EXEC,
                         (fl2,),
                         b'update permissions',
                     )
                 elif nol and n1 == a:  # local only changed 'x'
-                    actions[f] = (
-                        mergestatemod.ACTION_GET_OTHER_AND_STORE
-                        if branchmerge
-                        else mergestatemod.ACTION_GET,
+                    mresult.addfile(
+                        f,
+                        mergestatemod.ACTION_GET,
                         (fl1, False),
                         b'remote is newer',
                     )
+                    if branchmerge:
+                        mresult.addcommitinfo(f, b'filenode-source', b'other')
                 else:  # both changed something
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_MERGE,
                         (f, f, f, False, pa.node()),
                         b'versions differ',
@@ -699,20 +881,23 @@
             ):  # directory rename, move local
                 f2 = branch_copies1.movewithdir[f]
                 if f2 in m2:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_MERGE,
                         (f, f2, None, True, pa.node()),
                         b'remote directory rename, both created',
                     )
                 else:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
                         (f, fl1),
                         b'remote directory rename - move from %s' % f,
                     )
             elif f in branch_copies1.copy:
                 f2 = branch_copies1.copy[f]
-                actions[f] = (
+                mresult.addfile(
+                    f,
                     mergestatemod.ACTION_MERGE,
                     (f, f2, f2, False, pa.node()),
                     b'local copied/moved from %s' % f2,
@@ -720,13 +905,15 @@
             elif f in ma:  # clean, a different, no remote
                 if n1 != ma[f]:
                     if acceptremote:
-                        actions[f] = (
+                        mresult.addfile(
+                            f,
                             mergestatemod.ACTION_REMOVE,
                             None,
                             b'remote delete',
                         )
                     else:
-                        actions[f] = (
+                        mresult.addfile(
+                            f,
                             mergestatemod.ACTION_CHANGED_DELETED,
                             (f, None, f, False, pa.node()),
                             b'prompt changed/deleted',
@@ -734,16 +921,12 @@
                 elif n1 == addednodeid:
                     # This file was locally added. We should forget it instead of
                     # deleting it.
-                    actions[f] = (
-                        mergestatemod.ACTION_FORGET,
-                        None,
-                        b'remote deleted',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
                     )
                 else:
-                    actions[f] = (
-                        mergestatemod.ACTION_REMOVE,
-                        None,
-                        b'other deleted',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
                     )
         elif n2:  # file exists only on remote side
             if f in copied1:
@@ -751,31 +934,29 @@
             elif f in branch_copies2.movewithdir:
                 f2 = branch_copies2.movewithdir[f]
                 if f2 in m1:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_MERGE,
                         (f2, f, None, False, pa.node()),
                         b'local directory rename, both created',
                     )
                 else:
-                    actions[f2] = (
+                    mresult.addfile(
+                        f2,
                         mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                         (f, fl2),
                         b'local directory rename - get from %s' % f,
                     )
             elif f in branch_copies2.copy:
                 f2 = branch_copies2.copy[f]
+                msg, args = None, None
                 if f2 in m2:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f2, f, f2, False, pa.node()),
-                        b'remote copied from %s' % f2,
-                    )
+                    args = (f2, f, f2, False, pa.node())
+                    msg = b'remote copied from %s' % f2
                 else:
-                    actions[f] = (
-                        mergestatemod.ACTION_MERGE,
-                        (f2, f, f2, True, pa.node()),
-                        b'remote moved from %s' % f2,
-                    )
+                    args = (f2, f, f2, True, pa.node())
+                    msg = b'remote moved from %s' % f2
+                mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
             elif f not in ma:
                 # local unknown, remote created: the logic is described by the
                 # following table:
@@ -789,19 +970,22 @@
                 # Checking whether the files are different is expensive, so we
                 # don't do that when we can avoid it.
                 if not force:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED,
                         (fl2,),
                         b'remote created',
                     )
                 elif not branchmerge:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED,
                         (fl2,),
                         b'remote created',
                     )
                 else:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED_MERGE,
                         (fl2, pa.node()),
                         b'remote created, get or merge',
@@ -814,20 +998,23 @@
                         df = branch_copies1.dirmove[d] + f[len(d) :]
                         break
                 if df is not None and df in m1:
-                    actions[df] = (
+                    mresult.addfile(
+                        df,
                         mergestatemod.ACTION_MERGE,
                         (df, f, f, False, pa.node()),
                         b'local directory rename - respect move '
                         b'from %s' % f,
                     )
                 elif acceptremote:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_CREATED,
                         (fl2,),
                         b'remote recreating',
                     )
                 else:
-                    actions[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_DELETED_CHANGED,
                         (None, f, f, False, pa.node()),
                         b'prompt deleted/changed',
@@ -835,39 +1022,36 @@
 
     if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
         # If we are merging, look for path conflicts.
-        checkpathconflicts(repo, wctx, p2, actions)
+        checkpathconflicts(repo, wctx, p2, mresult)
 
     narrowmatch = repo.narrowmatch()
     if not narrowmatch.always():
         # Updates "actions" in place
-        _filternarrowactions(narrowmatch, branchmerge, actions)
+        _filternarrowactions(narrowmatch, branchmerge, mresult)
 
     renamedelete = branch_copies1.renamedelete
     renamedelete.update(branch_copies2.renamedelete)
 
-    return actions, diverge, renamedelete
+    mresult.updatevalues(diverge, renamedelete)
+    return mresult
 
 
-def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
+def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
     """Resolves false conflicts where the nodeid changed but the content
        remained the same."""
     # We force a copy of actions.items() because we're going to mutate
     # actions as we resolve trivial conflicts.
-    for f, (m, args, msg) in list(actions.items()):
-        if (
-            m == mergestatemod.ACTION_CHANGED_DELETED
-            and f in ancestor
-            and not wctx[f].cmp(ancestor[f])
-        ):
+    for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
+        if f in ancestor and not wctx[f].cmp(ancestor[f]):
             # local did change but ended up with same content
-            actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
-        elif (
-            m == mergestatemod.ACTION_DELETED_CHANGED
-            and f in ancestor
-            and not mctx[f].cmp(ancestor[f])
-        ):
+            mresult.addfile(
+                f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
+            )
+
+    for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
+        if f in ancestor and not mctx[f].cmp(ancestor[f]):
             # remote did change but ended up with same content
-            del actions[f]  # don't get = keep local deleted
+            mresult.removefile(f)  # don't get = keep local deleted
 
 
 def calculateupdates(
@@ -891,13 +1075,14 @@
 
     Also filters out actions which are unrequired if repository is sparse.
 
-    Returns same 3 element tuple as manifestmerge().
+    Returns mergeresult object same as manifestmerge().
     """
     # Avoid cycle.
     from . import sparse
 
+    mresult = None
     if len(ancestors) == 1:  # default
-        actions, diverge, renamedelete = manifestmerge(
+        mresult = manifestmerge(
             repo,
             wctx,
             mctx,
@@ -908,7 +1093,7 @@
             acceptremote,
             followcopies,
         )
-        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
+        _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
 
     else:  # only when merge.preferancestor=* - the default
         repo.ui.note(
@@ -920,14 +1105,16 @@
             )
         )
 
-        # Call for bids
-        fbids = (
-            {}
-        )  # mapping filename to bids (action method to list af actions)
+        # mapping filename to bids (action method to list of actions)
+        # {FILENAME1 : BID1, FILENAME2 : BID2}
+        # BID is another dictionary which contains
+        # mapping of following form:
+        # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
+        fbids = {}
         diverge, renamedelete = None, None
         for ancestor in ancestors:
             repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
-            actions, diverge1, renamedelete1 = manifestmerge(
+            mresult1 = manifestmerge(
                 repo,
                 wctx,
                 mctx,
@@ -939,19 +1126,19 @@
                 followcopies,
                 forcefulldiff=True,
             )
-            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
+            _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
 
             # Track the shortest set of warning on the theory that bid
             # merge will correctly incorporate more information
-            if diverge is None or len(diverge1) < len(diverge):
-                diverge = diverge1
-            if renamedelete is None or len(renamedelete) < len(renamedelete1):
-                renamedelete = renamedelete1
+            if diverge is None or len(mresult1.diverge) < len(diverge):
+                diverge = mresult1.diverge
+            if renamedelete is None or len(renamedelete) < len(
+                mresult1.renamedelete
+            ):
+                renamedelete = mresult1.renamedelete
 
-            for f, a in sorted(pycompat.iteritems(actions)):
+            for f, a in mresult1.filemap(sort=True):
                 m, args, msg = a
-                if m == mergestatemod.ACTION_GET_OTHER_AND_STORE:
-                    m = mergestatemod.ACTION_GET
                 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                 if f in fbids:
                     d = fbids[f]
@@ -962,9 +1149,10 @@
                 else:
                     fbids[f] = {m: [a]}
 
+        # Call for bids
         # Pick the best bid for each file
         repo.ui.note(_(b'\nauction for merging merge bids\n'))
-        actions = {}
+        mresult = mergeresult()
         for f, bids in sorted(fbids.items()):
             # bids is a mapping from action method to list af actions
             # Consensus?
@@ -972,19 +1160,19 @@
                 m, l = list(bids.items())[0]
                 if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                     repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
-                    actions[f] = l[0]
+                    mresult.addfile(f, *l[0])
                     continue
             # If keep is an option, just do it.
             if mergestatemod.ACTION_KEEP in bids:
                 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
-                actions[f] = bids[mergestatemod.ACTION_KEEP][0]
+                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
                 continue
             # If there are gets and they all agree [how could they not?], do it.
             if mergestatemod.ACTION_GET in bids:
                 ga0 = bids[mergestatemod.ACTION_GET][0]
                 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
                     repo.ui.note(_(b" %s: picking 'get' action\n") % f)
-                    actions[f] = ga0
+                    mresult.addfile(f, *ga0)
                     continue
             # TODO: Consider other simple actions such as mode changes
             # Handle inefficient democrazy.
@@ -997,20 +1185,18 @@
             repo.ui.warn(
                 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
             )
-            actions[f] = l[0]
+            mresult.addfile(f, *l[0])
             continue
         repo.ui.note(_(b'end of auction\n\n'))
+        mresult.updatevalues(diverge, renamedelete)
 
     if wctx.rev() is None:
-        fractions = _forgetremoved(wctx, mctx, branchmerge)
-        actions.update(fractions)
+        _forgetremoved(wctx, mctx, branchmerge, mresult)
 
-    prunedactions = sparse.filterupdatesactions(
-        repo, wctx, mctx, branchmerge, actions
-    )
-    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
+    sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
+    _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
 
-    return prunedactions, diverge, renamedelete
+    return mresult
 
 
 def _getcwd():
@@ -1117,34 +1303,26 @@
     yield True, filedata
 
 
-def _prefetchfiles(repo, ctx, actions):
+def _prefetchfiles(repo, ctx, mresult):
     """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
     of merge actions.  ``ctx`` is the context being merged in."""
 
     # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
     # don't touch the context to be merged in.  'cd' is skipped, because
     # changed/deleted never resolves to something from the remote side.
-    oplist = [
-        actions[a]
-        for a in (
+    files = mresult.files(
+        [
             mergestatemod.ACTION_GET,
             mergestatemod.ACTION_DELETED_CHANGED,
             mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
             mergestatemod.ACTION_MERGE,
-        )
-    ]
+        ]
+    )
+
     prefetch = scmutil.prefetchfiles
     matchfiles = scmutil.matchfiles
     prefetch(
-        repo,
-        [
-            (
-                ctx.rev(),
-                matchfiles(
-                    repo, [f for sublist in oplist for f, args, msg in sublist]
-                ),
-            )
-        ],
+        repo, [(ctx.rev(), matchfiles(repo, files),)],
     )
 
 
@@ -1164,35 +1342,12 @@
         )
 
 
-def emptyactions():
-    """create an actions dict, to be populated and passed to applyupdates()"""
-    return {
-        m: []
-        for m in (
-            mergestatemod.ACTION_ADD,
-            mergestatemod.ACTION_ADD_MODIFIED,
-            mergestatemod.ACTION_FORGET,
-            mergestatemod.ACTION_GET,
-            mergestatemod.ACTION_CHANGED_DELETED,
-            mergestatemod.ACTION_DELETED_CHANGED,
-            mergestatemod.ACTION_REMOVE,
-            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
-            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
-            mergestatemod.ACTION_MERGE,
-            mergestatemod.ACTION_EXEC,
-            mergestatemod.ACTION_KEEP,
-            mergestatemod.ACTION_PATH_CONFLICT,
-            mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
-            mergestatemod.ACTION_GET_OTHER_AND_STORE,
-        )
-    }
-
-
 def applyupdates(
-    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
+    repo, mresult, wctx, mctx, overwrite, wantfiledata, labels=None,
 ):
     """apply the merge action list to the working directory
 
+    mresult is a mergeresult object representing result of the merge
     wctx is the working copy context
     mctx is the context to be merged into the working copy
 
@@ -1202,25 +1357,31 @@
     batchget.
     """
 
-    _prefetchfiles(repo, mctx, actions)
+    _prefetchfiles(repo, mctx, mresult)
 
     updated, merged, removed = 0, 0, 0
     ms = mergestatemod.mergestate.clean(
         repo, wctx.p1().node(), mctx.node(), labels
     )
 
-    # add ACTION_GET_OTHER_AND_STORE to mergestate
-    for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
-        ms.addmergedother(e[0])
+    for f, op in pycompat.iteritems(mresult.commitinfo):
+        # the other side of filenode was choosen while merging, store this in
+        # mergestate so that it can be reused on commit
+        ms.addcommitinfo(f, op)
 
     moves = []
-    for m, l in actions.items():
-        l.sort()
 
     # 'cd' and 'dc' actions are treated like other merge conflicts
-    mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
-    mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
-    mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
+    mergeactions = list(
+        mresult.getactions(
+            [
+                mergestatemod.ACTION_CHANGED_DELETED,
+                mergestatemod.ACTION_DELETED_CHANGED,
+                mergestatemod.ACTION_MERGE,
+            ],
+            sort=True,
+        )
+    )
     for f, args, msg in mergeactions:
         f1, f2, fa, move, anc = args
         if f == b'.hgsubstate':  # merged internally
@@ -1251,22 +1412,18 @@
             wctx[f].audit()
             wctx[f].remove()
 
-    numupdates = sum(
-        len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
-    )
+    numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
     progress = repo.ui.makeprogress(
         _(b'updating'), unit=_(b'files'), total=numupdates
     )
 
-    if [
-        a
-        for a in actions[mergestatemod.ACTION_REMOVE]
-        if a[0] == b'.hgsubstate'
-    ]:
+    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
         subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
 
     # record path conflicts
-    for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
+    for f, args, msg in mresult.getactions(
+        [mergestatemod.ACTION_PATH_CONFLICT], sort=True
+    ):
         f1, fo = args
         s = repo.ui.status
         s(
@@ -1294,14 +1451,16 @@
         cost,
         batchremove,
         (repo, wctx),
-        actions[mergestatemod.ACTION_REMOVE],
+        list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
     )
     for i, item in prog:
         progress.increment(step=i, item=item)
-    removed = len(actions[mergestatemod.ACTION_REMOVE])
+    removed = mresult.len((mergestatemod.ACTION_REMOVE,))
 
     # resolve path conflicts (must come before getting)
-    for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
+    for f, args, msg in mresult.getactions(
+        [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
+    ):
         repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
         (f0, origf0) = args
         if wctx[f0].lexists():
@@ -1320,7 +1479,7 @@
         cost,
         batchget,
         (repo, mctx, wctx, wantfiledata),
-        actions[mergestatemod.ACTION_GET],
+        list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
         threadsafe=threadsafe,
         hasretval=True,
     )
@@ -1331,33 +1490,42 @@
         else:
             i, item = res
             progress.increment(step=i, item=item)
-    updated = len(actions[mergestatemod.ACTION_GET])
 
-    if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
+    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
         subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
 
     # forget (manifest only, just log it) (must come first)
-    for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_FORGET,), sort=True
+    ):
         repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
         progress.increment(item=f)
 
     # re-add (manifest only, just log it)
-    for f, args, msg in actions[mergestatemod.ACTION_ADD]:
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_ADD,), sort=True
+    ):
         repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
         progress.increment(item=f)
 
     # re-add/mark as modified (manifest only, just log it)
-    for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
+    ):
         repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
         progress.increment(item=f)
 
     # keep (noop, just log it)
-    for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_KEEP,), sort=True
+    ):
         repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
         # no progress
 
     # directory rename, move local
-    for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
+    ):
         repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
         progress.increment(item=f)
         f0, flags = args
@@ -1365,26 +1533,36 @@
         wctx[f].audit()
         wctx[f].write(wctx.filectx(f0).data(), flags)
         wctx[f0].remove()
-        updated += 1
 
     # local directory rename, get
-    for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
+    ):
         repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
         progress.increment(item=f)
         f0, flags = args
         repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
         wctx[f].write(mctx.filectx(f0).data(), flags)
-        updated += 1
 
     # exec
-    for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
+    for f, args, msg in mresult.getactions(
+        (mergestatemod.ACTION_EXEC,), sort=True
+    ):
         repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
         progress.increment(item=f)
         (flags,) = args
         wctx[f].audit()
         wctx[f].setflags(b'l' in flags, b'x' in flags)
-        updated += 1
 
+    # these actions update the file
+    updated = mresult.len(
+        (
+            mergestatemod.ACTION_GET,
+            mergestatemod.ACTION_EXEC,
+            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
+            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
+        )
+    )
     # the ordering is important here -- ms.mergedriver will raise if the merge
     # driver has changed, and we want to be able to bypass it when overwrite is
     # True
@@ -1458,9 +1636,12 @@
 
     extraactions = ms.actions()
     if extraactions:
-        mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
+        mfiles = {
+            a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
+        }
         for k, acts in pycompat.iteritems(extraactions):
-            actions[k].extend(acts)
+            for a in acts:
+                mresult.addfile(a[0], k, *a[1:])
             if k == mergestatemod.ACTION_GET and wantfiledata:
                 # no filedata until mergestate is updated to provide it
                 for a in acts:
@@ -1483,13 +1664,13 @@
             # those lists aren't consulted again.
             mfiles.difference_update(a[0] for a in acts)
 
-        actions[mergestatemod.ACTION_MERGE] = [
-            a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
-        ]
+        for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
+            if a[0] not in mfiles:
+                mresult.removefile(a[0])
 
     progress.complete()
     assert len(getfiledata) == (
-        len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
+        mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
     )
     return updateresult(updated, merged, removed, unresolved), getfiledata
 
@@ -1734,7 +1915,7 @@
             followcopies = False
 
         ### calculate phase
-        actionbyfile, diverge, renamedelete = calculateupdates(
+        mresult = calculateupdates(
             repo,
             wc,
             p2,
@@ -1748,25 +1929,18 @@
         )
 
         if updatecheck == UPDATECHECK_NO_CONFLICT:
-            for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
-                if m not in (
-                    mergestatemod.ACTION_GET,
-                    mergestatemod.ACTION_KEEP,
-                    mergestatemod.ACTION_EXEC,
-                    mergestatemod.ACTION_REMOVE,
-                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
-                    mergestatemod.ACTION_GET_OTHER_AND_STORE,
-                ):
-                    msg = _(b"conflicting changes")
-                    hint = _(b"commit or update --clean to discard changes")
-                    raise error.Abort(msg, hint=hint)
+            if mresult.hasconflicts():
+                msg = _(b"conflicting changes")
+                hint = _(b"commit or update --clean to discard changes")
+                raise error.Abort(msg, hint=hint)
 
         # Prompt and create actions. Most of this is in the resolve phase
         # already, but we can't handle .hgsubstate in filemerge or
         # subrepoutil.submerge yet so we have to keep prompting for it.
-        if b'.hgsubstate' in actionbyfile:
+        vals = mresult.getfile(b'.hgsubstate')
+        if vals:
             f = b'.hgsubstate'
-            m, args, msg = actionbyfile[f]
+            m, args, msg = vals
             prompts = filemerge.partextras(labels)
             prompts[b'f'] = f
             if m == mergestatemod.ACTION_CHANGED_DELETED:
@@ -1779,22 +1953,19 @@
                     % prompts,
                     0,
                 ):
-                    actionbyfile[f] = (
-                        mergestatemod.ACTION_REMOVE,
-                        None,
-                        b'prompt delete',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
                     )
                 elif f in p1:
-                    actionbyfile[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_ADD_MODIFIED,
                         None,
                         b'prompt keep',
                     )
                 else:
-                    actionbyfile[f] = (
-                        mergestatemod.ACTION_ADD,
-                        None,
-                        b'prompt keep',
+                    mresult.addfile(
+                        f, mergestatemod.ACTION_ADD, None, b'prompt keep',
                     )
             elif m == mergestatemod.ACTION_DELETED_CHANGED:
                 f1, f2, fa, move, anc = args
@@ -1811,24 +1982,14 @@
                     )
                     == 0
                 ):
-                    actionbyfile[f] = (
+                    mresult.addfile(
+                        f,
                         mergestatemod.ACTION_GET,
                         (flags, False),
                         b'prompt recreating',
                     )
                 else:
-                    del actionbyfile[f]
-
-        # Convert to dictionary-of-lists format
-        actions = emptyactions()
-        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
-            if m not in actions:
-                actions[m] = []
-            actions[m].append((f, args, msg))
-
-        # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate
-        for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
-            actions[mergestatemod.ACTION_GET].append(e)
+                    mresult.removefile(f)
 
         if not util.fscasesensitive(repo.path):
             # check collision between files only in p2 for clean update
@@ -1837,10 +1998,10 @@
             ):
                 _checkcollision(repo, p2.manifest(), None)
             else:
-                _checkcollision(repo, wc.manifest(), actions)
+                _checkcollision(repo, wc.manifest(), mresult)
 
         # divergent renames
-        for f, fl in sorted(pycompat.iteritems(diverge)):
+        for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
             repo.ui.warn(
                 _(
                     b"note: possible conflict - %s was renamed "
@@ -1852,7 +2013,7 @@
                 repo.ui.warn(b" %s\n" % nf)
 
         # rename and delete
-        for f, fl in sorted(pycompat.iteritems(renamedelete)):
+        for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
             repo.ui.warn(
                 _(
                     b"note: possible conflict - %s was deleted "
@@ -1876,19 +2037,19 @@
             repo.vfs.write(b'updatestate', p2.hex())
 
         _advertisefsmonitor(
-            repo, len(actions[mergestatemod.ACTION_GET]), p1.node()
+            repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
         )
 
         wantfiledata = updatedirstate and not branchmerge
         stats, getfiledata = applyupdates(
-            repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
+            repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels,
         )
 
         if updatedirstate:
             with repo.dirstate.parentchange():
                 repo.setparents(fp1, fp2)
                 mergestatemod.recordupdates(
-                    repo, actions, branchmerge, getfiledata
+                    repo, mresult.actionsdict, branchmerge, getfiledata
                 )
                 # update completed, clear state
                 util.unlink(repo.vfs.join(b'updatestate'))
--- a/mercurial/mergestate.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/mergestate.py	Mon Sep 07 15:20:31 2020 -0400
@@ -1,5 +1,6 @@
 from __future__ import absolute_import
 
+import collections
 import errno
 import shutil
 import struct
@@ -80,6 +81,8 @@
 MERGE_RECORD_DRIVER_RESOLVED = b'd'
 # represents that the file was automatically merged in favor
 # of other version. This info is used on commit.
+# This is now deprecated; commit-related information is now
+# stored in RECORD_FILE_VALUES
 MERGE_RECORD_MERGED_OTHER = b'o'
 
 #####
@@ -119,8 +122,6 @@
 ACTION_KEEP = b'k'
 ACTION_EXEC = b'e'
 ACTION_CREATED_MERGE = b'cm'
-# GET the other/remote side and store this info in mergestate
-ACTION_GET_OTHER_AND_STORE = b'gs'
 
 
 class mergestate(object):
@@ -196,7 +197,7 @@
 
     def reset(self, node=None, other=None, labels=None):
         self._state = {}
-        self._stateextras = {}
+        self._stateextras = collections.defaultdict(dict)
         self._local = None
         self._other = None
         self._labels = labels
@@ -222,7 +223,7 @@
         of on disk file.
         """
         self._state = {}
-        self._stateextras = {}
+        self._stateextras = collections.defaultdict(dict)
         self._local = None
         self._other = None
         for var in ('localctx', 'otherctx'):
@@ -258,7 +259,13 @@
                 LEGACY_RECORD_RESOLVED_OTHER,
             ):
                 bits = record.split(b'\0')
-                self._state[bits[0]] = bits[1:]
+                # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
+                # and we now store related information in _stateextras, so
+                # let's write to _stateextras directly
+                if bits[1] == MERGE_RECORD_MERGED_OTHER:
+                    self._stateextras[bits[0]][b'filenode-source'] = b'other'
+                else:
+                    self._state[bits[0]] = bits[1:]
             elif rtype == RECORD_FILE_VALUES:
                 filename, rawextras = record.split(b'\0', 1)
                 extraparts = rawextras.split(b'\0')
@@ -487,8 +494,6 @@
                 records.append(
                     (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                 )
-            elif v[0] == MERGE_RECORD_MERGED_OTHER:
-                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
             elif v[1] == nullhex or v[6] == nullhex:
                 # Change/Delete or Delete/Change conflicts. These are stored in
                 # 'C' records. v[1] is the local file, and is nullhex when the
@@ -587,8 +592,10 @@
         self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
         self._dirty = True
 
-    def addmergedother(self, path):
-        self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex]
+    def addcommitinfo(self, path, data):
+        """ Store information which is required at commit
+        into _stateextras. """
+        self._stateextras[path].update(data)
         self._dirty = True
 
     def __contains__(self, dfile):
@@ -628,7 +635,7 @@
                 yield f
 
     def extras(self, filename):
-        return self._stateextras.setdefault(filename, {})
+        return self._stateextras[filename]
 
     def _resolve(self, preresolve, dfile, wctx):
         """rerun merge process for file path `dfile`.
@@ -637,8 +644,6 @@
         """
         if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
             return True, 0
-        if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER:
-            return True, 0
         stateentry = self._state[dfile]
         state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
         octx = self._repo[self._other]
--- a/mercurial/metadata.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/metadata.py	Mon Sep 07 15:20:31 2020 -0400
@@ -22,6 +22,79 @@
 )
 
 
+class ChangingFiles(object):
+    """A class recording the changes made to a file by a revision
+    """
+
+    def __init__(
+        self, touched=(), added=(), removed=(), p1_copies=(), p2_copies=(),
+    ):
+        self._added = set(added)
+        self._removed = set(removed)
+        self._touched = set(touched)
+        self._touched.update(self._added)
+        self._touched.update(self._removed)
+        self._p1_copies = dict(p1_copies)
+        self._p2_copies = dict(p2_copies)
+
+    @property
+    def added(self):
+        return frozenset(self._added)
+
+    def mark_added(self, filename):
+        self._added.add(filename)
+        self._touched.add(filename)
+
+    def update_added(self, filenames):
+        for f in filenames:
+            self.mark_added(f)
+
+    @property
+    def removed(self):
+        return frozenset(self._removed)
+
+    def mark_removed(self, filename):
+        self._removed.add(filename)
+        self._touched.add(filename)
+
+    def update_removed(self, filenames):
+        for f in filenames:
+            self.mark_removed(f)
+
+    @property
+    def touched(self):
+        return frozenset(self._touched)
+
+    def mark_touched(self, filename):
+        self._touched.add(filename)
+
+    def update_touched(self, filenames):
+        for f in filenames:
+            self.mark_touched(f)
+
+    @property
+    def copied_from_p1(self):
+        return self._p1_copies.copy()
+
+    def mark_copied_from_p1(self, source, dest):
+        self._p1_copies[dest] = source
+
+    def update_copies_from_p1(self, copies):
+        for dest, source in copies.items():
+            self.mark_copied_from_p1(source, dest)
+
+    @property
+    def copied_from_p2(self):
+        return self._p2_copies.copy()
+
+    def mark_copied_from_p2(self, source, dest):
+        self._p2_copies[dest] = source
+
+    def update_copies_from_p2(self, copies):
+        for dest, source in copies.items():
+            self.mark_copied_from_p2(source, dest)
+
+
 def computechangesetfilesadded(ctx):
     """return the list of files added in a changeset
     """
@@ -181,6 +254,30 @@
         return None
 
 
+def encode_copies_sidedata(files):
+    sortedfiles = sorted(files.touched)
+    sidedata = {}
+    p1copies = files.copied_from_p1
+    if p1copies:
+        p1copies = encodecopies(sortedfiles, p1copies)
+        sidedata[sidedatamod.SD_P1COPIES] = p1copies
+    p2copies = files.copied_from_p2
+    if p2copies:
+        p2copies = encodecopies(sortedfiles, p2copies)
+        sidedata[sidedatamod.SD_P2COPIES] = p2copies
+    filesadded = files.added
+    if filesadded:
+        filesadded = encodefileindices(sortedfiles, filesadded)
+        sidedata[sidedatamod.SD_FILESADDED] = filesadded
+    filesremoved = files.removed
+    if filesremoved:
+        filesremoved = encodefileindices(sortedfiles, filesremoved)
+        sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
+    if not sidedata:
+        sidedata = None
+    return sidedata
+
+
 def _getsidedata(srcrepo, rev):
     ctx = srcrepo[rev]
     filescopies = computechangesetcopies(ctx)
--- a/mercurial/narrowspec.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/narrowspec.py	Mon Sep 07 15:20:31 2020 -0400
@@ -9,12 +9,12 @@
 
 from .i18n import _
 from .pycompat import getattr
-from .interfaces import repository
 from . import (
     error,
     match as matchmod,
     merge,
     mergestate as mergestatemod,
+    requirements,
     scmutil,
     sparse,
     util,
@@ -186,7 +186,7 @@
 
 
 def savebackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     svfs = repo.svfs
     svfs.tryunlink(backupname)
@@ -194,13 +194,13 @@
 
 
 def restorebackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
 
 
 def savewcbackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     vfs = repo.vfs
     vfs.tryunlink(backupname)
@@ -212,7 +212,7 @@
 
 
 def restorewcbackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     # It may not exist in old repos
     if repo.vfs.exists(backupname):
@@ -220,7 +220,7 @@
 
 
 def clearwcbackup(repo, backupname):
-    if repository.NARROW_REQUIREMENT not in repo.requirements:
+    if requirements.NARROW_REQUIREMENT not in repo.requirements:
         return
     repo.vfs.tryunlink(backupname)
 
@@ -272,15 +272,19 @@
 
 
 def _writeaddedfiles(repo, pctx, files):
-    actions = merge.emptyactions()
-    addgaction = actions[mergestatemod.ACTION_GET].append
+    mresult = merge.mergeresult()
     mf = repo[b'.'].manifest()
     for f in files:
         if not repo.wvfs.exists(f):
-            addgaction((f, (mf.flags(f), False), b"narrowspec updated"))
+            mresult.addfile(
+                f,
+                mergestatemod.ACTION_GET,
+                (mf.flags(f), False),
+                b"narrowspec updated",
+            )
     merge.applyupdates(
         repo,
-        actions,
+        mresult,
         wctx=repo[None],
         mctx=repo[b'.'],
         overwrite=False,
--- a/mercurial/phases.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/phases.py	Mon Sep 07 15:20:31 2020 -0400
@@ -121,6 +121,7 @@
 from . import (
     error,
     pycompat,
+    requirements,
     smartset,
     txnutil,
     util,
@@ -154,7 +155,7 @@
 
 def supportinternal(repo):
     """True if the internal phase can be used on a repository"""
-    return b'internal-phase' in repo.requirements
+    return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
 
 
 def _readroots(repo, phasedefaults=None):
--- a/mercurial/registrar.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/registrar.py	Mon Sep 07 15:20:31 2020 -0400
@@ -121,7 +121,7 @@
         return self._docformat % (decl, doc)
 
     def _extrasetup(self, name, func):
-        """Execute exra setup for registered function, if needed
+        """Execute extra setup for registered function, if needed
         """
 
 
--- a/mercurial/repair.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/repair.py	Mon Sep 07 15:20:31 2020 -0400
@@ -26,6 +26,7 @@
     pathutil,
     phases,
     pycompat,
+    requirements,
     util,
 )
 from .utils import (
@@ -418,7 +419,7 @@
 
 def manifestrevlogs(repo):
     yield repo.manifestlog.getstorage(b'')
-    if b'treemanifest' in repo.requirements:
+    if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
         # This logic is safe if treemanifest isn't enabled, but also
         # pointless, so we skip it if treemanifest isn't enabled.
         for unencoded, encoded, size in repo.store.datafiles():
@@ -476,7 +477,7 @@
 
         progress.complete()
 
-        if b'treemanifest' in repo.requirements:
+        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             # This logic is safe if treemanifest isn't enabled, but also
             # pointless, so we skip it if treemanifest isn't enabled.
             for dir in pathutil.dirs(seenfiles):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/requirements.py	Mon Sep 07 15:20:31 2020 -0400
@@ -0,0 +1,67 @@
+# requirements.py - objects and functions related to repository requirements
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+NARROW_REQUIREMENT = b'narrowhg-experimental'
+
+# Enables sparse working directory usage
+SPARSE_REQUIREMENT = b'exp-sparse'
+
+# Enables the internal phase which is used to hide changesets instead
+# of stripping them
+INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
+
+# Stores manifest in Tree structure
+TREEMANIFEST_REQUIREMENT = b'treemanifest'
+
+# Increment the sub-version when the revlog v2 format changes to lock out old
+# clients.
+REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
+
+# A repository with the sparserevlog feature will have delta chains that
+# can spread over a larger span. Sparse reading cuts these large spans into
+# pieces, so that each piece isn't too big.
+# Without the sparserevlog capability, reading from the repository could use
+# huge amounts of memory, because the whole span would be read at once,
+# including all the intermediate revisions that aren't pertinent for the chain.
+# This is why once a repository has enabled sparse-read, it becomes required.
+SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
+
+# A repository with the sidedataflag requirement will allow to store extra
+# information for revision without altering their original hashes.
+SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
+
+# A repository with the copies-sidedata-changeset requirement will store
+# copies related information in changeset's sidedata.
+COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
+
+# The repository use persistent nodemap for the changelog and the manifest.
+NODEMAP_REQUIREMENT = b'persistent-nodemap'
+
+# Denotes that the current repository is a share
+SHARED_REQUIREMENT = b'shared'
+
+# Denotes that current repository is a share and the shared source path is
+# relative to the current repository root path
+RELATIVE_SHARED_REQUIREMENT = b'relshared'
+
+# List of requirements which are working directory specific
+# These requirements cannot be shared between repositories if they
+# share the same store
+# * sparse is a working directory specific functionality and hence working
+#   directory specific requirement
+# * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
+#   represent that the current working copy/repository shares the store of another
+#   repo. Hence both of them should be stored in working copy
+WORKING_DIR_REQUIREMENTS = {
+    SPARSE_REQUIREMENT,
+    SHARED_REQUIREMENT,
+    RELATIVE_SHARED_REQUIREMENT,
+}
--- a/mercurial/shelve.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/shelve.py	Mon Sep 07 15:20:31 2020 -0400
@@ -772,7 +772,7 @@
     with ui.configoverride({(b'ui', b'quiet'): True}):
         hg.update(repo, wctx.node())
         ui.pushbuffer(True)
-        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents())
+        cmdutil.revert(ui, repo, shelvectx)
         ui.popbuffer()
 
 
--- a/mercurial/sparse.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/sparse.py	Mon Sep 07 15:20:31 2020 -0400
@@ -21,11 +21,13 @@
     mergestate as mergestatemod,
     pathutil,
     pycompat,
+    requirements,
     scmutil,
     util,
 )
 from .utils import hashutil
 
+
 # Whether sparse features are enabled. This variable is intended to be
 # temporary to facilitate porting sparse to core. It should eventually be
 # a per-repo option, possibly a repo requirement.
@@ -269,19 +271,17 @@
 
     sparsematch = matcher(repo, includetemp=False)
     dirstate = repo.dirstate
-    actions = []
+    mresult = mergemod.mergeresult()
     dropped = []
     tempincludes = readtemporaryincludes(repo)
     for file in tempincludes:
         if file in dirstate and not sparsematch(file):
             message = _(b'dropping temporarily included sparse files')
-            actions.append((file, None, message))
+            mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
             dropped.append(file)
 
-    typeactions = mergemod.emptyactions()
-    typeactions[b'r'] = actions
     mergemod.applyupdates(
-        repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+        repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
     )
 
     # Fix dirstate
@@ -366,16 +366,16 @@
     return result
 
 
-def filterupdatesactions(repo, wctx, mctx, branchmerge, actions):
+def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
     """Filter updates to only lay out files that match the sparse rules."""
     if not enabled:
-        return actions
+        return
 
     oldrevs = [pctx.rev() for pctx in wctx.parents()]
     oldsparsematch = matcher(repo, oldrevs)
 
     if oldsparsematch.always():
-        return actions
+        return
 
     files = set()
     prunedactions = {}
@@ -390,23 +390,29 @@
         sparsematch = matcher(repo, [mctx.rev()])
 
     temporaryfiles = []
-    for file, action in pycompat.iteritems(actions):
+    for file, action in mresult.filemap():
         type, args, msg = action
         files.add(file)
         if sparsematch(file):
             prunedactions[file] = action
-        elif type == b'm':
+        elif type == mergestatemod.ACTION_MERGE:
             temporaryfiles.append(file)
             prunedactions[file] = action
         elif branchmerge:
-            if type != b'k':
+            if type != mergestatemod.ACTION_KEEP:
                 temporaryfiles.append(file)
                 prunedactions[file] = action
-        elif type == b'f':
+        elif type == mergestatemod.ACTION_FORGET:
             prunedactions[file] = action
         elif file in wctx:
-            prunedactions[file] = (b'r', args, msg)
+            prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
 
+        # in case of a rename on one side, it is possible that f1 might not
+        # be present in the sparse checkout; we should include it
+        # TODO: should we do the same for f2?
+        # exists as a separate check because the file can be in sparse and hence
+        # if we try to club this condition in above `elif type == ACTION_MERGE`
+        # it won't be triggered
         if branchmerge and type == mergestatemod.ACTION_MERGE:
             f1, f2, fa, move, anc = args
             if not sparsematch(f1):
@@ -423,22 +429,25 @@
         addtemporaryincludes(repo, temporaryfiles)
 
         # Add the new files to the working copy so they can be merged, etc
-        actions = []
+        tmresult = mergemod.mergeresult()
         message = b'temporarily adding to sparse checkout'
         wctxmanifest = repo[None].manifest()
         for file in temporaryfiles:
             if file in wctxmanifest:
                 fctx = repo[None][file]
-                actions.append((file, (fctx.flags(), False), message))
+                tmresult.addfile(
+                    file,
+                    mergestatemod.ACTION_GET,
+                    (fctx.flags(), False),
+                    message,
+                )
 
-        typeactions = mergemod.emptyactions()
-        typeactions[b'g'] = actions
         mergemod.applyupdates(
-            repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+            repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
         )
 
         dirstate = repo.dirstate
-        for file, flags, msg in actions:
+        for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
             dirstate.normal(file)
 
     profiles = activeconfig(repo)[2]
@@ -453,11 +462,15 @@
             new = sparsematch(file)
             if not old and new:
                 flags = mf.flags(file)
-                prunedactions[file] = (b'g', (flags, False), b'')
+                prunedactions[file] = (
+                    mergestatemod.ACTION_GET,
+                    (flags, False),
+                    b'',
+                )
             elif old and not new:
-                prunedactions[file] = (b'r', [], b'')
+                prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
 
-    return prunedactions
+    mresult.setactions(prunedactions)
 
 
 def refreshwdir(repo, origstatus, origsparsematch, force=False):
@@ -487,7 +500,7 @@
             _(b'could not update sparseness due to pending changes')
         )
 
-    # Calculate actions
+    # Calculate merge result
     dirstate = repo.dirstate
     ctx = repo[b'.']
     added = []
@@ -495,8 +508,7 @@
     dropped = []
     mf = ctx.manifest()
     files = set(mf)
-
-    actions = {}
+    mresult = mergemod.mergeresult()
 
     for file in files:
         old = origsparsematch(file)
@@ -506,17 +518,19 @@
         if (new and not old) or (old and new and not file in dirstate):
             fl = mf.flags(file)
             if repo.wvfs.exists(file):
-                actions[file] = (b'e', (fl,), b'')
+                mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
                 lookup.append(file)
             else:
-                actions[file] = (b'g', (fl, False), b'')
+                mresult.addfile(
+                    file, mergestatemod.ACTION_GET, (fl, False), b''
+                )
                 added.append(file)
         # Drop files that are newly excluded, or that still exist in
         # the dirstate.
         elif (old and not new) or (not old and not new and file in dirstate):
             dropped.append(file)
             if file not in pending:
-                actions[file] = (b'r', [], b'')
+                mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
 
     # Verify there are no pending changes in newly included files
     abort = False
@@ -540,13 +554,8 @@
             if old and not new:
                 dropped.append(file)
 
-    # Apply changes to disk
-    typeactions = mergemod.emptyactions()
-    for f, (m, args, msg) in pycompat.iteritems(actions):
-        typeactions[m].append((f, args, msg))
-
     mergemod.applyupdates(
-        repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False
+        repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
     )
 
     # Fix dirstate
@@ -599,11 +608,11 @@
     # updated. But this requires massive rework to matcher() and its
     # consumers.
 
-    if b'exp-sparse' in oldrequires and removing:
-        repo.requirements.discard(b'exp-sparse')
+    if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
+        repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
         scmutil.writereporequirements(repo)
-    elif b'exp-sparse' not in oldrequires:
-        repo.requirements.add(b'exp-sparse')
+    elif requirements.SPARSE_REQUIREMENT not in oldrequires:
+        repo.requirements.add(requirements.SPARSE_REQUIREMENT)
         scmutil.writereporequirements(repo)
 
     try:
--- a/mercurial/state.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/state.py	Mon Sep 07 15:20:31 2020 -0400
@@ -164,10 +164,17 @@
         operation
         """
         if not self._cmdhint:
-            return _(b"use 'hg %s --continue' or 'hg %s --abort'") % (
-                self._opname,
-                self._opname,
-            )
+            if not self._stopflag:
+                return _(b"use 'hg %s --continue' or 'hg %s --abort'") % (
+                    self._opname,
+                    self._opname,
+                )
+            else:
+                return _(
+                    b"use 'hg %s --continue', 'hg %s --abort', "
+                    b"or 'hg %s --stop'"
+                ) % (self._opname, self._opname, self._opname,)
+
         return self._cmdhint
 
     def msg(self):
--- a/mercurial/store.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/store.py	Mon Sep 07 15:20:31 2020 -0400
@@ -373,10 +373,18 @@
     return mode
 
 
-_data = (
-    b'bookmarks narrowspec data meta 00manifest.d 00manifest.i'
-    b' 00changelog.d 00changelog.i phaseroots obsstore'
-)
+_data = [
+    b'bookmarks',
+    b'narrowspec',
+    b'data',
+    b'meta',
+    b'00manifest.d',
+    b'00manifest.i',
+    b'00changelog.d',
+    b'00changelog.i',
+    b'phaseroots',
+    b'obsstore',
+]
 
 
 def isrevlog(f, kind, st):
@@ -447,7 +455,7 @@
             yield x
 
     def copylist(self):
-        return [b'requires'] + _data.split()
+        return [b'requires'] + _data
 
     def write(self, tr):
         pass
@@ -494,9 +502,7 @@
         return self.path + b'/' + encodefilename(f)
 
     def copylist(self):
-        return [b'requires', b'00changelog.i'] + [
-            b'store/' + f for f in _data.split()
-        ]
+        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
 
 
 class fncache(object):
@@ -686,12 +692,20 @@
 
     def copylist(self):
         d = (
-            b'bookmarks narrowspec data meta dh fncache phaseroots obsstore'
-            b' 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
+            b'bookmarks',
+            b'narrowspec',
+            b'data',
+            b'meta',
+            b'dh',
+            b'fncache',
+            b'phaseroots',
+            b'obsstore',
+            b'00manifest.d',
+            b'00manifest.i',
+            b'00changelog.d',
+            b'00changelog.i',
         )
-        return [b'requires', b'00changelog.i'] + [
-            b'store/' + f for f in d.split()
-        ]
+        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
 
     def write(self, tr):
         self.fncache.write(tr)
--- a/mercurial/subrepo.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/subrepo.py	Mon Sep 07 15:20:31 2020 -0400
@@ -986,12 +986,11 @@
 
     def filerevert(self, *pats, **opts):
         ctx = self._repo[opts['rev']]
-        parents = self._repo.dirstate.parents()
         if opts.get('all'):
             pats = [b'set:modified()']
         else:
             pats = []
-        cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
+        cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts)
 
     def shortid(self, revid):
         return revid[:12]
--- a/mercurial/templater.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/templater.py	Mon Sep 07 15:20:31 2020 -0400
@@ -800,10 +800,10 @@
 
 
 def stylelist():
-    paths = templatepaths()
-    if not paths:
+    path = templatedir()
+    if not path:
         return _(b'no templates found, try `hg debuginstall` for more info')
-    dirlist = os.listdir(paths[0])
+    dirlist = os.listdir(path)
     stylelist = []
     for file in dirlist:
         split = file.split(b".")
@@ -814,17 +814,46 @@
     return b", ".join(sorted(stylelist))
 
 
-def _readmapfile(mapfile):
+def _open_mapfile(mapfile):
+    if os.path.exists(mapfile):
+        return util.posixfile(mapfile, b'rb')
+    raise error.Abort(
+        _(b"style '%s' not found") % mapfile,
+        hint=_(b"available styles: %s") % stylelist(),
+    )
+
+
+def _readmapfile(fp, mapfile):
     """Load template elements from the given map file"""
-    if not os.path.exists(mapfile):
-        raise error.Abort(
-            _(b"style '%s' not found") % mapfile,
-            hint=_(b"available styles: %s") % stylelist(),
-        )
+    base = os.path.dirname(mapfile)
+    conf = config.config()
 
-    base = os.path.dirname(mapfile)
-    conf = config.config(includepaths=templatepaths())
-    conf.read(mapfile, remap={b'': b'templates'})
+    def include(rel, remap, sections):
+        subresource = None
+        if base:
+            abs = os.path.normpath(os.path.join(base, rel))
+            if os.path.isfile(abs):
+                subresource = util.posixfile(abs, b'rb')
+        if not subresource:
+            if pycompat.ossep not in rel:
+                abs = rel
+                subresource = resourceutil.open_resource(
+                    b'mercurial.templates', rel
+                )
+            else:
+                dir = templatedir()
+                if dir:
+                    abs = os.path.normpath(os.path.join(dir, rel))
+                    if os.path.isfile(abs):
+                        subresource = util.posixfile(abs, b'rb')
+        if subresource:
+            data = subresource.read()
+            conf.parse(
+                abs, data, sections=sections, remap=remap, include=include,
+            )
+
+    data = fp.read()
+    conf.parse(mapfile, data, remap={b'': b'templates'}, include=include)
 
     cache = {}
     tmap = {}
@@ -833,21 +862,22 @@
     val = conf.get(b'templates', b'__base__')
     if val and val[0] not in b"'\"":
         # treat as a pointer to a base class for this style
-        path = util.normpath(os.path.join(base, val))
+        path = os.path.normpath(os.path.join(base, val))
 
         # fallback check in template paths
         if not os.path.exists(path):
-            for p in templatepaths():
-                p2 = util.normpath(os.path.join(p, val))
+            dir = templatedir()
+            if dir is not None:
+                p2 = os.path.normpath(os.path.join(dir, val))
                 if os.path.isfile(p2):
                     path = p2
-                    break
-                p3 = util.normpath(os.path.join(p2, b"map"))
-                if os.path.isfile(p3):
-                    path = p3
-                    break
+                else:
+                    p3 = os.path.normpath(os.path.join(p2, b"map"))
+                    if os.path.isfile(p3):
+                        path = p3
 
-        cache, tmap, aliases = _readmapfile(path)
+        fp = _open_mapfile(path)
+        cache, tmap, aliases = _readmapfile(fp, path)
 
     for key, val in conf[b'templates'].items():
         if not val:
@@ -883,7 +913,8 @@
         """Get parsed tree for the given template name. Use a local cache."""
         if t not in self.cache:
             try:
-                self.cache[t] = util.readfile(self._map[t])
+                mapfile, fp = open_template(self._map[t])
+                self.cache[t] = fp.read()
             except KeyError as inst:
                 raise templateutil.TemplateNotFound(
                     _(b'"%s" not in template map') % inst.args[0]
@@ -975,6 +1006,7 @@
     def frommapfile(
         cls,
         mapfile,
+        fp=None,
         filters=None,
         defaults=None,
         resources=None,
@@ -984,7 +1016,9 @@
     ):
         """Create templater from the specified map file"""
         t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk)
-        cache, tmap, aliases = _readmapfile(mapfile)
+        if not fp:
+            fp = _open_mapfile(mapfile)
+        cache, tmap, aliases = _readmapfile(fp, mapfile)
         t._loader.cache.update(cache)
         t._loader._map = tmap
         t._loader._aliasmap = _aliasrules.buildmap(aliases)
@@ -1045,59 +1079,42 @@
         return stream
 
 
-def templatepaths():
-    '''return locations used for template files.'''
-    pathsrel = [b'templates']
-    paths = [
-        os.path.normpath(os.path.join(resourceutil.datapath, f))
-        for f in pathsrel
-    ]
-    return [p for p in paths if os.path.isdir(p)]
-
-
-def templatepath(name):
-    '''return location of template file. returns None if not found.'''
-    for p in templatepaths():
-        f = os.path.join(p, name)
-        if os.path.exists(f):
-            return f
-    return None
+def templatedir():
+    '''return the directory used for template files, or None.'''
+    path = os.path.normpath(os.path.join(resourceutil.datapath, b'templates'))
+    return path if os.path.isdir(path) else None
 
 
-def stylemap(styles, paths=None):
-    """Return path to mapfile for a given style.
+def open_template(name, templatepath=None):
+    '''returns a file-like object for the given template, and its full path
 
-    Searches mapfile in the following locations:
-    1. templatepath/style/map
-    2. templatepath/map-style
-    3. templatepath/map
-    """
-
-    if paths is None:
-        paths = templatepaths()
-    elif isinstance(paths, bytes):
-        paths = [paths]
-
-    if isinstance(styles, bytes):
-        styles = [styles]
+    If the name is a relative path and we're in a frozen binary, the template
+    will be read from the mercurial.templates package instead. The returned path
+    will then be the relative path.
+    '''
+    # Does the name point directly to a map file?
+    if os.path.isfile(name) or os.path.isabs(name):
+        return name, open(name, mode='rb')
 
-    for style in styles:
-        # only plain name is allowed to honor template paths
-        if (
-            not style
-            or style in (pycompat.oscurdir, pycompat.ospardir)
-            or pycompat.ossep in style
-            or pycompat.osaltsep
-            and pycompat.osaltsep in style
-        ):
-            continue
-        locations = [os.path.join(style, b'map'), b'map-' + style]
-        locations.append(b'map')
+    # Does the name point to a template in the provided templatepath, or
+    # in mercurial/templates/ if no path was provided?
+    if templatepath is None:
+        templatepath = templatedir()
+    if templatepath is not None:
+        f = os.path.join(templatepath, name)
+        return f, open(f, mode='rb')
 
-        for path in paths:
-            for location in locations:
-                mapfile = os.path.join(path, location)
-                if os.path.isfile(mapfile):
-                    return style, mapfile
+    # Otherwise try to read it using the resources API
+    name_parts = pycompat.sysstr(name).split('/')
+    package_name = '.'.join(['mercurial', 'templates'] + name_parts[:-1])
+    return (
+        name,
+        resourceutil.open_resource(package_name, name_parts[-1]),
+    )
 
-    raise RuntimeError(b"No hgweb templates found in %r" % paths)
+
+def try_open_template(name, templatepath=None):
+    try:
+        return open_template(name, templatepath)
+    except (EnvironmentError, ImportError):
+        return None, None
--- a/mercurial/upgrade.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/upgrade.py	Mon Sep 07 15:20:31 2020 -0400
@@ -20,6 +20,7 @@
     manifest,
     metadata,
     pycompat,
+    requirements,
     revlog,
     scmutil,
     util,
@@ -31,7 +32,7 @@
 # list of requirements that request a clone of all revlog if added/removed
 RECLONES_REQUIREMENTS = {
     b'generaldelta',
-    localrepo.SPARSEREVLOG_REQUIREMENT,
+    requirements.SPARSEREVLOG_REQUIREMENT,
 }
 
 
@@ -58,12 +59,12 @@
     return {
         # The upgrade code does not yet support these experimental features.
         # This is an artificial limitation.
-        b'treemanifest',
+        requirements.TREEMANIFEST_REQUIREMENT,
         # This was a precursor to generaldelta and was never enabled by default.
         # It should (hopefully) not exist in the wild.
         b'parentdelta',
         # Upgrade should operate on the actual store, not the shared link.
-        b'shared',
+        requirements.SHARED_REQUIREMENT,
     }
 
 
@@ -75,10 +76,10 @@
     to be allowed.
     """
     supported = {
-        localrepo.SPARSEREVLOG_REQUIREMENT,
-        localrepo.SIDEDATA_REQUIREMENT,
-        localrepo.COPIESSDC_REQUIREMENT,
-        localrepo.NODEMAP_REQUIREMENT,
+        requirements.SPARSEREVLOG_REQUIREMENT,
+        requirements.SIDEDATA_REQUIREMENT,
+        requirements.COPIESSDC_REQUIREMENT,
+        requirements.NODEMAP_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -103,10 +104,10 @@
         b'generaldelta',
         b'revlogv1',
         b'store',
-        localrepo.SPARSEREVLOG_REQUIREMENT,
-        localrepo.SIDEDATA_REQUIREMENT,
-        localrepo.COPIESSDC_REQUIREMENT,
-        localrepo.NODEMAP_REQUIREMENT,
+        requirements.SPARSEREVLOG_REQUIREMENT,
+        requirements.SIDEDATA_REQUIREMENT,
+        requirements.COPIESSDC_REQUIREMENT,
+        requirements.NODEMAP_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -131,10 +132,10 @@
         b'dotencode',
         b'fncache',
         b'generaldelta',
-        localrepo.SPARSEREVLOG_REQUIREMENT,
-        localrepo.SIDEDATA_REQUIREMENT,
-        localrepo.COPIESSDC_REQUIREMENT,
-        localrepo.NODEMAP_REQUIREMENT,
+        requirements.SPARSEREVLOG_REQUIREMENT,
+        requirements.SIDEDATA_REQUIREMENT,
+        requirements.COPIESSDC_REQUIREMENT,
+        requirements.NODEMAP_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -338,7 +339,7 @@
 class sparserevlog(requirementformatvariant):
     name = b'sparserevlog'
 
-    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
+    _requirement = requirements.SPARSEREVLOG_REQUIREMENT
 
     default = True
 
@@ -364,7 +365,7 @@
 class sidedata(requirementformatvariant):
     name = b'sidedata'
 
-    _requirement = localrepo.SIDEDATA_REQUIREMENT
+    _requirement = requirements.SIDEDATA_REQUIREMENT
 
     default = False
 
@@ -380,7 +381,7 @@
 class persistentnodemap(requirementformatvariant):
     name = b'persistent-nodemap'
 
-    _requirement = localrepo.NODEMAP_REQUIREMENT
+    _requirement = requirements.NODEMAP_REQUIREMENT
 
     default = False
 
@@ -395,7 +396,7 @@
 class copiessdc(requirementformatvariant):
     name = b'copies-sdc'
 
-    _requirement = localrepo.COPIESSDC_REQUIREMENT
+    _requirement = requirements.COPIESSDC_REQUIREMENT
 
     default = False
 
@@ -725,7 +726,7 @@
     sidedatacompanion = None
     removedreqs = srcrepo.requirements - dstrepo.requirements
     addedreqs = dstrepo.requirements - srcrepo.requirements
-    if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
+    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
 
         def sidedatacompanion(rl, rev):
             rl = getattr(rl, '_revlog', rl)
@@ -733,9 +734,9 @@
                 return True, (), {}
             return False, (), {}
 
-    elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
+    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
         sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
-    elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
+    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
         sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
     return sidedatacompanion
 
--- a/mercurial/utils/storageutil.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/utils/storageutil.py	Mon Sep 07 15:20:31 2020 -0400
@@ -180,9 +180,9 @@
 
     ``fileid`` can be:
 
-    * A 20 byte binary node.
+    * A 20 or 32 byte binary node.
     * An integer revision number
-    * A 40 byte hex node.
+    * A 40 or 64 byte hex node.
     * A bytes that can be parsed as an integer representing a revision number.
 
     ``identifier`` is used to populate ``error.LookupError`` with an identifier
@@ -198,14 +198,14 @@
                 b'%d' % fileid, identifier, _(b'no match found')
             )
 
-    if len(fileid) == 20:
+    if len(fileid) in (20, 32):
         try:
             store.rev(fileid)
             return fileid
         except error.LookupError:
             pass
 
-    if len(fileid) == 40:
+    if len(fileid) in (40, 64):
         try:
             rawnode = bin(fileid)
             store.rev(rawnode)
--- a/mercurial/worker.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/mercurial/worker.py	Mon Sep 07 15:20:31 2020 -0400
@@ -71,8 +71,12 @@
         def __init__(self, wrapped):
             self._wrapped = wrapped
 
-        def __getattr__(self, attr):
-            return getattr(self._wrapped, attr)
+        # Do NOT implement readinto() by making it delegate to
+        # _wrapped.readinto(), since that is unbuffered. The unpickler is fine
+        # with just read() and readline(), so we don't need to implement it.
+
+        def readline(self):
+            return self._wrapped.readline()
 
         # issue multiple reads until size is fulfilled
         def read(self, size=-1):
@@ -91,7 +95,7 @@
 
             del view
             del buf[pos:]
-            return buf
+            return bytes(buf)
 
 
 else:
@@ -211,7 +215,7 @@
     parentpid = os.getpid()
     pipes = []
     retval = {}
-    for pargs in partition(args, workers):
+    for pargs in partition(args, min(workers, len(args))):
         # Every worker gets its own pipe to send results on, so we don't have to
         # implement atomic writes larger than PIPE_BUF. Each forked process has
         # its own pipe's descriptors in the local variables, and the parent
--- a/relnotes/next	Wed Sep 02 12:31:37 2020 +0200
+++ b/relnotes/next	Mon Sep 07 15:20:31 2020 -0400
@@ -1,5 +1,9 @@
 == New Features ==
 
+ * `hg mv -A` can now be used with `--at-rev`. It behaves just like
+   `hg cp -A --at-rev`, i.e. it marks the destination as a copy of the
+   source whether or not the source still exists (but the source must
+   exist in the parent revision).
 
 
 == New Experimental Features ==
--- a/rust/hg-core/src/dirstate/dirstate_map.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -364,11 +364,17 @@
             return Ok(None);
         }
 
-        let parents = parse_dirstate(
-            &mut self.state_map,
-            &mut self.copy_map,
-            file_contents,
-        )?;
+        let (parents, entries, copies) = parse_dirstate(file_contents)?;
+        self.state_map.extend(
+            entries
+                .into_iter()
+                .map(|(path, entry)| (path.to_owned(), entry)),
+        );
+        self.copy_map.extend(
+            copies
+                .into_iter()
+                .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
+        );
 
         if !self.dirty_parents {
             self.set_parents(&parents);
--- a/rust/hg-core/src/dirstate/parsers.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-core/src/dirstate/parsers.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -19,17 +19,21 @@
 /// Dirstate entries have a static part of 8 + 32 + 32 + 32 + 32 bits.
 const MIN_ENTRY_SIZE: usize = 17;
 
-// TODO parse/pack: is mutate-on-loop better for performance?
+type ParseResult<'a> = (
+    DirstateParents,
+    Vec<(&'a HgPath, DirstateEntry)>,
+    Vec<(&'a HgPath, &'a HgPath)>,
+);
 
 #[timed]
 pub fn parse_dirstate(
-    state_map: &mut StateMap,
-    copy_map: &mut CopyMap,
     contents: &[u8],
-) -> Result<DirstateParents, DirstateParseError> {
+) -> Result<ParseResult, DirstateParseError> {
     if contents.len() < PARENT_SIZE * 2 {
         return Err(DirstateParseError::TooLittleData);
     }
+    let mut copies = vec![];
+    let mut entries = vec![];
 
     let mut curr_pos = PARENT_SIZE * 2;
     let parents = DirstateParents {
@@ -63,24 +67,21 @@
         };
 
         if let Some(copy_path) = copy {
-            copy_map.insert(
-                HgPath::new(path).to_owned(),
-                HgPath::new(copy_path).to_owned(),
-            );
+            copies.push((HgPath::new(path), HgPath::new(copy_path)));
         };
-        state_map.insert(
-            HgPath::new(path).to_owned(),
+        entries.push((
+            HgPath::new(path),
             DirstateEntry {
                 state,
                 mode,
                 size,
                 mtime,
             },
-        );
+        ));
         curr_pos = curr_pos + MIN_ENTRY_SIZE + (path_len);
     }
 
-    Ok(parents)
+    Ok((parents, entries, copies))
 }
 
 /// `now` is the duration in seconds since the Unix epoch
@@ -285,14 +286,17 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = FastHashMap::default();
-        let mut new_copy_map: CopyMap = FastHashMap::default();
-        let new_parents = parse_dirstate(
-            &mut new_state_map,
-            &mut new_copy_map,
-            result.as_slice(),
-        )
-        .unwrap();
+        let (new_parents, entries, copies) =
+            parse_dirstate(result.as_slice()).unwrap();
+        let new_state_map: StateMap = entries
+            .into_iter()
+            .map(|(path, entry)| (path.to_owned(), entry))
+            .collect();
+        let new_copy_map: CopyMap = copies
+            .into_iter()
+            .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+            .collect();
+
         assert_eq!(
             (parents, state_map, copymap),
             (new_parents, new_state_map, new_copy_map)
@@ -360,14 +364,17 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = FastHashMap::default();
-        let mut new_copy_map: CopyMap = FastHashMap::default();
-        let new_parents = parse_dirstate(
-            &mut new_state_map,
-            &mut new_copy_map,
-            result.as_slice(),
-        )
-        .unwrap();
+        let (new_parents, entries, copies) =
+            parse_dirstate(result.as_slice()).unwrap();
+        let new_state_map: StateMap = entries
+            .into_iter()
+            .map(|(path, entry)| (path.to_owned(), entry))
+            .collect();
+        let new_copy_map: CopyMap = copies
+            .into_iter()
+            .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+            .collect();
+
         assert_eq!(
             (parents, state_map, copymap),
             (new_parents, new_state_map, new_copy_map)
@@ -403,14 +410,16 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = FastHashMap::default();
-        let mut new_copy_map: CopyMap = FastHashMap::default();
-        let new_parents = parse_dirstate(
-            &mut new_state_map,
-            &mut new_copy_map,
-            result.as_slice(),
-        )
-        .unwrap();
+        let (new_parents, entries, copies) =
+            parse_dirstate(result.as_slice()).unwrap();
+        let new_state_map: StateMap = entries
+            .into_iter()
+            .map(|(path, entry)| (path.to_owned(), entry))
+            .collect();
+        let new_copy_map: CopyMap = copies
+            .into_iter()
+            .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+            .collect();
 
         assert_eq!(
             (
--- a/rust/hg-core/src/dirstate/status.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-core/src/dirstate/status.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -13,7 +13,6 @@
     dirstate::SIZE_FROM_OTHER_PARENT,
     filepatterns::PatternFileWarning,
     matchers::{get_ignore_function, Matcher, VisitChildrenSet},
-    operations::Operation,
     utils::{
         files::{find_dirs, HgMetadata},
         hg_path::{
--- a/rust/hg-core/src/lib.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-core/src/lib.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -57,6 +57,7 @@
 pub enum DirstateParseError {
     TooLittleData,
     Overflow,
+    // TODO refactor to use bytes instead of String
     CorruptedEntry(String),
     Damaged,
 }
--- a/rust/hg-core/src/operations/dirstate_status.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-core/src/operations/dirstate_status.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -7,7 +7,6 @@
 
 use crate::dirstate::status::{build_response, Dispatch, HgPathCow, Status};
 use crate::matchers::Matcher;
-use crate::operations::Operation;
 use crate::{DirstateStatus, StatusError};
 
 /// A tuple of the paths that need to be checked in the filelog because it's
@@ -15,10 +14,8 @@
 /// files.
 pub type LookupAndStatus<'a> = (Vec<HgPathCow<'a>>, DirstateStatus<'a>);
 
-impl<'a, M: Matcher + Sync> Operation<LookupAndStatus<'a>> for Status<'a, M> {
-    type Error = StatusError;
-
-    fn run(&self) -> Result<LookupAndStatus<'a>, Self::Error> {
+impl<'a, M: Matcher + Sync> Status<'a, M> {
+    pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
         let (traversed_sender, traversed_receiver) =
             crossbeam::channel::unbounded();
 
--- a/rust/hg-core/src/operations/find_root.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-core/src/operations/find_root.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -1,4 +1,3 @@
-use super::Operation;
 use std::fmt;
 use std::path::{Path, PathBuf};
 
@@ -45,12 +44,8 @@
             current_dir: Some(current_dir),
         }
     }
-}
 
-impl<'a> Operation<PathBuf> for FindRoot<'a> {
-    type Error = FindRootError;
-
-    fn run(&self) -> Result<PathBuf, Self::Error> {
+    pub fn run(&self) -> Result<PathBuf, FindRootError> {
         let current_dir = match self.current_dir {
             None => std::env::current_dir().or_else(|e| {
                 Err(FindRootError {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/list_tracked_files.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -0,0 +1,85 @@
+// list_tracked_files.rs
+//
+// Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use super::find_root;
+use crate::dirstate::parsers::parse_dirstate;
+use crate::utils::hg_path::HgPath;
+use crate::{DirstateParseError, EntryState};
+use rayon::prelude::*;
+use std::convert::From;
+use std::fmt;
+use std::fs;
+use std::io;
+use std::path::PathBuf;
+
+/// Kind of error encountered by ListTrackedFiles
+#[derive(Debug)]
+pub enum ListTrackedFilesErrorKind {
+    ParseError(DirstateParseError),
+}
+
+/// A ListTrackedFiles error
+#[derive(Debug)]
+pub struct ListTrackedFilesError {
+    /// Kind of error encountered by ListTrackedFiles
+    pub kind: ListTrackedFilesErrorKind,
+}
+
+impl std::error::Error for ListTrackedFilesError {}
+
+impl fmt::Display for ListTrackedFilesError {
+    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        unimplemented!()
+    }
+}
+
+impl From<ListTrackedFilesErrorKind> for ListTrackedFilesError {
+    fn from(kind: ListTrackedFilesErrorKind) -> Self {
+        ListTrackedFilesError { kind }
+    }
+}
+
+/// List files under Mercurial control in the working directory
+pub struct ListTrackedFiles {
+    root: PathBuf,
+}
+
+impl ListTrackedFiles {
+    pub fn new() -> Result<Self, find_root::FindRootError> {
+        let root = find_root::FindRoot::new().run()?;
+        Ok(ListTrackedFiles { root })
+    }
+
+    /// Load the tracked files data from disk
+    pub fn load(&self) -> Result<ListDirstateTrackedFiles, io::Error> {
+        let dirstate = &self.root.join(".hg/dirstate");
+        let content = fs::read(&dirstate)?;
+        Ok(ListDirstateTrackedFiles { content })
+    }
+}
+
+/// List files under Mercurial control in the working directory
+/// by reading the dirstate
+pub struct ListDirstateTrackedFiles {
+    content: Vec<u8>,
+}
+
+impl ListDirstateTrackedFiles {
+    pub fn run(&self) -> Result<Vec<&HgPath>, ListTrackedFilesError> {
+        let (_, entries, _) = parse_dirstate(&self.content)
+            .map_err(ListTrackedFilesErrorKind::ParseError)?;
+        let mut files: Vec<&HgPath> = entries
+            .into_iter()
+            .filter_map(|(path, entry)| match entry.state {
+                EntryState::Removed => None,
+                _ => Some(path),
+            })
+            .collect();
+        files.par_sort_unstable();
+        Ok(files)
+    }
+}
--- a/rust/hg-core/src/operations/mod.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-core/src/operations/mod.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -1,13 +1,17 @@
+//! A distinction is made between operations and commands.
+//! An operation is what can be done whereas a command is what is exposed by
+//! the cli. A single command can use several operations to achieve its goal.
+
 mod dirstate_status;
 mod find_root;
+mod list_tracked_files;
 pub use find_root::{FindRoot, FindRootError, FindRootErrorKind};
+pub use list_tracked_files::{
+    ListTrackedFiles, ListTrackedFilesError, ListTrackedFilesErrorKind,
+};
 
-/// An interface for high-level hg operations.
-///
-/// A distinction is made between operation and commands.
-/// An operation is what can be done whereas a command is what is exposed by
-/// the cli. A single command can use several operations to achieve its goal.
-pub trait Operation<T> {
-    type Error;
-    fn run(&self) -> Result<T, Self::Error>;
-}
+// TODO add an `Operation` trait when GAT have landed (rust #44265):
+// there is no way to currently define a trait which can both return
+// references to `self` and to passed data, which is what we would need.
+// Generic Associated Types may fix this and allow us to have a unified
+// interface.
--- a/rust/hg-cpython/src/parsers.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/hg-cpython/src/parsers.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -14,7 +14,7 @@
     PythonObject, ToPyObject,
 };
 use hg::{
-    pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf,
+    pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry,
     DirstatePackError, DirstateParents, DirstateParseError, FastHashMap,
     PARENT_SIZE,
 };
@@ -29,11 +29,17 @@
     copymap: PyDict,
     st: PyBytes,
 ) -> PyResult<PyTuple> {
-    let mut dirstate_map = FastHashMap::default();
-    let mut copies = FastHashMap::default();
+    match parse_dirstate(st.data(py)) {
+        Ok((parents, entries, copies)) => {
+            let dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries
+                .into_iter()
+                .map(|(path, entry)| (path.to_owned(), entry))
+                .collect();
+            let copy_map: FastHashMap<HgPathBuf, HgPathBuf> = copies
+                .into_iter()
+                .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
+                .collect();
 
-    match parse_dirstate(&mut dirstate_map, &mut copies, st.data(py)) {
-        Ok(parents) => {
             for (filename, entry) in &dirstate_map {
                 dmap.set_item(
                     py,
@@ -41,7 +47,7 @@
                     make_dirstate_tuple(py, entry)?,
                 )?;
             }
-            for (path, copy_path) in copies {
+            for (path, copy_path) in copy_map {
                 copymap.set_item(
                     py,
                     PyBytes::new(py, path.as_bytes()),
--- a/rust/rhg/src/commands.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/rhg/src/commands.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -1,9 +1,10 @@
+pub mod files;
 pub mod root;
 use crate::error::CommandError;
 
 /// The common trait for rhg commands
 ///
 /// Normalize the interface of the commands provided by rhg
-pub trait Command {
+pub trait Command<'a> {
     fn run(&self) -> Result<(), CommandError>;
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/files.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -0,0 +1,50 @@
+use crate::commands::Command;
+use crate::error::{CommandError, CommandErrorKind};
+use crate::ui::Ui;
+use hg::operations::{ListTrackedFiles, ListTrackedFilesErrorKind};
+
+pub const HELP_TEXT: &str = "
+List tracked files.
+
+Returns 0 on success.
+";
+
+pub struct FilesCommand<'a> {
+    ui: &'a Ui,
+}
+
+impl<'a> FilesCommand<'a> {
+    pub fn new(ui: &'a Ui) -> Self {
+        FilesCommand { ui }
+    }
+}
+
+impl<'a> Command<'a> for FilesCommand<'a> {
+    fn run(&self) -> Result<(), CommandError> {
+        let operation_builder = ListTrackedFiles::new()?;
+        let operation = operation_builder.load().map_err(|err| {
+            CommandErrorKind::Abort(Some(
+                [b"abort: ", err.to_string().as_bytes(), b"\n"]
+                    .concat()
+                    .to_vec(),
+            ))
+        })?;
+        let files = operation.run().map_err(|err| match err.kind {
+            ListTrackedFilesErrorKind::ParseError(_) => {
+                CommandErrorKind::Abort(Some(
+                    // TODO find a better error message
+                    b"abort: parse error\n".to_vec(),
+                ))
+            }
+        })?;
+
+        let mut stdout = self.ui.stdout_buffer();
+        for file in files {
+            stdout.write_all(file.as_bytes())?;
+            stdout.write_all(b"\n")?;
+        }
+        stdout.flush()?;
+
+        Ok(())
+    }
+}
--- a/rust/rhg/src/commands/root.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/rhg/src/commands/root.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -1,9 +1,8 @@
 use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
+use crate::error::CommandError;
 use crate::ui::Ui;
-use hg::operations::{FindRoot, FindRootError, FindRootErrorKind, Operation};
+use hg::operations::FindRoot;
 use hg::utils::files::get_bytes_from_path;
-use std::path::PathBuf;
 
 pub const HELP_TEXT: &str = "
 Print the root directory of the current repository.
@@ -11,66 +10,25 @@
 Returns 0 on success.
 ";
 
-pub struct RootCommand {
-    ui: Ui,
+pub struct RootCommand<'a> {
+    ui: &'a Ui,
 }
 
-impl RootCommand {
-    pub fn new() -> Self {
-        RootCommand { ui: Ui::new() }
+impl<'a> RootCommand<'a> {
+    pub fn new(ui: &'a Ui) -> Self {
+        RootCommand { ui }
     }
+}
 
-    fn display_found_path(
-        &self,
-        path_buf: PathBuf,
-    ) -> Result<(), CommandError> {
+impl<'a> Command<'a> for RootCommand<'a> {
+    fn run(&self) -> Result<(), CommandError> {
+        let path_buf = FindRoot::new().run()?;
+
         let bytes = get_bytes_from_path(path_buf);
 
         // TODO use formating macro
         self.ui.write_stdout(&[bytes.as_slice(), b"\n"].concat())?;
 
-        Err(CommandErrorKind::Ok.into())
-    }
-
-    fn display_error(&self, error: FindRootError) -> Result<(), CommandError> {
-        match error.kind {
-            FindRootErrorKind::RootNotFound(path) => {
-                let bytes = get_bytes_from_path(path);
-
-                // TODO use formating macro
-                self.ui.write_stderr(
-                    &[
-                        b"abort: no repository found in '",
-                        bytes.as_slice(),
-                        b"' (.hg not found)!\n",
-                    ]
-                    .concat(),
-                )?;
-
-                Err(CommandErrorKind::RootNotFound.into())
-            }
-            FindRootErrorKind::GetCurrentDirError(e) => {
-                // TODO use formating macro
-                self.ui.write_stderr(
-                    &[
-                        b"abort: error getting current working directory: ",
-                        e.to_string().as_bytes(),
-                        b"\n",
-                    ]
-                    .concat(),
-                )?;
-
-                Err(CommandErrorKind::CurrentDirNotFound.into())
-            }
-        }
+        Ok(())
     }
 }
-
-impl Command for RootCommand {
-    fn run(&self) -> Result<(), CommandError> {
-        match FindRoot::new().run() {
-            Ok(path_buf) => self.display_found_path(path_buf),
-            Err(e) => self.display_error(e),
-        }
-    }
-}
--- a/rust/rhg/src/error.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/rhg/src/error.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -1,36 +1,68 @@
 use crate::exitcode;
 use crate::ui::UiError;
+use hg::operations::{FindRootError, FindRootErrorKind};
+use hg::utils::files::get_bytes_from_path;
 use std::convert::From;
+use std::path::PathBuf;
 
 /// The kind of command error
-#[derive(Debug, PartialEq)]
+#[derive(Debug)]
 pub enum CommandErrorKind {
-    /// The command finished without error
-    Ok,
     /// The root of the repository cannot be found
-    RootNotFound,
+    RootNotFound(PathBuf),
     /// The current directory cannot be found
-    CurrentDirNotFound,
+    CurrentDirNotFound(std::io::Error),
     /// The standard output stream cannot be written to
     StdoutError,
     /// The standard error stream cannot be written to
     StderrError,
+    /// The command aborted
+    Abort(Option<Vec<u8>>),
 }
 
 impl CommandErrorKind {
     pub fn get_exit_code(&self) -> exitcode::ExitCode {
         match self {
-            CommandErrorKind::Ok => exitcode::OK,
-            CommandErrorKind::RootNotFound => exitcode::ABORT,
-            CommandErrorKind::CurrentDirNotFound => exitcode::ABORT,
+            CommandErrorKind::RootNotFound(_) => exitcode::ABORT,
+            CommandErrorKind::CurrentDirNotFound(_) => exitcode::ABORT,
             CommandErrorKind::StdoutError => exitcode::ABORT,
             CommandErrorKind::StderrError => exitcode::ABORT,
+            CommandErrorKind::Abort(_) => exitcode::ABORT,
+        }
+    }
+
+    /// Return the message corresponding to the error kind if any
+    pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
+        match self {
+            // TODO use formating macro
+            CommandErrorKind::RootNotFound(path) => {
+                let bytes = get_bytes_from_path(path);
+                Some(
+                    [
+                        b"abort: no repository found in '",
+                        bytes.as_slice(),
+                        b"' (.hg not found)!\n",
+                    ]
+                    .concat(),
+                )
+            }
+            // TODO use formating macro
+            CommandErrorKind::CurrentDirNotFound(e) => Some(
+                [
+                    b"abort: error getting current working directory: ",
+                    e.to_string().as_bytes(),
+                    b"\n",
+                ]
+                .concat(),
+            ),
+            CommandErrorKind::Abort(message) => message.to_owned(),
+            _ => None,
         }
     }
 }
 
 /// The error type for the Command trait
-#[derive(Debug, PartialEq)]
+#[derive(Debug)]
 pub struct CommandError {
     pub kind: CommandErrorKind,
 }
@@ -40,6 +72,11 @@
     pub fn exit(&self) -> () {
         std::process::exit(self.kind.get_exit_code())
     }
+
+    /// Return the message corresponding to the command error if any
+    pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
+        self.kind.get_error_message_bytes()
+    }
 }
 
 impl From<CommandErrorKind> for CommandError {
@@ -58,3 +95,16 @@
         }
     }
 }
+
+impl From<FindRootError> for CommandError {
+    fn from(err: FindRootError) -> Self {
+        match err.kind {
+            FindRootErrorKind::RootNotFound(path) => CommandError {
+                kind: CommandErrorKind::RootNotFound(path),
+            },
+            FindRootErrorKind::GetCurrentDirError(e) => CommandError {
+                kind: CommandErrorKind::CurrentDirNotFound(e),
+            },
+        }
+    }
+}
--- a/rust/rhg/src/main.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/rhg/src/main.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -16,15 +16,21 @@
         .version("0.0.1")
         .subcommand(
             SubCommand::with_name("root").about(commands::root::HELP_TEXT),
+        )
+        .subcommand(
+            SubCommand::with_name("files").about(commands::files::HELP_TEXT),
         );
 
     let matches = app.clone().get_matches_safe().unwrap_or_else(|_| {
         std::process::exit(exitcode::UNIMPLEMENTED_COMMAND)
     });
 
+    let ui = ui::Ui::new();
+
     let command_result = match matches.subcommand_name() {
         Some(name) => match name {
-            "root" => commands::root::RootCommand::new().run(),
+            "root" => commands::root::RootCommand::new(&ui).run(),
+            "files" => commands::files::FilesCommand::new(&ui).run(),
             _ => std::process::exit(exitcode::UNIMPLEMENTED_COMMAND),
         },
         _ => {
@@ -37,6 +43,15 @@
 
     match command_result {
         Ok(_) => std::process::exit(exitcode::OK),
-        Err(e) => e.exit(),
+        Err(e) => {
+            let message = e.get_error_message_bytes();
+            if let Some(msg) = message {
+                match ui.write_stderr(&msg) {
+                    Ok(_) => (),
+                    Err(_) => std::process::exit(exitcode::ABORT),
+                };
+            };
+            e.exit()
+        }
     }
 }
--- a/rust/rhg/src/ui.rs	Wed Sep 02 12:31:37 2020 +0200
+++ b/rust/rhg/src/ui.rs	Mon Sep 07 15:20:31 2020 -0400
@@ -1,7 +1,11 @@
 use std::io;
-use std::io::Write;
+use std::io::{ErrorKind, Write};
 
-pub struct Ui {}
+#[derive(Debug)]
+pub struct Ui {
+    stdout: std::io::Stdout,
+    stderr: std::io::Stderr,
+}
 
 /// The kind of user interface error
 pub enum UiError {
@@ -14,41 +18,89 @@
 /// The commandline user interface
 impl Ui {
     pub fn new() -> Self {
-        Ui {}
+        Ui {
+            stdout: std::io::stdout(),
+            stderr: std::io::stderr(),
+        }
+    }
+
+    /// Returns a buffered handle on stdout for faster batch printing
+    /// operations.
+    pub fn stdout_buffer(&self) -> StdoutBuffer<std::io::StdoutLock> {
+        StdoutBuffer::new(self.stdout.lock())
     }
 
     /// Write bytes to stdout
     pub fn write_stdout(&self, bytes: &[u8]) -> Result<(), UiError> {
-        let mut stdout = io::stdout();
-
-        self.write_stream(&mut stdout, bytes)
-            .or_else(|e| self.into_stdout_error(e))?;
+        let mut stdout = self.stdout.lock();
 
-        stdout.flush().or_else(|e| self.into_stdout_error(e))
-    }
+        stdout
+            .write_all(bytes)
+            .or_else(|e| handle_stdout_error(e))?;
 
-    fn into_stdout_error(&self, error: io::Error) -> Result<(), UiError> {
-        self.write_stderr(
-            &[b"abort: ", error.to_string().as_bytes(), b"\n"].concat(),
-        )?;
-        Err(UiError::StdoutError(error))
+        stdout.flush().or_else(|e| handle_stdout_error(e))
     }
 
     /// Write bytes to stderr
     pub fn write_stderr(&self, bytes: &[u8]) -> Result<(), UiError> {
-        let mut stderr = io::stderr();
+        let mut stderr = self.stderr.lock();
+
+        stderr
+            .write_all(bytes)
+            .or_else(|e| handle_stderr_error(e))?;
+
+        stderr.flush().or_else(|e| handle_stderr_error(e))
+    }
+}
 
-        self.write_stream(&mut stderr, bytes)
-            .or_else(|e| Err(UiError::StderrError(e)))?;
+/// A buffered stdout writer for faster batch printing operations.
+pub struct StdoutBuffer<W: Write> {
+    buf: io::BufWriter<W>,
+}
 
-        stderr.flush().or_else(|e| Err(UiError::StderrError(e)))
+impl<W: Write> StdoutBuffer<W> {
+    pub fn new(writer: W) -> Self {
+        let buf = io::BufWriter::new(writer);
+        Self { buf }
+    }
+
+    /// Write bytes to stdout buffer
+    pub fn write_all(&mut self, bytes: &[u8]) -> Result<(), UiError> {
+        self.buf
+            .write_all(bytes)
+            .or_else(|e| handle_stdout_error(e))
     }
 
-    fn write_stream(
-        &self,
-        stream: &mut impl Write,
-        bytes: &[u8],
-    ) -> Result<(), io::Error> {
-        stream.write_all(bytes)
+    /// Flush bytes to stdout
+    pub fn flush(&mut self) -> Result<(), UiError> {
+        self.buf.flush().or_else(|e| handle_stdout_error(e))
     }
 }
+
+/// Sometimes writing to stdout is not possible, try writing to stderr to
+/// signal that failure, otherwise just bail.
+fn handle_stdout_error(error: io::Error) -> Result<(), UiError> {
+    if let ErrorKind::BrokenPipe = error.kind() {
+        // This makes `| head` work for example
+        return Ok(());
+    }
+    let mut stderr = io::stderr();
+
+    stderr
+        .write_all(&[b"abort: ", error.to_string().as_bytes(), b"\n"].concat())
+        .map_err(|e| UiError::StderrError(e))?;
+
+    stderr.flush().map_err(|e| UiError::StderrError(e))?;
+
+    Err(UiError::StdoutError(error))
+}
+
+/// Sometimes writing to stderr is not possible.
+fn handle_stderr_error(error: io::Error) -> Result<(), UiError> {
+    // A broken pipe should not result in a error
+    // like with `| head` for example
+    if let ErrorKind::BrokenPipe = error.kind() {
+        return Ok(());
+    }
+    Err(UiError::StdoutError(error))
+}
--- a/setup.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/setup.py	Mon Sep 07 15:20:31 2020 -0400
@@ -1268,6 +1268,7 @@
     'mercurial.hgweb',
     'mercurial.interfaces',
     'mercurial.pure',
+    'mercurial.templates',
     'mercurial.thirdparty',
     'mercurial.thirdparty.attr',
     'mercurial.thirdparty.zope',
@@ -1292,6 +1293,13 @@
     'hgext3rd',
     'hgdemandimport',
 ]
+
+for name in os.listdir(os.path.join('mercurial', 'templates')):
+    if name != '__pycache__' and os.path.isdir(
+        os.path.join('mercurial', 'templates', name)
+    ):
+        packages.append('mercurial.templates.%s' % name)
+
 if sys.version_info[0] == 2:
     packages.extend(
         [
@@ -1614,11 +1622,8 @@
     msvccompiler.MSVCCompiler = HackedMSVCCompiler
 
 packagedata = {
-    'mercurial': [
-        'locale/*/LC_MESSAGES/hg.mo',
-        'defaultrc/*.rc',
-        'dummycert.pem',
-    ],
+    'mercurial': ['locale/*/LC_MESSAGES/hg.mo', 'dummycert.pem',],
+    'mercurial.defaultrc': ['*.rc',],
     'mercurial.helptext': ['*.txt',],
     'mercurial.helptext.internals': ['*.txt',],
 }
@@ -1630,11 +1635,8 @@
 
 for root in ('templates',):
     for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
-        curdir = curdir.split(os.sep, 1)[1]
-        dirs[:] = filter(ordinarypath, dirs)
-        for f in filter(ordinarypath, files):
-            f = os.path.join(curdir, f)
-            packagedata['mercurial'].append(f)
+        packagename = curdir.replace(os.sep, '.')
+        packagedata[packagename] = list(filter(ordinarypath, files))
 
 datafiles = []
 
--- a/tests/hghave.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/hghave.py	Mon Sep 07 15:20:31 2020 -0400
@@ -886,8 +886,11 @@
         return False
 
 
-@check("virtualenv", "Python virtualenv support")
-def has_virtualenv():
+@check("py2virtualenv", "Python2 virtualenv support")
+def has_py2virtualenv():
+    if sys.version_info[0] != 2:
+        return False
+
     try:
         import virtualenv
 
--- a/tests/pullext.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/pullext.py	Mon Sep 07 15:20:31 2020 -0400
@@ -13,8 +13,8 @@
     error,
     extensions,
     localrepo,
+    requirements,
 )
-from mercurial.interfaces import repository
 
 
 def clonecommand(orig, ui, repo, *args, **kwargs):
@@ -31,7 +31,7 @@
 
 
 def featuresetup(ui, features):
-    features.add(repository.NARROW_REQUIREMENT)
+    features.add(requirements.NARROW_REQUIREMENT)
 
 
 def extsetup(ui):
--- a/tests/run-tests.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/run-tests.py	Mon Sep 07 15:20:31 2020 -0400
@@ -2336,7 +2336,6 @@
         jobs=1,
         whitelist=None,
         blacklist=None,
-        retest=False,
         keywords=None,
         loop=False,
         runs_per_test=1,
@@ -2364,9 +2363,6 @@
         backwards compatible behavior which reports skipped tests as part
         of the results.
 
-        retest denotes whether to retest failed tests. This arguably belongs
-        outside of TestSuite.
-
         keywords denotes key words that will be used to filter which tests
         to execute. This arguably belongs outside of TestSuite.
 
@@ -2377,7 +2373,6 @@
         self._jobs = jobs
         self._whitelist = whitelist
         self._blacklist = blacklist
-        self._retest = retest
         self._keywords = keywords
         self._loop = loop
         self._runs_per_test = runs_per_test
@@ -2407,10 +2402,6 @@
                     result.addSkip(test, 'blacklisted')
                     continue
 
-                if self._retest and not os.path.exists(test.errpath):
-                    result.addIgnore(test, 'not retesting')
-                    continue
-
                 if self._keywords:
                     with open(test.path, 'rb') as f:
                         t = f.read().lower() + test.bname.lower()
@@ -3253,6 +3244,14 @@
                     tests.append({'path': t})
             else:
                 tests.append({'path': t})
+
+        if self.options.retest:
+            retest_args = []
+            for test in tests:
+                errpath = self._geterrpath(test)
+                if os.path.exists(errpath):
+                    retest_args.append(test)
+            tests = retest_args
         return tests
 
     def _runtests(self, testdescs):
@@ -3269,13 +3268,7 @@
                 orig = list(testdescs)
                 while testdescs:
                     desc = testdescs[0]
-                    # desc['path'] is a relative path
-                    if 'case' in desc:
-                        casestr = b'#'.join(desc['case'])
-                        errpath = b'%s#%s.err' % (desc['path'], casestr)
-                    else:
-                        errpath = b'%s.err' % desc['path']
-                    errpath = os.path.join(self._outputdir, errpath)
+                    errpath = self._geterrpath(desc)
                     if os.path.exists(errpath):
                         break
                     testdescs.pop(0)
@@ -3298,7 +3291,6 @@
                 jobs=jobs,
                 whitelist=self.options.whitelisted,
                 blacklist=self.options.blacklist,
-                retest=self.options.retest,
                 keywords=kws,
                 loop=self.options.loop,
                 runs_per_test=self.options.runs_per_test,
@@ -3346,6 +3338,18 @@
         if failed:
             return 1
 
+    def _geterrpath(self, test):
+        # test['path'] is a relative path
+        if 'case' in test:
+            # for multiple dimensions test cases
+            casestr = b'#'.join(test['case'])
+            errpath = b'%s#%s.err' % (test['path'], casestr)
+        else:
+            errpath = b'%s.err' % test['path']
+        if self.options.outputdir:
+            errpath = os.path.join(self.options.outputdir, errpath)
+        return errpath
+
     def _getport(self, count):
         port = self._ports.get(count)  # do we have a cached entry?
         if port is None:
--- a/tests/test-absorb-unfinished.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-absorb-unfinished.t	Mon Sep 07 15:20:31 2020 -0400
@@ -25,6 +25,6 @@
 
   $ hg --config extensions.rebase= absorb
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
--- a/tests/test-amend.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-amend.t	Mon Sep 07 15:20:31 2020 -0400
@@ -93,6 +93,29 @@
   nothing changed
   [1]
 
+#if obsstore-on
+  $ hg init repo-merge-state
+  $ cd repo-merge-state
+  $ echo a > f
+  $ hg ci -Aqm a
+  $ echo b > f
+  $ hg ci -Aqm b
+  $ echo c > f
+  $ hg co -m '.^'
+  merging f
+  warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges
+  [1]
+  $ echo d > f
+  $ hg resolve -m f
+  (no more unresolved files)
+  $ hg ci --amend --config experimental.evolution.allowunstable=True
+  1 new orphan changesets
+  $ hg resolve -l
+  $ cd ..
+#endif
+
 Matcher and metadata options
 
   $ echo 3 > C
--- a/tests/test-annotate.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-annotate.t	Mon Sep 07 15:20:31 2020 -0400
@@ -479,26 +479,24 @@
 
   $ cat > ../legacyrepo.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import error, node
-  > def reposetup(ui, repo):
-  >     class legacyrepo(repo.__class__):
-  >         def _filecommit(self, fctx, manifest1, manifest2,
-  >                         linkrev, tr, changelist, includecopymeta):
-  >             fname = fctx.path()
-  >             text = fctx.data()
-  >             flog = self.file(fname)
-  >             fparent1 = manifest1.get(fname, node.nullid)
-  >             fparent2 = manifest2.get(fname, node.nullid)
-  >             meta = {}
-  >             copy = fctx.copysource()
-  >             if copy and copy != fname:
-  >                 raise error.Abort('copying is not supported')
-  >             if fparent2 != node.nullid:
-  >                 changelist.append(fname)
-  >                 return flog.add(text, meta, tr, linkrev,
-  >                                 fparent1, fparent2)
-  >             raise error.Abort('only merging is supported')
-  >     repo.__class__ = legacyrepo
+  > from mercurial import commit, error, extensions, node
+  > def _filecommit(orig, repo, fctx, manifest1, manifest2,
+  >                 linkrev, tr, includecopymeta):
+  >     fname = fctx.path()
+  >     text = fctx.data()
+  >     flog = repo.file(fname)
+  >     fparent1 = manifest1.get(fname, node.nullid)
+  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     meta = {}
+  >     copy = fctx.copysource()
+  >     if copy and copy != fname:
+  >         raise error.Abort('copying is not supported')
+  >     if fparent2 != node.nullid:
+  >         return flog.add(text, meta, tr, linkrev,
+  >                         fparent1, fparent2), 'modified'
+  >     raise error.Abort('only merging is supported')
+  > def uisetup(ui):
+  >     extensions.wrapfunction(commit, '_filecommit', _filecommit)
   > EOF
 
   $ cat > baz <<EOF
--- a/tests/test-completion.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-completion.t	Mon Sep 07 15:20:31 2020 -0400
@@ -353,7 +353,7 @@
   push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
   recover: verify
   remove: after, force, subrepos, include, exclude, dry-run
-  rename: after, force, include, exclude, dry-run
+  rename: after, at-rev, force, include, exclude, dry-run
   resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
   revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
   rollback: dry-run, force
--- a/tests/test-convert-identity.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-convert-identity.t	Mon Sep 07 15:20:31 2020 -0400
@@ -8,9 +8,10 @@
   > convert =
   > EOF
   $ cat <<'EOF' > changefileslist.py
-  > from mercurial import (changelog, extensions)
+  > from mercurial import (changelog, extensions, metadata)
   > def wrap(orig, clog, manifest, files, *args, **kwargs):
-  >   return orig(clog, manifest, [b"a"], *args, **kwargs)
+  >   files = metadata.ChangingFiles(touched=[b"a"])
+  >   return orig(clog, manifest, files, *args, **kwargs)
   > def extsetup(ui):
   >   extensions.wrapfunction(changelog.changelog, 'add', wrap)
   > EOF
--- a/tests/test-fastannotate-hg.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-fastannotate-hg.t	Mon Sep 07 15:20:31 2020 -0400
@@ -481,26 +481,25 @@
 and its ancestor by overriding "repo._filecommit".
 
   $ cat > ../legacyrepo.py <<EOF
-  > from mercurial import error, node
-  > def reposetup(ui, repo):
-  >     class legacyrepo(repo.__class__):
-  >         def _filecommit(self, fctx, manifest1, manifest2,
-  >                         linkrev, tr, changelist, includecopymeta):
-  >             fname = fctx.path()
-  >             text = fctx.data()
-  >             flog = self.file(fname)
-  >             fparent1 = manifest1.get(fname, node.nullid)
-  >             fparent2 = manifest2.get(fname, node.nullid)
-  >             meta = {}
-  >             copy = fctx.renamed()
-  >             if copy and copy[0] != fname:
-  >                 raise error.Abort('copying is not supported')
-  >             if fparent2 != node.nullid:
-  >                 changelist.append(fname)
-  >                 return flog.add(text, meta, tr, linkrev,
-  >                                 fparent1, fparent2)
-  >             raise error.Abort('only merging is supported')
-  >     repo.__class__ = legacyrepo
+  > from __future__ import absolute_import
+  > from mercurial import commit, error, extensions, node
+  > def _filecommit(orig, repo, fctx, manifest1, manifest2,
+  >                 linkrev, tr, includecopymeta):
+  >     fname = fctx.path()
+  >     text = fctx.data()
+  >     flog = repo.file(fname)
+  >     fparent1 = manifest1.get(fname, node.nullid)
+  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     meta = {}
+  >     copy = fctx.copysource()
+  >     if copy and copy != fname:
+  >         raise error.Abort('copying is not supported')
+  >     if fparent2 != node.nullid:
+  >         return flog.add(text, meta, tr, linkrev,
+  >                         fparent1, fparent2), 'modified'
+  >     raise error.Abort('only merging is supported')
+  > def uisetup(ui):
+  >     extensions.wrapfunction(commit, '_filecommit', _filecommit)
   > EOF
 
   $ cat > baz <<EOF
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fix-pickle.t	Mon Sep 07 15:20:31 2020 -0400
@@ -0,0 +1,45 @@
+A script that implements uppercasing all letters in a file.
+
+  $ UPPERCASEPY="$TESTTMP/uppercase.py"
+  $ cat > $UPPERCASEPY <<EOF
+  > import sys
+  > from mercurial.utils.procutil import setbinary
+  > setbinary(sys.stdin)
+  > setbinary(sys.stdout)
+  > sys.stdout.write(sys.stdin.read().upper())
+  > EOF
+  $ TESTLINES="foo\nbar\nbaz\n"
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY
+  FOO
+  BAR
+  BAZ
+
+This file attempts to test our workarounds for pickle's lack of
+support for short reads.
+
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > fix =
+  > [fix]
+  > uppercase-whole-file:command="$PYTHON" $UPPERCASEPY
+  > uppercase-whole-file:pattern=set:**
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+# Create a file that's large enough that it seems to not fit in
+# pickle's buffer, making it use the code path that expects our
+# _blockingreader's read() method to return bytes.
+  $ echo "some stuff" > file
+  $ for i in $($TESTDIR/seq.py 13); do
+  >   cat file file > tmp
+  >   mv -f tmp file
+  > done
+  $ hg commit -Am "add large file"
+  adding file
+
+Check that we don't get a crash
+
+  $ hg fix -r .
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-fix.hg (glob)
--- a/tests/test-fix.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-fix.t	Mon Sep 07 15:20:31 2020 -0400
@@ -84,15 +84,15 @@
       lines of files, unless the --whole flag is used. Some tools may always
       affect the whole file regardless of --whole.
   
-      If revisions are specified with --rev, those revisions will be checked,
-      and they may be replaced with new revisions that have fixed file content.
-      It is desirable to specify all descendants of each specified revision, so
-      that the fixes propagate to the descendants. If all descendants are fixed
-      at the same time, no merging, rebasing, or evolution will be required.
+      If --working-dir is used, files with uncommitted changes in the working
+      copy will be fixed. Note that no backup are made.
   
-      If --working-dir is used, files with uncommitted changes in the working
-      copy will be fixed. If the checked-out revision is also fixed, the working
-      directory will update to the replacement revision.
+      If revisions are specified with --source, those revisions and their
+      descendants will be checked, and they may be replaced with new revisions
+      that have fixed file content. By automatically including the descendants,
+      no merging, rebasing, or evolution will be required. If an ancestor of the
+      working copy is included, then the working copy itself will also be fixed,
+      and the working copy will be updated to the fixed parent.
   
       When determining what lines of each file to fix at each revision, the
       whole set of revisions being fixed is considered, so that fixes to earlier
@@ -878,7 +878,7 @@
 
   $ hg --config extensions.rebase= fix -r .
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ cd ..
--- a/tests/test-install.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-install.t	Mon Sep 07 15:20:31 2020 -0400
@@ -187,6 +187,14 @@
 #if py3 ensurepip
   $ "$PYTHON" -m venv installenv >> pip.log
 
+Hack: Debian does something a bit different in ensurepip.bootstrap. This makes
+it so that pip thinks the 'wheel' wheel is installed so it can build wheels;
+when it goes to try, however, it shells out to run `python3 -u <setup.py>`,
+that *doesn't* get the 'wheel' wheel, and it fails with an invalid command
+'bdist_wheel'. To fix this, we just delete the wheel from where Debian put it in
+our virtual env. Then pip doesn't think it's installed and doesn't try to build.
+  $ rm installenv/share/python-wheels/wheel-*.whl >/dev/null 2>&1 || true
+
 Note: we use this weird path to run pip and hg to avoid platform differences,
 since it's bin on most platforms but Scripts on Windows.
   $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
@@ -214,7 +222,7 @@
   no problems detected
 #endif
 
-#if no-py3 virtualenv
+#if py2virtualenv
 
 Note: --no-site-packages is deprecated, but some places have an
 ancient virtualenv from their linux distro or similar and it's not yet
--- a/tests/test-phabricator.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-phabricator.t	Mon Sep 07 15:20:31 2020 -0400
@@ -24,6 +24,11 @@
   > EOF
   $ VCR="$TESTDIR/phabricator"
 
+debugcallconduit doesn't claim invalid arguments without --test-vcr:
+  $ echo '{}' | HGRCSKIPREPO= hg debugcallconduit 'conduit.ping'
+  abort: config phabricator.url is required
+  [255]
+
 Error is handled reasonably. We override the phabtoken here so that
 when you're developing changes to phabricator.py you can edit the
 above config and have a real token in the test but not have to edit
--- a/tests/test-rebase-abort.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-rebase-abort.t	Mon Sep 07 15:20:31 2020 -0400
@@ -327,7 +327,7 @@
   $ echo new > a
   $ hg up 1               # user gets an error saying to run hg rebase --abort
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ cat a
@@ -397,20 +397,20 @@
 
   $ hg rebase -s 3 -d tip
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg up .
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg up -C .
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ hg graft 3
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
 
   $ hg abort
--- a/tests/test-rebase-inmemory.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-rebase-inmemory.t	Mon Sep 07 15:20:31 2020 -0400
@@ -901,7 +901,7 @@
   [1]
   $ hg rebase -r 3 -d 1 -t:merge3
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg resolve --list
   U foo
--- a/tests/test-rebase-obsolete.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-rebase-obsolete.t	Mon Sep 07 15:20:31 2020 -0400
@@ -2055,7 +2055,7 @@
 
   $ hg rebase -s 3 -d 5
   abort: rebase in progress
-  (use 'hg rebase --continue' or 'hg rebase --abort')
+  (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop')
   [255]
   $ hg rebase --stop --continue
   abort: cannot specify both --stop and --continue
--- a/tests/test-rename-rev.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-rename-rev.t	Mon Sep 07 15:20:31 2020 -0400
@@ -43,7 +43,7 @@
   A d1/d
     d1/b
 
-Test moved file (not copied)
+Test moved file (not copied) using 'hg cp' command
 
   $ hg co 0
   0 files updated, 0 files merged, 2 files removed, 0 files unresolved
@@ -59,10 +59,40 @@
     d1/b
   R d1/b
 
+Test moved file (not copied) using 'hg mv' command
+
+  $ hg co 0
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mv d1/b d1/d
+  $ hg rm -A d1/b
+  $ hg add d1/d
+  $ hg ci -m 'move d1/b to d1/d'
+  created new head
+  $ hg mv -A --at-rev . d1/b d1/d
+  saved backup bundle to $TESTTMP/.hg/strip-backup/519850c3ea27-153c8fbb-copy.hg
+  $ hg st -C --change .
+  A d1/d
+    d1/b
+  R d1/b
+
+Test moved file (not copied) for which source still exists
+
+  $ hg co 0
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ cp d1/b d1/d
+  $ hg add d1/d
+  $ hg ci -m 'copy d1/b to d1/d'
+  created new head
+  $ hg mv -A --at-rev . d1/b d1/d
+  saved backup bundle to $TESTTMP/.hg/strip-backup/c8d0f6bcf7ca-1c9bb53e-copy.hg
+  $ hg st -C --change .
+  A d1/d
+    d1/b
+
 Test using directory as destination
 
   $ hg co 0
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ cp -R d1 d3
   $ hg add d3
   adding d3/a
--- a/tests/test-revlog-v2.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-revlog-v2.t	Mon Sep 07 15:20:31 2020 -0400
@@ -32,10 +32,10 @@
 Unknown flags to revlog are rejected
 
   >>> with open('.hg/store/00changelog.i', 'wb') as fh:
-  ...     fh.write(b'\x00\x04\xde\xad') and None
+  ...     fh.write(b'\xff\x00\xde\xad') and None
 
   $ hg log
-  abort: unknown flags (0x04) in version 57005 revlog 00changelog.i!
+  abort: unknown flags (0xff00) in version 57005 revlog 00changelog.i!
   [255]
 
   $ cd ..
--- a/tests/test-run-tests.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-run-tests.t	Mon Sep 07 15:20:31 2020 -0400
@@ -497,7 +497,7 @@
 ====================
 
   $ rt --retest
-  running 2 tests using 1 parallel processes 
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -512,7 +512,7 @@
   ERROR: test-failure.t output changed
   !
   Failed test-failure.t: output changed
-  # Ran 2 tests, 1 skipped, 1 failed.
+  # Ran 1 tests, 0 skipped, 1 failed.
   python hash seed: * (glob)
   [1]
 
@@ -521,7 +521,7 @@
   $ mkdir output
   $ mv test-failure.t.err output
   $ rt --retest --outputdir output
-  running 2 tests using 1 parallel processes 
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/output/test-failure.t.err
@@ -536,7 +536,7 @@
   ERROR: test-failure.t output changed
   !
   Failed test-failure.t: output changed
-  # Ran 2 tests, 1 skipped, 1 failed.
+  # Ran 1 tests, 0 skipped, 1 failed.
   python hash seed: * (glob)
   [1]
 
@@ -844,6 +844,8 @@
     $ echo 'saved backup bundle to $TESTTMP/foo.hg'
     saved backup bundle to $TESTTMP/*.hg (glob)<
 
+  $ rm test-failure.t
+
 Race condition - test file was modified when test is running
 
   $ TESTRACEDIR=`pwd`
@@ -972,6 +974,25 @@
   python hash seed: * (glob)
   [1]
 
+  $ rt --retest
+  running 1 tests using 1 parallel processes 
+  
+  --- $TESTTMP/test-cases.t
+  +++ $TESTTMP/test-cases.t#b#c.err
+  @@ -6,5 +6,5 @@
+   #endif
+   #if b c
+     $ echo yes
+  -  no
+  +  yes
+   #endif
+  
+  ERROR: test-cases.t#b#c output changed
+  !
+  Failed test-cases.t#b#c: output changed
+  # Ran 1 tests, 0 skipped, 1 failed.
+  python hash seed: * (glob)
+  [1]
   $ rm test-cases.t#b#c.err
   $ rm test-cases.t
 
--- a/tests/test-share-bookmarks.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-share-bookmarks.t	Mon Sep 07 15:20:31 2020 -0400
@@ -279,3 +279,8 @@
      bm3                       4:62f4ded848e4
      bm4                       5:92793bfc8cad
   $ cd ..
+
+Test that if store is disabled, we drop the bookmarksinstore requirement
+
+  $ hg init brokenrepo --config format.bookmarks-in-store=True --config format.usestore=false
+  ignoring enabled 'format.bookmarks-in-store' config beacuse it is incompatible with disabled 'format.usestore' config
--- a/tests/test-share.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-share.t	Mon Sep 07 15:20:31 2020 -0400
@@ -252,3 +252,9 @@
 
   $ killdaemons.py
 
+Test sharing a repository which was created with the store requirement disabled
+
+  $ hg init nostore --config format.usestore=false
+  $ hg share nostore sharednostore
+  abort: cannot create shared repository as source was created with 'format.usestore' config disabled
+  [255]
--- a/tests/test-template-map.t	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/test-template-map.t	Mon Sep 07 15:20:31 2020 -0400
@@ -125,6 +125,54 @@
   date:        Wed Jan 01 10:01:00 2020 +0000
   summary:     third
   
+Test map inheritance with non-existent base
+
+  $ echo "__base__ = non-existent" > map-base-nonexistent
+  $ hg log -l1 -T./map-base-nonexistent
+  abort: style '$TESTTMP/a/non-existent' not found
+  (available styles: bisect, changelog, compact, default, phases, show, status, xml)
+  [255]
+
+Test map inheritance with directory as base
+
+  $ mkdir somedir
+  $ echo "__base__ = somedir" > map-base-dir
+  $ hg log -l1 -T./map-base-dir
+  abort: Is a directory: '$TESTTMP/a/somedir'
+  [255]
+
+Test including a built-in template map
+
+  $ cat <<'EOF' > map-include-builtin
+  > %include map-cmdline.default
+  > [templates]
+  > changeset = "{changeset_quiet}\n"
+  > EOF
+  $ hg log -l1 -T./map-include-builtin
+  8:95c24699272e
+  
+
+Test including a nonexistent template map
+BROKEN: This should probably be an error just like the bad __base__ above
+
+  $ cat <<'EOF' > map-include-nonexistent
+  > %include nonexistent
+  > [templates]
+  > changeset = "test\n"
+  > EOF
+  $ hg log -l1 -T./map-include-nonexistent
+  test
+
+Test including a directory as template map
+BROKEN: This should probably be an error just like the bad __base__ above
+
+  $ cat <<'EOF' > map-include-dir
+  > %include somedir
+  > [templates]
+  > changeset = "test\n"
+  > EOF
+  $ hg log -l1 -T./map-include-dir
+  test
 
 Test docheader, docfooter and separator in template map
 
@@ -1227,6 +1275,19 @@
   abort: specify a template
   [255]
 
+Error if style is a directory:
+
+  $ hg log --style somedir
+  abort: Is a directory: 'somedir'
+  [255]
+
+Error if style is a directory whose name is a built-in style:
+
+  $ hg log --style coal
+  abort: style 'coal' not found
+  (available styles: bisect, changelog, compact, default, phases, show, status, xml)
+  [255]
+
 Error if style missing key:
 
   $ echo 'q = q' > t
--- a/tests/testlib/ext-sidedata.py	Wed Sep 02 12:31:37 2020 +0200
+++ b/tests/testlib/ext-sidedata.py	Mon Sep 07 15:20:31 2020 -0400
@@ -12,8 +12,8 @@
 
 from mercurial import (
     extensions,
-    localrepo,
     node,
+    requirements,
     revlog,
     upgrade,
 )
@@ -54,7 +54,7 @@
 def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
     sidedatacompanion = orig(srcrepo, dstrepo)
     addedreqs = dstrepo.requirements - srcrepo.requirements
-    if localrepo.SIDEDATA_REQUIREMENT in addedreqs:
+    if requirements.SIDEDATA_REQUIREMENT in addedreqs:
         assert sidedatacompanion is None  # deal with composition later
 
         def sidedatacompanion(revlog, rev):