# HG changeset patch # User Augie Fackler # Date 1600440523 14400 # Node ID e3df1f560d9a23753bcca4244cf80ea221be340a # Parent a8843eda9a35e4880dda58f59be42823e1078308# Parent bd5b2b29b82df1e2de214a59a8aa60eb80ee27f9 merge with stable diff -r bd5b2b29b82d -r e3df1f560d9a .editorconfig --- a/.editorconfig Sun Sep 13 15:59:23 2020 +0900 +++ b/.editorconfig Fri Sep 18 10:48:43 2020 -0400 @@ -6,13 +6,16 @@ indent_size = 4 indent_style = space trim_trailing_whitespace = true +end_of_line = lf [*.{c,h}] indent_size = 8 indent_style = tab trim_trailing_whitespace = true +end_of_line = lf [*.t] indent_size = 2 indent_style = space trim_trailing_whitespace = false +end_of_line = lf diff -r bd5b2b29b82d -r e3df1f560d9a Makefile --- a/Makefile Sun Sep 13 15:59:23 2020 +0900 +++ b/Makefile Fri Sep 18 10:48:43 2020 -0400 @@ -234,7 +234,6 @@ make -C contrib/chg \ HGPATH=/usr/local/bin/hg \ PYTHON=/usr/bin/python2.7 \ - HGEXTDIR=/Library/Python/2.7/site-packages/hgext \ DESTDIR=../../build/mercurial \ PREFIX=/usr/local \ clean install diff -r bd5b2b29b82d -r e3df1f560d9a contrib/check-py3-compat.py --- a/contrib/check-py3-compat.py Sun Sep 13 15:59:23 2020 +0900 +++ b/contrib/check-py3-compat.py Fri Sep 18 10:48:43 2020 -0400 @@ -97,6 +97,15 @@ if sys.version_info[0] == 2: fn = check_compat_py2 else: + # check_compat_py3 will import every filename we specify as long as it + # starts with one of a few prefixes. It does this by converting + # specified filenames like 'mercurial/foo.py' to 'mercurial.foo' and + # importing that. When running standalone (not as part of a test), this + # means we actually import the installed versions, not the files we just + # specified. When running as test-check-py3-compat.t, we technically + # would import the correct paths, but it's cleaner to have both cases + # use the same import logic. 
+ sys.path.insert(0, '.') fn = check_compat_py3 for f in sys.argv[1:]: diff -r bd5b2b29b82d -r e3df1f560d9a hgext/churn.py --- a/hgext/churn.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/churn.py Fri Sep 18 10:48:43 2020 -0400 @@ -183,6 +183,9 @@ # display count of lines changed in every year hg churn -f "%Y" -s + # display count of lines changed in a time range + hg churn -d "2020-04 to 2020-09" + It is possible to map alternate email addresses to a main address by providing a file using the following format:: diff -r bd5b2b29b82d -r e3df1f560d9a hgext/convert/hg.py --- a/hgext/convert/hg.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/convert/hg.py Fri Sep 18 10:48:43 2020 -0400 @@ -217,7 +217,8 @@ """ anc = [p1ctx.ancestor(p2ctx)] # Calculate what files are coming from p2 - actions, diverge, rename = mergemod.calculateupdates( + # TODO: mresult.commitinfo might be able to get that info + mresult = mergemod.calculateupdates( self.repo, p1ctx, p2ctx, @@ -228,7 +229,7 @@ followcopies=False, ) - for file, (action, info, msg) in pycompat.iteritems(actions): + for file, (action, info, msg) in mresult.filemap(): if source.targetfilebelongstosource(file): # If the file belongs to the source repo, ignore the p2 # since it will be covered by the existing fileset. 
diff -r bd5b2b29b82d -r e3df1f560d9a hgext/extdiff.py --- a/hgext/extdiff.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/extdiff.py Fri Sep 18 10:48:43 2020 -0400 @@ -255,7 +255,6 @@ tmproot, dir1a, dir1b, - dir2root, dir2, rev1a, rev1b, @@ -267,7 +266,7 @@ waitprocs = [] totalfiles = len(commonfiles) for idx, commonfile in enumerate(sorted(commonfiles)): - path1a = os.path.join(tmproot, dir1a, commonfile) + path1a = os.path.join(dir1a, commonfile) label1a = commonfile + rev1a if not os.path.isfile(path1a): path1a = pycompat.osdevnull @@ -275,12 +274,12 @@ path1b = b'' label1b = b'' if do3way: - path1b = os.path.join(tmproot, dir1b, commonfile) + path1b = os.path.join(dir1b, commonfile) label1b = commonfile + rev1b if not os.path.isfile(path1b): path1b = pycompat.osdevnull - path2 = os.path.join(dir2root, dir2, commonfile) + path2 = os.path.join(dir2, commonfile) label2 = commonfile + rev2 if confirm: @@ -457,23 +456,23 @@ label1b = rev1b label2 = rev2 - # If only one change, diff the files instead of the directories - # Handle bogus modifies correctly by checking if the files exist - if len(common) == 1: - common_file = util.localpath(common.pop()) - dir1a = os.path.join(tmproot, dir1a, common_file) - label1a = common_file + rev1a - if not os.path.isfile(dir1a): - dir1a = pycompat.osdevnull - if do3way: - dir1b = os.path.join(tmproot, dir1b, common_file) - label1b = common_file + rev1b - if not os.path.isfile(dir1b): - dir1b = pycompat.osdevnull - dir2 = os.path.join(dir2root, dir2, common_file) - label2 = common_file + rev2 + if not opts.get(b'per_file'): + # If only one change, diff the files instead of the directories + # Handle bogus modifies correctly by checking if the files exist + if len(common) == 1: + common_file = util.localpath(common.pop()) + dir1a = os.path.join(tmproot, dir1a, common_file) + label1a = common_file + rev1a + if not os.path.isfile(dir1a): + dir1a = pycompat.osdevnull + if do3way: + dir1b = os.path.join(tmproot, dir1b, common_file) + 
label1b = common_file + rev1b + if not os.path.isfile(dir1b): + dir1b = pycompat.osdevnull + dir2 = os.path.join(dir2root, dir2, common_file) + label2 = common_file + rev2 - if not opts.get(b'per_file'): # Run the external tool on the 2 temp directories or the patches cmdline = formatcmdline( cmdline, @@ -499,10 +498,9 @@ confirm=opts.get(b'confirm'), commonfiles=common, tmproot=tmproot, - dir1a=dir1a, - dir1b=dir1b, - dir2root=dir2root, - dir2=dir2, + dir1a=os.path.join(tmproot, dir1a), + dir1b=os.path.join(tmproot, dir1b) if do3way else None, + dir2=os.path.join(dir2root, dir2), rev1a=rev1a, rev1b=rev1b, rev2=rev2, @@ -711,45 +709,67 @@ ) +def _gettooldetails(ui, cmd, path): + """ + returns following things for a + ``` + [extdiff] + = + ``` + entry: + + cmd: command/tool name + path: path to the tool + cmdline: the command which should be run + isgui: whether the tool uses GUI or not + + Reads all external tools related configs, whether it be extdiff section, + diff-tools or merge-tools section, or its specified in an old format or + the latest format. + """ + path = util.expandpath(path) + if cmd.startswith(b'cmd.'): + cmd = cmd[4:] + if not path: + path = procutil.findexe(cmd) + if path is None: + path = filemerge.findexternaltool(ui, cmd) or cmd + diffopts = ui.config(b'extdiff', b'opts.' + cmd) + cmdline = procutil.shellquote(path) + if diffopts: + cmdline += b' ' + diffopts + isgui = ui.configbool(b'extdiff', b'gui.' + cmd) + else: + if path: + # case "cmd = path opts" + cmdline = path + diffopts = len(pycompat.shlexsplit(cmdline)) > 1 + else: + # case "cmd =" + path = procutil.findexe(cmd) + if path is None: + path = filemerge.findexternaltool(ui, cmd) or cmd + cmdline = procutil.shellquote(path) + diffopts = False + isgui = ui.configbool(b'extdiff', b'gui.' 
+ cmd) + # look for diff arguments in [diff-tools] then [merge-tools] + if not diffopts: + key = cmd + b'.diffargs' + for section in (b'diff-tools', b'merge-tools'): + args = ui.config(section, key) + if args: + cmdline += b' ' + args + if isgui is None: + isgui = ui.configbool(section, cmd + b'.gui') or False + break + return cmd, path, cmdline, isgui + + def uisetup(ui): for cmd, path in ui.configitems(b'extdiff'): - path = util.expandpath(path) - if cmd.startswith(b'cmd.'): - cmd = cmd[4:] - if not path: - path = procutil.findexe(cmd) - if path is None: - path = filemerge.findexternaltool(ui, cmd) or cmd - diffopts = ui.config(b'extdiff', b'opts.' + cmd) - cmdline = procutil.shellquote(path) - if diffopts: - cmdline += b' ' + diffopts - isgui = ui.configbool(b'extdiff', b'gui.' + cmd) - elif cmd.startswith(b'opts.') or cmd.startswith(b'gui.'): + if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'): continue - else: - if path: - # case "cmd = path opts" - cmdline = path - diffopts = len(pycompat.shlexsplit(cmdline)) > 1 - else: - # case "cmd =" - path = procutil.findexe(cmd) - if path is None: - path = filemerge.findexternaltool(ui, cmd) or cmd - cmdline = procutil.shellquote(path) - diffopts = False - isgui = ui.configbool(b'extdiff', b'gui.' + cmd) - # look for diff arguments in [diff-tools] then [merge-tools] - if not diffopts: - key = cmd + b'.diffargs' - for section in (b'diff-tools', b'merge-tools'): - args = ui.config(section, key) - if args: - cmdline += b' ' + args - if isgui is None: - isgui = ui.configbool(section, cmd + b'.gui') or False - break + cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path) command( cmd, extdiffopts[:], diff -r bd5b2b29b82d -r e3df1f560d9a hgext/fix.py --- a/hgext/fix.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/fix.py Fri Sep 18 10:48:43 2020 -0400 @@ -241,15 +241,15 @@ of files, unless the --whole flag is used. Some tools may always affect the whole file regardless of --whole. 
- If revisions are specified with --rev, those revisions will be checked, and - they may be replaced with new revisions that have fixed file content. It is - desirable to specify all descendants of each specified revision, so that the - fixes propagate to the descendants. If all descendants are fixed at the same - time, no merging, rebasing, or evolution will be required. + If --working-dir is used, files with uncommitted changes in the working copy + will be fixed. Note that no backup are made. - If --working-dir is used, files with uncommitted changes in the working copy - will be fixed. If the checked-out revision is also fixed, the working - directory will update to the replacement revision. + If revisions are specified with --source, those revisions and their + descendants will be checked, and they may be replaced with new revisions + that have fixed file content. By automatically including the descendants, + no merging, rebasing, or evolution will be required. If an ancestor of the + working copy is included, then the working copy itself will also be fixed, + and the working copy will be updated to the fixed parent. 
When determining what lines of each file to fix at each revision, the whole set of revisions being fixed is considered, so that fixes to earlier diff -r bd5b2b29b82d -r e3df1f560d9a hgext/fsmonitor/__init__.py --- a/hgext/fsmonitor/__init__.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/fsmonitor/__init__.py Fri Sep 18 10:48:43 2020 -0400 @@ -73,6 +73,8 @@ [fsmonitor] warn_update_file_count = (integer) + # or when mercurial is built with rust support + warn_update_file_count_rust = (integer) If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will be printed during working directory updates if this many files will be diff -r bd5b2b29b82d -r e3df1f560d9a hgext/git/__init__.py --- a/hgext/git/__init__.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/git/__init__.py Fri Sep 18 10:48:43 2020 -0400 @@ -297,6 +297,10 @@ def commit(self, *args, **kwargs): ret = orig.commit(self, *args, **kwargs) + if ret is None: + # there was nothing to commit, so we should skip + # the index fixup logic we'd otherwise do. + return None tid = self.store.git[gitutil.togitnode(ret)].tree.id # DANGER! This will flush any writes staged to the # index in Git, but we're sidestepping the index in a diff -r bd5b2b29b82d -r e3df1f560d9a hgext/git/dirstate.py --- a/hgext/git/dirstate.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/git/dirstate.py Fri Sep 18 10:48:43 2020 -0400 @@ -129,6 +129,7 @@ return False def status(self, match, subrepos, ignored, clean, unknown): + listclean = clean # TODO handling of clean files - can we get that from git.status()? 
modified, added, removed, deleted, unknown, ignored, clean = ( [], @@ -142,6 +143,8 @@ gstatus = self.git.status() for path, status in gstatus.items(): path = pycompat.fsencode(path) + if not match(path): + continue if status == pygit2.GIT_STATUS_IGNORED: if path.endswith(b'/'): continue @@ -166,6 +169,22 @@ b'unhandled case: status for %r is %r' % (path, status) ) + if listclean: + observed = set( + modified + added + removed + deleted + unknown + ignored + ) + index = self.git.index + index.read() + for entry in index: + path = pycompat.fsencode(entry.path) + if not match(path): + continue + if path in observed: + continue # already in some other set + if path[-1] == b'/': + continue # directory + clean.append(path) + # TODO are we really always sure of status here? return ( False, @@ -276,13 +295,24 @@ pass def add(self, f): - self.git.index.add(pycompat.fsdecode(f)) + index = self.git.index + index.read() + index.add(pycompat.fsdecode(f)) + index.write() def drop(self, f): - self.git.index.remove(pycompat.fsdecode(f)) + index = self.git.index + index.read() + fs = pycompat.fsdecode(f) + if fs in index: + index.remove(fs) + index.write() def remove(self, f): - self.git.index.remove(pycompat.fsdecode(f)) + index = self.git.index + index.read() + index.remove(pycompat.fsdecode(f)) + index.write() def copied(self, path): # TODO: track copies? diff -r bd5b2b29b82d -r e3df1f560d9a hgext/git/gitlog.py --- a/hgext/git/gitlog.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/git/gitlog.py Fri Sep 18 10:48:43 2020 -0400 @@ -96,6 +96,10 @@ # TODO: an interface for the changelog type? 
class changelog(baselog): + # TODO: this appears to be an enumerated type, and should probably + # be part of the public changelog interface + _copiesstorage = b'extra' + def __contains__(self, rev): try: self.node(rev) @@ -386,7 +390,7 @@ encoding.unifromlocal(stringutil.person(user)), encoding.unifromlocal(stringutil.email(user)), timestamp, - -(tz // 60), + -int(tz // 60), ) oid = self.gitrepo.create_commit( None, sig, sig, desc, gitutil.togitnode(manifest), parents diff -r bd5b2b29b82d -r e3df1f560d9a hgext/git/manifest.py --- a/hgext/git/manifest.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/git/manifest.py Fri Sep 18 10:48:43 2020 -0400 @@ -217,7 +217,9 @@ return b'' def copy(self): - pass + return gittreemanifest( + self._git_repo, self._tree, dict(self._pending_changes) + ) def items(self): for f in self: diff -r bd5b2b29b82d -r e3df1f560d9a hgext/histedit.py --- a/hgext/histedit.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/histedit.py Fri Sep 18 10:48:43 2020 -0400 @@ -635,12 +635,11 @@ def applychanges(ui, repo, ctx, opts): """Merge changeset from ctx (only) in the current working directory""" - wcpar = repo.dirstate.p1() - if ctx.p1().node() == wcpar: + if ctx.p1().node() == repo.dirstate.p1(): # edits are "in place" we do not need to make any merge, # just applies changes on parent for editing ui.pushbuffer() - cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True) + cmdutil.revert(ui, repo, ctx, all=True) stats = mergemod.updateresult(0, 0, 0, 0) ui.popbuffer() else: @@ -1152,7 +1151,7 @@ h, ) - @property + @util.propertycache def desc(self): summary = ( cmdutil.rendertemplate( diff -r bd5b2b29b82d -r e3df1f560d9a hgext/hooklib/changeset_obsoleted.py --- a/hgext/hooklib/changeset_obsoleted.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/hooklib/changeset_obsoleted.py Fri Sep 18 10:48:43 2020 -0400 @@ -13,7 +13,7 @@ messageidseed = myseed [hooks] - pretxnclose.changeset_obsoleted = \ + txnclose.changeset_obsoleted = \ 
python:hgext.hooklib.changeset_obsoleted.hook """ @@ -26,6 +26,7 @@ from mercurial import ( encoding, error, + formatter, logcmdutil, mail, obsutil, @@ -62,7 +63,7 @@ b'notify_obsoleted', b'messageidseed' ) or ui.config(b'notify', b'messageidseed') template = ui.config(b'notify_obsoleted', b'template') - spec = logcmdutil.templatespec(template, None) + spec = formatter.literal_templatespec(template) templater = logcmdutil.changesettemplater(ui, repo, spec) ui.pushbuffer() n = notify.notifier(ui, repo, b'incoming') diff -r bd5b2b29b82d -r e3df1f560d9a hgext/hooklib/changeset_published.py --- a/hgext/hooklib/changeset_published.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/hooklib/changeset_published.py Fri Sep 18 10:48:43 2020 -0400 @@ -26,6 +26,7 @@ from mercurial import ( encoding, error, + formatter, logcmdutil, mail, pycompat, @@ -61,7 +62,7 @@ b'notify_published', b'messageidseed' ) or ui.config(b'notify', b'messageidseed') template = ui.config(b'notify_published', b'template') - spec = logcmdutil.templatespec(template, None) + spec = formatter.literal_templatespec(template) templater = logcmdutil.changesettemplater(ui, repo, spec) ui.pushbuffer() n = notify.notifier(ui, repo, b'incoming') diff -r bd5b2b29b82d -r e3df1f560d9a hgext/largefiles/lfcommands.py --- a/hgext/largefiles/lfcommands.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/largefiles/lfcommands.py Fri Sep 18 10:48:43 2020 -0400 @@ -485,19 +485,14 @@ return ([], []) -def downloadlfiles(ui, repo, rev=None): - match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {}) - - def prepare(ctx, fns): - pass - +def downloadlfiles(ui, repo): + tonode = repo.changelog.node totalsuccess = 0 totalmissing = 0 - if rev != []: # walkchangerevs on empty list would return all revs - for ctx in cmdutil.walkchangerevs(repo, match, {b'rev': rev}, prepare): - success, missing = cachelfiles(ui, repo, ctx.node()) - totalsuccess += len(success) - totalmissing += len(missing) + for rev in repo.revs(b'file(%s)', 
b'path:' + lfutil.shortname): + success, missing = cachelfiles(ui, repo, tonode(rev)) + totalsuccess += len(success) + totalmissing += len(missing) ui.status(_(b"%d additional largefiles cached\n") % totalsuccess) if totalmissing > 0: ui.status(_(b"%d largefiles failed to download\n") % totalmissing) diff -r bd5b2b29b82d -r e3df1f560d9a hgext/largefiles/overrides.py --- a/hgext/largefiles/overrides.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/largefiles/overrides.py Fri Sep 18 10:48:43 2020 -0400 @@ -52,6 +52,8 @@ lfstatus = lfutil.lfstatus +MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr' + # -- Utility functions: commonly/repeatedly needed functionality --------------- @@ -543,16 +545,16 @@ origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs ): overwrite = force and not branchmerge - actions, diverge, renamedelete = origfn( + mresult = origfn( repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs ) if overwrite: - return actions, diverge, renamedelete + return mresult # Convert to dictionary with filename as key and action as value. 
lfiles = set() - for f in actions: + for f in mresult.files(): splitstandin = lfutil.splitstandin(f) if splitstandin is not None and splitstandin in p1: lfiles.add(splitstandin) @@ -561,8 +563,8 @@ for lfile in sorted(lfiles): standin = lfutil.standin(lfile) - (lm, largs, lmsg) = actions.get(lfile, (None, None, None)) - (sm, sargs, smsg) = actions.get(standin, (None, None, None)) + (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None)) + (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None)) if sm in (b'g', b'dc') and lm != b'r': if sm == b'dc': f1, f2, fa, move, anc = sargs @@ -578,14 +580,18 @@ % lfile ) if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile - actions[lfile] = (b'r', None, b'replaced by standin') - actions[standin] = (b'g', sargs, b'replaces standin') + mresult.addfile(lfile, b'r', None, b'replaced by standin') + mresult.addfile(standin, b'g', sargs, b'replaces standin') else: # keep local normal file - actions[lfile] = (b'k', None, b'replaces standin') + mresult.addfile(lfile, b'k', None, b'replaces standin') if branchmerge: - actions[standin] = (b'k', None, b'replaced by non-standin') + mresult.addfile( + standin, b'k', None, b'replaced by non-standin', + ) else: - actions[standin] = (b'r', None, b'replaced by non-standin') + mresult.addfile( + standin, b'r', None, b'replaced by non-standin', + ) elif lm in (b'g', b'dc') and sm != b'r': if lm == b'dc': f1, f2, fa, move, anc = largs @@ -603,31 +609,36 @@ if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile if branchmerge: # largefile can be restored from standin safely - actions[lfile] = (b'k', None, b'replaced by standin') - actions[standin] = (b'k', None, b'replaces standin') + mresult.addfile( + lfile, b'k', None, b'replaced by standin', + ) + mresult.addfile(standin, b'k', None, b'replaces standin') else: # "lfile" should be marked as "removed" without # removal of itself - actions[lfile] = ( - b'lfmr', + mresult.addfile( + lfile, + 
MERGE_ACTION_LARGEFILE_MARK_REMOVED, None, b'forget non-standin largefile', ) # linear-merge should treat this largefile as 're-added' - actions[standin] = (b'a', None, b'keep standin') + mresult.addfile(standin, b'a', None, b'keep standin') else: # pick remote normal file - actions[lfile] = (b'g', largs, b'replaces standin') - actions[standin] = (b'r', None, b'replaced by non-standin') + mresult.addfile(lfile, b'g', largs, b'replaces standin') + mresult.addfile( + standin, b'r', None, b'replaced by non-standin', + ) - return actions, diverge, renamedelete + return mresult @eh.wrapfunction(mergestatemod, b'recordupdates') def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata): - if b'lfmr' in actions: + if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions: lfdirstate = lfutil.openlfdirstate(repo.ui, repo) - for lfile, args, msg in actions[b'lfmr']: + for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]: # this should be executed before 'orig', to execute 'remove' # before all other actions repo.dirstate.remove(lfile) @@ -863,7 +874,7 @@ # the matcher to hit standins instead of largefiles. Based on the # resulting standins update the largefiles. @eh.wrapfunction(cmdutil, b'revert') -def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts): +def overriderevert(orig, ui, repo, ctx, *pats, **opts): # Because we put the standins in a bad state (by updating them) # and then return them to a correct state we need to lock to # prevent others from changing them in their incorrect state. @@ -926,7 +937,7 @@ return m with extensions.wrappedfunction(scmutil, b'match', overridematch): - orig(ui, repo, ctx, parents, *pats, **opts) + orig(ui, repo, ctx, *pats, **opts) newstandins = lfutil.getstandinsstate(repo) filelist = lfutil.getlfilestoupdate(oldstandins, newstandins) @@ -1083,7 +1094,7 @@ # truncated at that point. The user may expect a download count with # this option, so attempt whether or not this is a largefile repo. 
if opts.get(b'all_largefiles'): - success, missing = lfcommands.downloadlfiles(ui, repo, None) + success, missing = lfcommands.downloadlfiles(ui, repo) if missing != 0: return None diff -r bd5b2b29b82d -r e3df1f560d9a hgext/mq.py --- a/hgext/mq.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/mq.py Fri Sep 18 10:48:43 2020 -0400 @@ -1717,11 +1717,7 @@ except: # re-raises self.ui.warn(_(b'cleaning up working directory...\n')) cmdutil.revert( - self.ui, - repo, - repo[b'.'], - repo.dirstate.parents(), - no_backup=True, + self.ui, repo, repo[b'.'], no_backup=True, ) # only remove unknown files that we know we touched or # created while patching diff -r bd5b2b29b82d -r e3df1f560d9a hgext/narrow/__init__.py --- a/hgext/narrow/__init__.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/narrow/__init__.py Fri Sep 18 10:48:43 2020 -0400 @@ -11,9 +11,9 @@ from mercurial import ( localrepo, registrar, + requirements, ) -from mercurial.interfaces import repository from . import ( narrowbundle2, @@ -52,7 +52,7 @@ def featuresetup(ui, features): - features.add(repository.NARROW_REQUIREMENT) + features.add(requirements.NARROW_REQUIREMENT) def uisetup(ui): @@ -69,7 +69,7 @@ return repo.ui.setconfig(b'experimental', b'narrow', True, b'narrow-ext') - if repository.NARROW_REQUIREMENT in repo.requirements: + if requirements.NARROW_REQUIREMENT in repo.requirements: narrowrepo.wraprepo(repo) narrowwirepeer.reposetup(repo) diff -r bd5b2b29b82d -r e3df1f560d9a hgext/narrow/narrowbundle2.py --- a/hgext/narrow/narrowbundle2.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/narrow/narrowbundle2.py Fri Sep 18 10:48:43 2020 -0400 @@ -20,11 +20,11 @@ localrepo, narrowspec, repair, + requirements, scmutil, util, wireprototypes, ) -from mercurial.interfaces import repository from mercurial.utils import stringutil _NARROWACL_SECTION = b'narrowacl' @@ -108,7 +108,7 @@ part = bundler.newpart(b'changegroup', data=cgdata) part.addparam(b'version', version) - if b'treemanifest' in repo.requirements: + if 
requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: part.addparam(b'treemanifest', b'1') @@ -163,7 +163,7 @@ part = bundler.newpart(b'changegroup', data=cgdata) part.addparam(b'version', version) - if b'treemanifest' in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: part.addparam(b'treemanifest', b'1') @@ -178,8 +178,8 @@ narrowspec.validatepatterns(includepats) narrowspec.validatepatterns(excludepats) - if not repository.NARROW_REQUIREMENT in op.repo.requirements: - op.repo.requirements.add(repository.NARROW_REQUIREMENT) + if not requirements.NARROW_REQUIREMENT in op.repo.requirements: + op.repo.requirements.add(requirements.NARROW_REQUIREMENT) scmutil.writereporequirements(op.repo) op.repo.setnarrowpats(includepats, excludepats) narrowspec.copytoworkingcopy(op.repo) @@ -194,8 +194,8 @@ narrowspec.validatepatterns(includepats) narrowspec.validatepatterns(excludepats) - if repository.NARROW_REQUIREMENT not in op.repo.requirements: - op.repo.requirements.add(repository.NARROW_REQUIREMENT) + if requirements.NARROW_REQUIREMENT not in op.repo.requirements: + op.repo.requirements.add(requirements.NARROW_REQUIREMENT) scmutil.writereporequirements(op.repo) op.repo.setnarrowpats(includepats, excludepats) narrowspec.copytoworkingcopy(op.repo) diff -r bd5b2b29b82d -r e3df1f560d9a hgext/narrow/narrowcommands.py --- a/hgext/narrow/narrowcommands.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/narrow/narrowcommands.py Fri Sep 18 10:48:43 2020 -0400 @@ -27,11 +27,11 @@ registrar, repair, repoview, + requirements, sparse, util, wireprototypes, ) -from mercurial.interfaces import repository table = {} command = registrar.command(table) @@ -133,7 +133,7 @@ def pullnarrowcmd(orig, ui, repo, *args, **opts): """Wraps pull command to allow modifying narrow spec.""" wrappedextraprepare = util.nullcontextmanager() - if repository.NARROW_REQUIREMENT in repo.requirements: + if requirements.NARROW_REQUIREMENT in repo.requirements: def 
pullbundle2extraprepare_widen(orig, pullop, kwargs): orig(pullop, kwargs) @@ -150,7 +150,7 @@ def archivenarrowcmd(orig, ui, repo, *args, **opts): """Wraps archive command to narrow the default includes.""" - if repository.NARROW_REQUIREMENT in repo.requirements: + if requirements.NARROW_REQUIREMENT in repo.requirements: repo_includes, repo_excludes = repo.narrowpats includes = set(opts.get('include', [])) excludes = set(opts.get('exclude', [])) @@ -166,7 +166,7 @@ def pullbundle2extraprepare(orig, pullop, kwargs): repo = pullop.repo - if repository.NARROW_REQUIREMENT not in repo.requirements: + if requirements.NARROW_REQUIREMENT not in repo.requirements: return orig(pullop, kwargs) if wireprototypes.NARROWCAP not in pullop.remote.capabilities(): @@ -482,7 +482,7 @@ exclude switches, the changes are applied immediately. """ opts = pycompat.byteskwargs(opts) - if repository.NARROW_REQUIREMENT not in repo.requirements: + if requirements.NARROW_REQUIREMENT not in repo.requirements: raise error.Abort( _( b'the tracked command is only supported on ' diff -r bd5b2b29b82d -r e3df1f560d9a hgext/patchbomb.py --- a/hgext/patchbomb.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/patchbomb.py Fri Sep 18 10:48:43 2020 -0400 @@ -207,7 +207,7 @@ if not tmpl: return b' '.join(flags) out = util.stringio() - spec = formatter.templatespec(b'', templater.unquotestring(tmpl), None) + spec = formatter.literal_templatespec(templater.unquotestring(tmpl)) with formatter.templateformatter(ui, out, b'patchbombflag', {}, spec) as fm: fm.startitem() fm.context(ctx=repo[rev]) diff -r bd5b2b29b82d -r e3df1f560d9a hgext/phabricator.py --- a/hgext/phabricator.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/phabricator.py Fri Sep 18 10:48:43 2020 -0400 @@ -76,6 +76,7 @@ patch, phases, pycompat, + rewriteutil, scmutil, smartset, tags, @@ -166,7 +167,7 @@ @eh.wrapfunction(localrepo, "loadhgrc") -def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements): +def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, 
*args, **opts): """Load ``.arcconfig`` content into a ui instance on repository open. """ result = False @@ -200,7 +201,9 @@ if cfg: ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig")) - return orig(ui, wdirvfs, hgvfs, requirements) or result # Load .hg/hgrc + return ( + orig(ui, wdirvfs, hgvfs, requirements, *args, **opts) or result + ) # Load .hg/hgrc def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False): @@ -238,8 +241,9 @@ def decorate(fn): def inner(*args, **kwargs): - if kwargs.get('test_vcr'): - cassette = pycompat.fsdecode(kwargs.pop('test_vcr')) + vcr = kwargs.pop('test_vcr') + if vcr: + cassette = pycompat.fsdecode(vcr) import hgdemandimport with hgdemandimport.deactivated(): @@ -1510,6 +1514,9 @@ mapping.get(old.p1().node(), (old.p1(),))[0], mapping.get(old.p2().node(), (old.p2(),))[0], ] + newdesc = rewriteutil.update_hash_refs( + repo, newdesc, mapping, + ) new = context.metadataonlyctx( repo, old, @@ -1587,7 +1594,9 @@ repo, old, parents=parents, - text=old.description(), + text=rewriteutil.update_hash_refs( + repo, old.description(), mapping + ), user=old.user(), date=old.date(), extra=old.extra(), diff -r bd5b2b29b82d -r e3df1f560d9a hgext/remotefilelog/__init__.py --- a/hgext/remotefilelog/__init__.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/remotefilelog/__init__.py Fri Sep 18 10:48:43 2020 -0400 @@ -150,6 +150,7 @@ localrepo, match as matchmod, merge, + mergestate as mergestatemod, node as nodemod, patch, pycompat, @@ -361,7 +362,10 @@ self.unfiltered().__class__, ) self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) - scmutil.writereporequirements(self) + with self.lock(): + # acquire store lock before writing requirements as some + # requirements might be written to .hg/store/requires + scmutil.writereporequirements(self) # Since setupclient hadn't been called, exchange.pull was not # wrapped. So we need to manually invoke our version of it. 
@@ -479,36 +483,38 @@ # prefetch files before update def applyupdates( - orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None + orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts ): if isenabled(repo): manifest = mctx.manifest() files = [] - for f, args, msg in actions[b'g']: + for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]): files.append((f, hex(manifest[f]))) # batch fetch the needed files from the server repo.fileservice.prefetch(files) - return orig( - repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels - ) + return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts) # Prefetch merge checkunknownfiles -def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs): +def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs): if isenabled(repo): files = [] sparsematch = repo.maybesparsematch(mctx.rev()) - for f, (m, actionargs, msg) in pycompat.iteritems(actions): + for f, (m, actionargs, msg) in mresult.filemap(): if sparsematch and not sparsematch(f): continue - if m in (b'c', b'dc', b'cm'): + if m in ( + mergestatemod.ACTION_CREATED, + mergestatemod.ACTION_DELETED_CHANGED, + mergestatemod.ACTION_CREATED_MERGE, + ): files.append((f, hex(mctx.filenode(f)))) - elif m == b'dg': + elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET: f2 = actionargs[0] files.append((f2, hex(mctx.filenode(f2)))) # batch fetch the needed files from the server repo.fileservice.prefetch(files) - return orig(repo, wctx, mctx, force, actions, *args, **kwargs) + return orig(repo, wctx, mctx, force, mresult, *args, **kwargs) # Prefetch files before status attempts to look at their size and contents diff -r bd5b2b29b82d -r e3df1f560d9a hgext/remotefilelog/remotefilelogserver.py --- a/hgext/remotefilelog/remotefilelogserver.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/remotefilelog/remotefilelogserver.py Fri Sep 18 10:48:43 2020 -0400 @@ -23,6 +23,7 @@ extensions, match, 
pycompat, + requirements, store, streamclone, util, @@ -169,7 +170,7 @@ if kind == stat.S_IFDIR: visit.append(fp) - if b'treemanifest' in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: for (u, e, s) in repo.store.datafiles(): if u.startswith(b'meta/') and ( u.endswith(b'.i') or u.endswith(b'.d') diff -r bd5b2b29b82d -r e3df1f560d9a hgext/sqlitestore.py --- a/hgext/sqlitestore.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/sqlitestore.py Fri Sep 18 10:48:43 2020 -0400 @@ -67,6 +67,7 @@ mdiff, pycompat, registrar, + requirements, util, verify, ) @@ -1151,7 +1152,7 @@ supported.add(REQUIREMENT_ZLIB) supported.add(REQUIREMENT_NONE) supported.add(REQUIREMENT_SHALLOW_FILES) - supported.add(repository.NARROW_REQUIREMENT) + supported.add(requirements.NARROW_REQUIREMENT) def newreporequirements(orig, ui, createopts): diff -r bd5b2b29b82d -r e3df1f560d9a hgext/strip.py --- a/hgext/strip.py Sun Sep 13 15:59:23 2020 +0900 +++ b/hgext/strip.py Fri Sep 18 10:48:43 2020 -0400 @@ -269,7 +269,7 @@ repo.dirstate.write(repo.currenttransaction()) # clear resolve state - mergestatemod.mergestate.clean(repo, repo[b'.'].node()) + mergestatemod.mergestate.clean(repo) update = False diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/bundle2.py --- a/mercurial/bundle2.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/bundle2.py Fri Sep 18 10:48:43 2020 -0400 @@ -166,6 +166,7 @@ phases, pushkey, pycompat, + requirements, scmutil, streamclone, tags, @@ -1965,7 +1966,7 @@ nbchangesets = int(inpart.params.get(b'nbchanges')) if ( b'treemanifest' in inpart.params - and b'treemanifest' not in op.repo.requirements + and requirements.TREEMANIFEST_REQUIREMENT not in op.repo.requirements ): if len(op.repo.changelog) != 0: raise error.Abort( @@ -1974,7 +1975,7 @@ b"non-empty and does not use tree manifests" ) ) - op.repo.requirements.add(b'treemanifest') + op.repo.requirements.add(requirements.TREEMANIFEST_REQUIREMENT) op.repo.svfs.options = 
localrepo.resolvestorevfsoptions( op.repo.ui, op.repo.requirements, op.repo.features ) @@ -2576,7 +2577,7 @@ part = bundler.newpart(b'changegroup', data=cgdata) part.addparam(b'version', cgversion) - if b'treemanifest' in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: part.addparam(b'treemanifest', b'1') if b'exp-sidedata-flag' in repo.requirements: part.addparam(b'exp-sidedata', b'1') diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/changegroup.py --- a/mercurial/changegroup.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/changegroup.py Fri Sep 18 10:48:43 2020 -0400 @@ -26,6 +26,7 @@ mdiff, phases, pycompat, + requirements, util, ) @@ -949,7 +950,7 @@ # either, because we don't discover which directory nodes to # send along with files. This could probably be fixed. fastpathlinkrev = fastpathlinkrev and ( - b'treemanifest' not in repo.requirements + requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements ) fnodes = {} # needed file nodes @@ -1467,7 +1468,7 @@ if ( repo.ui.configbool(b'experimental', b'changegroup3') or repo.ui.configbool(b'experimental', b'treemanifest') - or b'treemanifest' in repo.requirements + or requirements.TREEMANIFEST_REQUIREMENT in repo.requirements ): # we keep version 03 because we need to to exchange treemanifest data # @@ -1495,7 +1496,7 @@ # Changegroup versions that can be created from the repo def supportedoutgoingversions(repo): versions = allsupportedversions(repo) - if b'treemanifest' in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: # Versions 01 and 02 support only flat manifests and it's just too # expensive to convert between the flat manifest and tree manifest on # the fly. Since tree manifests are hashed differently, all of history @@ -1503,7 +1504,7 @@ # support versions 01 and 02. 
versions.discard(b'01') versions.discard(b'02') - if repository.NARROW_REQUIREMENT in repo.requirements: + if requirements.NARROW_REQUIREMENT in repo.requirements: # Versions 01 and 02 don't support revlog flags, and we need to # support that for stripping and unbundling to work. versions.discard(b'01') diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/changelog.py --- a/mercurial/changelog.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/changelog.py Fri Sep 18 10:48:43 2020 -0400 @@ -524,10 +524,6 @@ user, date=None, extra=None, - p1copies=None, - p2copies=None, - filesadded=None, - filesremoved=None, ): # Convert to UTF-8 encoded bytestrings as the very first # thing: calling any method on a localstr object will turn it @@ -559,48 +555,10 @@ raise error.StorageError( _(b'the name \'%s\' is reserved') % branch ) - sortedfiles = sorted(files) + sortedfiles = sorted(files.touched) sidedata = None - if extra is not None: - for name in ( - b'p1copies', - b'p2copies', - b'filesadded', - b'filesremoved', - ): - extra.pop(name, None) - if p1copies is not None: - p1copies = metadata.encodecopies(sortedfiles, p1copies) - if p2copies is not None: - p2copies = metadata.encodecopies(sortedfiles, p2copies) - if filesadded is not None: - filesadded = metadata.encodefileindices(sortedfiles, filesadded) - if filesremoved is not None: - filesremoved = metadata.encodefileindices(sortedfiles, filesremoved) - if self._copiesstorage == b'extra': - extrasentries = p1copies, p2copies, filesadded, filesremoved - if extra is None and any(x is not None for x in extrasentries): - extra = {} - if p1copies is not None: - extra[b'p1copies'] = p1copies - if p2copies is not None: - extra[b'p2copies'] = p2copies - if filesadded is not None: - extra[b'filesadded'] = filesadded - if filesremoved is not None: - extra[b'filesremoved'] = filesremoved - elif self._copiesstorage == b'changeset-sidedata': - sidedata = {} - if p1copies: - sidedata[sidedatamod.SD_P1COPIES] = p1copies - if p2copies: - 
sidedata[sidedatamod.SD_P2COPIES] = p2copies - if filesadded: - sidedata[sidedatamod.SD_FILESADDED] = filesadded - if filesremoved: - sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved - if not sidedata: - sidedata = None + if self._copiesstorage == b'changeset-sidedata': + sidedata = metadata.encode_copies_sidedata(files) if extra: extra = encodeextra(extra) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/cmdutil.py --- a/mercurial/cmdutil.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/cmdutil.py Fri Sep 18 10:48:43 2020 -0400 @@ -46,6 +46,7 @@ phases, pycompat, repair, + requirements, revlog, rewriteutil, scmutil, @@ -1358,7 +1359,7 @@ if cl: r = repo.unfiltered().changelog elif dir: - if b'treemanifest' not in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT not in repo.requirements: raise error.Abort( _( b"--dir can only be used on repos with " @@ -2229,26 +2230,17 @@ def finddate(ui, repo, date): """Find the tipmost changeset that matches the given date spec""" - - df = dateutil.matchdate(date) - m = scmutil.matchall(repo) - results = {} - - def prep(ctx, fns): - d = ctx.date() - if df(d[0]): - results[ctx.rev()] = d - - for ctx in walkchangerevs(repo, m, {b'rev': None}, prep): - rev = ctx.rev() - if rev in results: - ui.status( - _(b"found revision %d from %s\n") - % (rev, dateutil.datestr(results[rev])) - ) - return b'%d' % rev - - raise error.Abort(_(b"revision matching date not found")) + mrevs = repo.revs(b'date(%s)', date) + try: + rev = mrevs.max() + except ValueError: + raise error.Abort(_(b"revision matching date not found")) + + ui.status( + _(b"found revision %d from %s\n") + % (rev, dateutil.datestr(repo[rev].date())) + ) + return b'%d' % rev def increasingwindows(windowsize=8, sizelimit=512): @@ -2262,8 +2254,12 @@ # Default --rev value depends on --follow but --follow behavior # depends on revisions resolved from --rev... 
follow = opts.get(b'follow') or opts.get(b'follow_first') - if opts.get(b'rev'): - revs = scmutil.revrange(repo, opts[b'rev']) + revspec = opts.get(b'rev') + if follow and revspec: + revs = scmutil.revrange(repo, revspec) + revs = repo.revs(b'reverse(::%ld)', revs) + elif revspec: + revs = scmutil.revrange(repo, revspec) elif follow and repo.dirstate.p1() == nullid: revs = smartset.baseset() elif follow: @@ -2394,12 +2390,20 @@ def match(self, rev): def realparents(rev): - if self.onlyfirst: - return self.repo.changelog.parentrevs(rev)[0:1] - else: - return filter( - lambda x: x != nullrev, self.repo.changelog.parentrevs(rev) - ) + try: + if self.onlyfirst: + return self.repo.changelog.parentrevs(rev)[0:1] + else: + return filter( + lambda x: x != nullrev, + self.repo.changelog.parentrevs(rev), + ) + except error.WdirUnsupported: + prevs = [p.rev() for p in self.repo[rev].parents()] + if self.onlyfirst: + return prevs[:1] + else: + return prevs if self.startrev == nullrev: self.startrev = rev @@ -3258,6 +3262,7 @@ if opts.get(b'secret'): commitphase = phases.secret newid = repo.commitctx(new) + ms.reset() # Reroute the working copy parent to the new changeset repo.setparents(newid, nullid) @@ -3375,7 +3380,7 @@ def buildcommittemplate(repo, ctx, subs, extramsg, ref): ui = repo.ui - spec = formatter.templatespec(ref, None, None) + spec = formatter.reference_templatespec(ref) t = logcmdutil.changesettemplater(ui, repo, spec) t.t.cache.update( (k, templater.unquotestring(v)) @@ -3492,9 +3497,9 @@ return repo.status(match=scmutil.match(repo[None], pats, opts)) -def revert(ui, repo, ctx, parents, *pats, **opts): +def revert(ui, repo, ctx, *pats, **opts): opts = pycompat.byteskwargs(opts) - parent, p2 = parents + parent, p2 = repo.dirstate.parents() node = ctx.node() mf = ctx.manifest() @@ -3780,7 +3785,6 @@ match = scmutil.match(repo[None], pats) _performrevert( repo, - parents, ctx, names, uipathfn, @@ -3806,7 +3810,6 @@ def _performrevert( repo, - parents, ctx, names, 
uipathfn, @@ -3822,7 +3825,7 @@ Make sure you have the working directory locked when calling this function. """ - parent, p2 = parents + parent, p2 = repo.dirstate.parents() node = ctx.node() excluded_files = [] diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/commands.py --- a/mercurial/commands.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/commands.py Fri Sep 18 10:48:43 2020 -0400 @@ -55,6 +55,7 @@ pycompat, rcutil, registrar, + requirements, revsetlang, rewriteutil, scmutil, @@ -66,6 +67,7 @@ ui as uimod, util, verify as verifymod, + vfs as vfsmod, wireprotoserver, ) from .utils import ( @@ -837,7 +839,7 @@ else: hg.clean(repo, node, show_stats=False) repo.dirstate.setbranch(branch) - cmdutil.revert(ui, repo, rctx, repo.dirstate.parents()) + cmdutil.revert(ui, repo, rctx) if opts.get(b'no_commit'): msg = _(b"changeset %s backed out, don't forget to commit.\n") @@ -2141,6 +2143,12 @@ (b'u', b'untrusted', None, _(b'show untrusted configuration options')), (b'e', b'edit', None, _(b'edit user config')), (b'l', b'local', None, _(b'edit repository config')), + ( + b'', + b'shared', + None, + _(b'edit shared source repository config (EXPERIMENTAL)'), + ), (b'g', b'global', None, _(b'edit global config')), ] + formatteropts, @@ -2179,22 +2187,37 @@ :source: String. Filename and line number where the item is defined. :value: String. Config value. + The --shared flag can be used to edit the config file of shared source + repository. It only works when you have shared using the experimental + share safe feature. + Returns 0 on success, 1 if NAME does not exist. 
""" opts = pycompat.byteskwargs(opts) - editopts = (b'edit', b'local', b'global') + editopts = (b'edit', b'local', b'global', b'shared') if any(opts.get(o) for o in editopts): - if opts.get(b'local') and opts.get(b'global'): - raise error.Abort(_(b"can't use --local and --global together")) - + cmdutil.check_at_most_one_arg(opts, *editopts[1:]) if opts.get(b'local'): if not repo: raise error.Abort(_(b"can't use --local outside a repository")) paths = [repo.vfs.join(b'hgrc')] elif opts.get(b'global'): paths = rcutil.systemrcpath() + elif opts.get(b'shared'): + if not repo.shared(): + raise error.Abort( + _(b"repository is not shared; can't use --shared") + ) + if requirements.SHARESAFE_REQUIREMENT not in repo.requirements: + raise error.Abort( + _( + b"share safe feature not unabled; " + b"unable to edit shared source repository config" + ) + ) + paths = [vfsmod.vfs(repo.sharedpath).join(b'hgrc')] else: paths = rcutil.userrcpath() @@ -3252,7 +3275,7 @@ b'grep', [ (b'0', b'print0', None, _(b'end fields with NUL')), - (b'', b'all', None, _(b'print all revisions that match (DEPRECATED) ')), + (b'', b'all', None, _(b'an alias to --diff (DEPRECATED)')), ( b'', b'diff', @@ -3357,7 +3380,11 @@ raise error.Abort(_(b'--diff and --all-files are mutually exclusive')) if opts.get(b'all_files') is None and not diff: opts[b'all_files'] = True - plaingrep = opts.get(b'all_files') and not opts.get(b'rev') + plaingrep = ( + opts.get(b'all_files') + and not opts.get(b'rev') + and not opts.get(b'follow') + ) all_files = opts.get(b'all_files') if plaingrep: opts[b'rev'] = [b'wdir()'] @@ -3400,7 +3427,7 @@ self.colend = colend def __hash__(self): - return hash((self.linenum, self.line)) + return hash(self.line) def __eq__(self, other): return self.line == other.line @@ -5781,6 +5808,13 @@ [ (b'A', b'after', None, _(b'record a rename that has already occurred')), ( + b'', + b'at-rev', + b'', + _(b'(un)mark renames in the given revision (EXPERIMENTAL)'), + _(b'REV'), + ), + ( b'f', 
b'force', None, @@ -5962,8 +5996,6 @@ if not m(f): continue - if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER: - continue label, key = mergestateinfo[ms[f]] fm.startitem() fm.context(ctx=wctx) @@ -6011,9 +6043,6 @@ didwork = True - if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER: - continue - # don't let driver-resolved files be marked, and run the conclude # step if asked to resolve if ms[f] == mergestatemod.MERGE_RECORD_DRIVER_RESOLVED: @@ -6294,9 +6323,7 @@ hint = _(b"use --all to revert all files") raise error.Abort(msg, hint=hint) - return cmdutil.revert( - ui, repo, ctx, (parent, p2), *pats, **pycompat.strkwargs(opts) - ) + return cmdutil.revert(ui, repo, ctx, *pats, **pycompat.strkwargs(opts)) @command( diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/commit.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/commit.py Fri Sep 18 10:48:43 2020 -0400 @@ -0,0 +1,442 @@ +# commit.py - fonction to perform commit +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import errno + +from .i18n import _ +from .node import ( + hex, + nullid, + nullrev, +) + +from . 
import ( + context, + mergestate, + metadata, + phases, + scmutil, + subrepoutil, +) + + +def _write_copy_meta(repo): + """return a (changelog, filelog) boolean tuple + + changelog: copy related information should be stored in the changeset + filelof: copy related information should be written in the file revision + """ + if repo.filecopiesmode == b'changeset-sidedata': + writechangesetcopy = True + writefilecopymeta = True + else: + writecopiesto = repo.ui.config(b'experimental', b'copies.write-to') + writefilecopymeta = writecopiesto != b'changeset-only' + writechangesetcopy = writecopiesto in ( + b'changeset-only', + b'compatibility', + ) + return writechangesetcopy, writefilecopymeta + + +def commitctx(repo, ctx, error=False, origctx=None): + """Add a new revision to the target repository. + Revision information is passed via the context argument. + + ctx.files() should list all files involved in this commit, i.e. + modified/added/removed files. On merge, it may be wider than the + ctx.files() to be committed, since any file nodes derived directly + from p1 or p2 are excluded from the committed ctx.files(). + + origctx is for convert to work around the problem that bug + fixes to the files list in changesets change hashes. For + convert to be the identity, it can pass an origctx and this + function will use the same files list when it makes sense to + do so. 
+ """ + repo = repo.unfiltered() + + p1, p2 = ctx.p1(), ctx.p2() + user = ctx.user() + + with repo.lock(), repo.transaction(b"commit") as tr: + mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx) + + extra = ctx.extra().copy() + + if extra is not None: + for name in ( + b'p1copies', + b'p2copies', + b'filesadded', + b'filesremoved', + ): + extra.pop(name, None) + if repo.changelog._copiesstorage == b'extra': + extra = _extra_with_copies(repo, extra, files) + + # update changelog + repo.ui.note(_(b"committing changelog\n")) + repo.changelog.delayupdate(tr) + n = repo.changelog.add( + mn, + files, + ctx.description(), + tr, + p1.node(), + p2.node(), + user, + ctx.date(), + extra, + ) + xp1, xp2 = p1.hex(), p2 and p2.hex() or b'' + repo.hook( + b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2, + ) + # set the new commit is proper phase + targetphase = subrepoutil.newcommitphase(repo.ui, ctx) + if targetphase: + # retract boundary do not alter parent changeset. 
+ # if a parent have higher the resulting phase will + # be compliant anyway + # + # if minimal phase was 0 we don't need to retract anything + phases.registernew(repo, tr, targetphase, [n]) + return n + + +def _prepare_files(tr, ctx, error=False, origctx=None): + repo = ctx.repo() + p1 = ctx.p1() + + writechangesetcopy, writefilecopymeta = _write_copy_meta(repo) + + if ctx.manifestnode(): + # reuse an existing manifest revision + repo.ui.debug(b'reusing known manifest\n') + mn = ctx.manifestnode() + files = metadata.ChangingFiles() + files.update_touched(ctx.files()) + if writechangesetcopy: + files.update_added(ctx.filesadded()) + files.update_removed(ctx.filesremoved()) + elif not ctx.files(): + repo.ui.debug(b'reusing manifest from p1 (no file change)\n') + mn = p1.manifestnode() + files = metadata.ChangingFiles() + else: + mn, files = _process_files(tr, ctx, error=error) + + if origctx and origctx.manifestnode() == mn: + origfiles = origctx.files() + assert files.touched.issubset(origfiles) + files.update_touched(origfiles) + + if writechangesetcopy: + files.update_copies_from_p1(ctx.p1copies()) + files.update_copies_from_p2(ctx.p2copies()) + + return mn, files + + +def _process_files(tr, ctx, error=False): + repo = ctx.repo() + p1 = ctx.p1() + p2 = ctx.p2() + + writechangesetcopy, writefilecopymeta = _write_copy_meta(repo) + + m1ctx = p1.manifestctx() + m2ctx = p2.manifestctx() + mctx = m1ctx.copy() + + m = mctx.read() + m1 = m1ctx.read() + m2 = m2ctx.read() + ms = mergestate.mergestate.read(repo) + + files = metadata.ChangingFiles() + + # check in files + added = [] + removed = list(ctx.removed()) + linkrev = len(repo) + repo.ui.note(_(b"committing files:\n")) + uipathfn = scmutil.getuipathfn(repo) + for f in sorted(ctx.modified() + ctx.added()): + repo.ui.note(uipathfn(f) + b"\n") + try: + fctx = ctx[f] + if fctx is None: + removed.append(f) + else: + added.append(f) + m[f], is_touched = _filecommit( + repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms 
+ ) + if is_touched: + if is_touched == 'added': + files.mark_added(f) + else: + files.mark_touched(f) + m.setflag(f, fctx.flags()) + except OSError: + repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f)) + raise + except IOError as inst: + errcode = getattr(inst, 'errno', errno.ENOENT) + if error or errcode and errcode != errno.ENOENT: + repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f)) + raise + + # update manifest + removed = [f for f in removed if f in m1 or f in m2] + drop = sorted([f for f in removed if f in m]) + for f in drop: + del m[f] + if p2.rev() == nullrev: + files.update_removed(removed) + else: + rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2)) + for f in removed: + if not rf(f): + files.mark_removed(f) + + mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop) + + return mn, files + + +def _filecommit( + repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta, ms, +): + """ + commit an individual file as part of a larger transaction + + input: + + fctx: a file context with the content we are trying to commit + manifest1: manifest of changeset first parent + manifest2: manifest of changeset second parent + linkrev: revision number of the changeset being created + tr: current transation + includecopymeta: boolean, set to False to skip storing the copy data + (only used by the Google specific feature of using + changeset extra as copy source of truth). + ms: mergestate object + + output: (filenode, touched) + + filenode: the filenode that should be used by this changeset + touched: one of: None (mean untouched), 'added' or 'modified' + """ + + fname = fctx.path() + fparent1 = manifest1.get(fname, nullid) + fparent2 = manifest2.get(fname, nullid) + touched = None + if fparent1 == fparent2 == nullid: + touched = 'added' + + if isinstance(fctx, context.filectx): + # This block fast path most comparisons which are usually done. 
It + # assumes that bare filectx is used and no merge happened, hence no + # need to create a new file revision in this case. + node = fctx.filenode() + if node in [fparent1, fparent2]: + repo.ui.debug(b'reusing %s filelog entry\n' % fname) + if ( + fparent1 != nullid and manifest1.flags(fname) != fctx.flags() + ) or ( + fparent2 != nullid and manifest2.flags(fname) != fctx.flags() + ): + touched = 'modified' + return node, touched + + flog = repo.file(fname) + meta = {} + cfname = fctx.copysource() + fnode = None + + if cfname and cfname != fname: + # Mark the new revision of this file as a copy of another + # file. This copy data will effectively act as a parent + # of this new revision. If this is a merge, the first + # parent will be the nullid (meaning "look up the copy data") + # and the second one will be the other parent. For example: + # + # 0 --- 1 --- 3 rev1 changes file foo + # \ / rev2 renames foo to bar and changes it + # \- 2 -/ rev3 should have bar with all changes and + # should record that bar descends from + # bar in rev2 and foo in rev1 + # + # this allows this merge to succeed: + # + # 0 --- 1 --- 3 rev4 reverts the content change from rev2 + # \ / merging rev3 and rev4 should use bar@rev2 + # \- 2 --- 4 as the merge base + # + + cnode = manifest1.get(cfname) + newfparent = fparent2 + + if manifest2: # branch merge + if fparent2 == nullid or cnode is None: # copied on remote side + if cfname in manifest2: + cnode = manifest2[cfname] + newfparent = fparent1 + + # Here, we used to search backwards through history to try to find + # where the file copy came from if the source of a copy was not in + # the parent directory. However, this doesn't actually make sense to + # do (what does a copy from something not in your working copy even + # mean?) and it causes bugs (eg, issue4476). 
Instead, we will warn + # the user that copy information was dropped, so if they didn't + # expect this outcome it can be fixed, but this is the correct + # behavior in this circumstance. + + if cnode: + repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))) + if includecopymeta: + meta[b"copy"] = cfname + meta[b"copyrev"] = hex(cnode) + fparent1, fparent2 = nullid, newfparent + else: + repo.ui.warn( + _( + b"warning: can't find ancestor for '%s' " + b"copied from '%s'!\n" + ) + % (fname, cfname) + ) + + elif fparent1 == nullid: + fparent1, fparent2 = fparent2, nullid + elif fparent2 != nullid: + # is one parent an ancestor of the other? + fparentancestors = flog.commonancestorsheads(fparent1, fparent2) + if fparent1 in fparentancestors: + fparent1, fparent2 = fparent2, nullid + elif fparent2 in fparentancestors: + fparent2 = nullid + elif not fparentancestors: + # TODO: this whole if-else might be simplified much more + if ( + ms.active() + and ms.extras(fname).get(b'filenode-source') == b'other' + ): + fparent1, fparent2 = fparent2, nullid + + # is the file changed? + text = fctx.data() + if fparent2 != nullid or meta or flog.cmp(fparent1, text): + if touched is None: # do not overwrite added + touched = 'modified' + fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2) + # are just the flags changed during merge? + elif fname in manifest1 and manifest1.flags(fname) != fctx.flags(): + touched = 'modified' + fnode = fparent1 + else: + fnode = fparent1 + return fnode, touched + + +def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop): + """make a new manifest entry (or reuse a new one) + + given an initialised manifest context and precomputed list of + - files: files affected by the commit + - added: new entries in the manifest + - drop: entries present in parents but absent of this one + + Create a new manifest revision, reuse existing ones if possible. + + Return the nodeid of the manifest revision. 
+ """ + repo = ctx.repo() + + md = None + + # all this is cached, so it is find to get them all from the ctx. + p1 = ctx.p1() + p2 = ctx.p2() + m1ctx = p1.manifestctx() + + m1 = m1ctx.read() + + if not files: + # if no "files" actually changed in terms of the changelog, + # try hard to detect unmodified manifest entry so that the + # exact same commit can be reproduced later on convert. + md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files())) + if not files and md: + repo.ui.debug( + b'not reusing manifest (no file change in ' + b'changelog, but manifest differs)\n' + ) + if files or md: + repo.ui.note(_(b"committing manifest\n")) + # we're using narrowmatch here since it's already applied at + # other stages (such as dirstate.walk), so we're already + # ignoring things outside of narrowspec in most cases. The + # one case where we might have files outside the narrowspec + # at this point is merges, and we already error out in the + # case where the merge has files outside of the narrowspec, + # so this is safe. + mn = mctx.write( + tr, + linkrev, + p1.manifestnode(), + p2.manifestnode(), + added, + drop, + match=repo.narrowmatch(), + ) + else: + repo.ui.debug( + b'reusing manifest from p1 (listed files ' b'actually unchanged)\n' + ) + mn = p1.manifestnode() + + return mn + + +def _extra_with_copies(repo, extra, files): + """encode copy information into a `extra` dictionnary""" + p1copies = files.copied_from_p1 + p2copies = files.copied_from_p2 + filesadded = files.added + filesremoved = files.removed + files = sorted(files.touched) + if not _write_copy_meta(repo)[1]: + # If writing only to changeset extras, use None to indicate that + # no entry should be written. If writing to both, write an empty + # entry to prevent the reader from falling back to reading + # filelogs. 
+ p1copies = p1copies or None + p2copies = p2copies or None + filesadded = filesadded or None + filesremoved = filesremoved or None + + extrasentries = p1copies, p2copies, filesadded, filesremoved + if extra is None and any(x is not None for x in extrasentries): + extra = {} + if p1copies is not None: + p1copies = metadata.encodecopies(files, p1copies) + extra[b'p1copies'] = p1copies + if p2copies is not None: + p2copies = metadata.encodecopies(files, p2copies) + extra[b'p2copies'] = p2copies + if filesadded is not None: + filesadded = metadata.encodefileindices(files, filesadded) + extra[b'filesadded'] = filesadded + if filesremoved is not None: + filesremoved = metadata.encodefileindices(files, filesremoved) + extra[b'filesremoved'] = filesremoved + return extra diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/config.py --- a/mercurial/config.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/config.py Fri Sep 18 10:48:43 2020 -0400 @@ -21,10 +21,9 @@ class config(object): - def __init__(self, data=None, includepaths=None): + def __init__(self, data=None): self._data = {} self._unset = [] - self._includepaths = includepaths or [] if data: for k in data._data: self._data[k] = data[k].copy() @@ -162,21 +161,15 @@ if m and include: expanded = util.expandpath(m.group(1)) - includepaths = [os.path.dirname(src)] + self._includepaths - - for base in includepaths: - inc = os.path.normpath(os.path.join(base, expanded)) - - try: - include(inc, remap=remap, sections=sections) - break - except IOError as inst: - if inst.errno != errno.ENOENT: - raise error.ParseError( - _(b"cannot include %s (%s)") - % (inc, encoding.strtolocal(inst.strerror)), - b"%s:%d" % (src, line), - ) + try: + include(expanded, remap=remap, sections=sections) + except IOError as inst: + if inst.errno != errno.ENOENT: + raise error.ParseError( + _(b"cannot include %s (%s)") + % (expanded, encoding.strtolocal(inst.strerror)), + b"%s:%d" % (src, line), + ) continue if emptyre.match(l): continue @@ -216,8 
+209,15 @@ b'config files must be opened in binary mode, got fp=%r mode=%r' % (fp, fp.mode,) ) + + dir = os.path.dirname(path) + + def include(rel, remap, sections): + abs = os.path.normpath(os.path.join(dir, rel)) + self.read(abs, remap=remap, sections=sections) + self.parse( - path, fp.read(), sections=sections, remap=remap, include=self.read + path, fp.read(), sections=sections, remap=remap, include=include ) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/configitems.py --- a/mercurial/configitems.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/configitems.py Fri Sep 18 10:48:43 2020 -0400 @@ -784,6 +784,9 @@ b'format', b'exp-use-side-data', default=False, experimental=True, ) coreconfigitem( + b'format', b'exp-share-safe', default=False, experimental=True, +) +coreconfigitem( b'format', b'internal-phase', default=False, experimental=True, ) coreconfigitem( @@ -793,6 +796,9 @@ b'fsmonitor', b'warn_update_file_count', default=50000, ) coreconfigitem( + b'fsmonitor', b'warn_update_file_count_rust', default=400000, +) +coreconfigitem( b'help', br'hidden-command\..*', default=False, generic=True, ) coreconfigitem( diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/context.py --- a/mercurial/context.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/context.py Fri Sep 18 10:48:43 2020 -0400 @@ -271,7 +271,7 @@ return self._manifest.find(path) except KeyError: raise error.ManifestLookupError( - self._node, path, _(b'not found in manifest') + self._node or b'None', path, _(b'not found in manifest') ) if '_manifestdelta' in self.__dict__ or path in self.files(): if path in self._manifestdelta: @@ -284,7 +284,7 @@ node, flag = mfl[self._changeset.manifest].find(path) except KeyError: raise error.ManifestLookupError( - self._node, path, _(b'not found in manifest') + self._node or b'None', path, _(b'not found in manifest') ) return node, flag @@ -2528,6 +2528,7 @@ return path in self._cache def clean(self): + self._mergestate = None self._cache = {} def _compact(self): @@ 
-2592,6 +2593,11 @@ self._repo, path, parent=self, filelog=filelog ) + + def mergestate(self, clean=False): + if clean or self._mergestate is None: + self._mergestate = mergestatemod.memmergestate(self._repo) + return self._mergestate + class overlayworkingfilectx(committablefilectx): """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/debugcommands.py --- a/mercurial/debugcommands.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/debugcommands.py Fri Sep 18 10:48:43 2020 -0400 @@ -1668,11 +1668,11 @@ fm.data(re2=bool(util._re2)) # templates - p = templater.templatepaths() - fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p)) + p = templater.templatedir() + fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'') fm.condwrite(not p, b'', _(b" no template directories found\n")) if p: - m = templater.templatepath(b"map-cmdline.default") + (m, fp) = templater.try_open_template(b"map-cmdline.default") if m: # template found, check if it is working err = None @@ -2016,6 +2016,7 @@ b'")}' b'{extras % " extra: {key} = {value}\n"}' b'"}' + b'{extras % "extra: {file} ({key} = {value})\n"}' ) ms = mergestatemod.mergestate.read(repo) @@ -2061,7 +2062,7 @@ fm_files.data(renamed_path=state[1]) fm_files.data(rename_side=state[2]) fm_extras = fm_files.nested(b'extras') - for k, v in ms.extras(f).items(): + for k, v in sorted(ms.extras(f).items()): fm_extras.startitem() fm_extras.data(key=k) fm_extras.data(value=v) @@ -2069,6 +2070,18 @@ fm_files.end() + fm_extras = fm.nested(b'extras') + for f, d in sorted(pycompat.iteritems(ms._stateextras)): + if f in ms: + # If file is in mergestate, we have already processed its extras + continue + for k, v in pycompat.iteritems(d): + fm_extras.startitem() + fm_extras.data(file=f) + fm_extras.data(key=k) + fm_extras.data(value=v) + fm_extras.end() + fm.end() diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/dirstate.py --- a/mercurial/dirstate.py
Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/dirstate.py Fri Sep 18 10:48:43 2020 -0400 @@ -1425,6 +1425,7 @@ self._opener = opener self._root = root self._filename = b'dirstate' + self._nodelen = 20 self._parents = None self._dirtyparents = False @@ -1609,7 +1610,7 @@ if not self._parents: try: fp = self._opendirstatefile() - st = fp.read(40) + st = fp.read(2 * self._nodelen) fp.close() except IOError as err: if err.errno != errno.ENOENT: @@ -1618,8 +1619,11 @@ st = b'' l = len(st) - if l == 40: - self._parents = (st[:20], st[20:40]) + if l == self._nodelen * 2: + self._parents = ( + st[: self._nodelen], + st[self._nodelen : 2 * self._nodelen], + ) elif l == 0: self._parents = (nullid, nullid) else: @@ -1654,15 +1658,11 @@ if util.safehasattr(parsers, b'dict_new_presized'): # Make an estimate of the number of files in the dirstate based on - # its size. From a linear regression on a set of real-world repos, - # all over 10,000 files, the size of a dirstate entry is 85 - # bytes. The cost of resizing is significantly higher than the cost - # of filling in a larger presized dict, so subtract 20% from the - # size. - # - # This heuristic is imperfect in many ways, so in a future dirstate - # format update it makes sense to just record the number of entries - # on write. + # its size. This trades wasting some memory for avoiding costly + # resizes. Each entry has a prefix of 17 bytes followed by one or + # two path names. Studies on various large-scale real-world repositories + # found 54 bytes a reasonable upper limit for the average path names. + # Copy entries are ignored for the sake of this estimate.
self._map = parsers.dict_new_presized(len(st) // 71) # Python's garbage collector triggers a GC each time a certain number diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/exchange.py --- a/mercurial/exchange.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/exchange.py Fri Sep 18 10:48:43 2020 -0400 @@ -32,6 +32,7 @@ phases, pushkey, pycompat, + requirements, scmutil, sslutil, streamclone, @@ -39,7 +40,6 @@ util, wireprototypes, ) -from .interfaces import repository from .utils import ( hashutil, stringutil, @@ -1068,7 +1068,7 @@ cgpart = bundler.newpart(b'changegroup', data=cgstream) if cgversions: cgpart.addparam(b'version', version) - if b'treemanifest' in pushop.repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in pushop.repo.requirements: cgpart.addparam(b'treemanifest', b'1') if b'exp-sidedata-flag' in pushop.repo.requirements: cgpart.addparam(b'exp-sidedata', b'1') @@ -1691,7 +1691,7 @@ old_heads = unficl.heads() clstart = len(unficl) _pullbundle2(pullop) - if repository.NARROW_REQUIREMENT in repo.requirements: + if requirements.NARROW_REQUIREMENT in repo.requirements: # XXX narrow clones filter the heads on the server side during # XXX getbundle and result in partial replies as well. 
# XXX Disable pull bundles in this case as band aid to avoid @@ -2557,7 +2557,7 @@ part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False) - if b'treemanifest' in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: part.addparam(b'treemanifest', b'1') if b'exp-sidedata-flag' in repo.requirements: diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/formatter.py --- a/mercurial/formatter.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/formatter.py Fri Sep 18 10:48:43 2020 -0400 @@ -540,6 +540,25 @@ tmpl = attr.ib() mapfile = attr.ib() refargs = attr.ib(default=None) + fp = attr.ib(default=None) + + +def empty_templatespec(): + return templatespec(None, None, None) + + +def reference_templatespec(ref, refargs=None): + return templatespec(ref, None, None, refargs) + + +def literal_templatespec(tmpl): + if pycompat.ispy3: + assert not isinstance(tmpl, str), b'tmpl must not be a str' + return templatespec(b'', tmpl, None) + + +def mapfile_templatespec(topic, mapfile, fp=None): + return templatespec(topic, None, mapfile, fp=fp) def lookuptemplate(ui, topic, tmpl): @@ -563,33 +582,33 @@ """ if not tmpl: - return templatespec(None, None, None) + return empty_templatespec() # looks like a literal template? if b'{' in tmpl: - return templatespec(b'', tmpl, None) + return literal_templatespec(tmpl) # a reference to built-in (formatter) template if tmpl in {b'cbor', b'json', b'pickle', b'debug'}: - return templatespec(tmpl, None, None) + return reference_templatespec(tmpl) # a function-style reference to built-in template func, fsep, ftail = tmpl.partition(b'(') if func in {b'cbor', b'json'} and fsep and ftail.endswith(b')'): templater.parseexpr(tmpl) # make sure syntax errors are confined - return templatespec(func, None, None, refargs=ftail[:-1]) + return reference_templatespec(func, refargs=ftail[:-1]) # perhaps a stock style? 
if not os.path.split(tmpl)[0]: - mapname = templater.templatepath( + (mapname, fp) = templater.try_open_template( b'map-cmdline.' + tmpl - ) or templater.templatepath(tmpl) - if mapname and os.path.isfile(mapname): - return templatespec(topic, None, mapname) + ) or templater.try_open_template(tmpl) + if mapname: + return mapfile_templatespec(topic, mapname, fp) # perhaps it's a reference to [templates] if ui.config(b'templates', tmpl): - return templatespec(tmpl, None, None) + return reference_templatespec(tmpl) if tmpl == b'list': ui.write(_(b"available styles: %s\n") % templater.stylelist()) @@ -599,13 +618,13 @@ if (b'/' in tmpl or b'\\' in tmpl) and os.path.isfile(tmpl): # is it a mapfile for a style? if os.path.basename(tmpl).startswith(b"map-"): - return templatespec(topic, None, os.path.realpath(tmpl)) + return mapfile_templatespec(topic, os.path.realpath(tmpl)) with util.posixfile(tmpl, b'rb') as f: tmpl = f.read() - return templatespec(b'', tmpl, None) + return literal_templatespec(tmpl) # constant string? - return templatespec(b'', tmpl, None) + return literal_templatespec(tmpl) def templatepartsmap(spec, t, partnames): @@ -626,9 +645,12 @@ a map file""" assert not (spec.tmpl and spec.mapfile) if spec.mapfile: - frommapfile = templater.templater.frommapfile - return frommapfile( - spec.mapfile, defaults=defaults, resources=resources, cache=cache + return templater.templater.frommapfile( + spec.mapfile, + spec.fp, + defaults=defaults, + resources=resources, + cache=cache, ) return maketemplater( ui, spec.tmpl, defaults=defaults, resources=resources, cache=cache diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/helptext/internals/requirements.txt --- a/mercurial/helptext/internals/requirements.txt Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/helptext/internals/requirements.txt Fri Sep 18 10:48:43 2020 -0400 @@ -155,3 +155,22 @@ Note that as of 5.5, only installations compiled with the Rust extension will benefit from a speedup. 
The other installations will do the necessary work to keep the index up to date, but will suffer a slowdown. + +exp-sharesafe +============= + +NOTE: This requirement is for internal development only. The semantics are not +frozen yet, the feature is experimental. It's not advised to use it for any +production repository yet. + +Represents that the repository can be shared safely. Requirements and config of +the source repository will be shared. +Requirements are stored in ``.hg/store`` instead of directly in ``.hg/`` where +they used to be stored. Some working copy related requirements are still stored +in ``.hg/``. +Shares read the ``.hg/hgrc`` of the source repository. + +Support for this requirement was added in Mercurial 5.6 (released +November 2020). The requirement will only be present on repositories that have +opted in to this format (by having ``format.exp-share-safe=true`` set when +they were created). diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/helptext/internals/revlogs.txt --- a/mercurial/helptext/internals/revlogs.txt Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/helptext/internals/revlogs.txt Fri Sep 18 10:48:43 2020 -0400 @@ -215,14 +215,16 @@ Revision entries consist of an optional 1 byte header followed by an encoding of the revision data. The headers are as follows: -\0 (0x00) - Revision data is the entirety of the entry, including this header. -u (0x75) - Raw revision data follows. -x (0x78) - zlib (RFC 1950) data. +\0 (0x00) + Revision data is the entirety of the entry, including this header. +( (0x28) + zstd https://github.com/facebook/zstd +u (0x75) + Raw revision data follows. +x (0x78) + zlib (RFC 1950) data. - The 0x78 value is actually the first byte of the zlib header (CMF byte). + The 0x78 value is actually the first byte of the zlib header (CMF byte).
Hash Computation ================ diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/hg.py --- a/mercurial/hg.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/hg.py Fri Sep 18 10:48:43 2020 -0400 @@ -38,6 +38,7 @@ node, phases, pycompat, + requirements, scmutil, sshpeer, statichttprepo, @@ -49,7 +50,6 @@ vfs as vfsmod, ) from .utils import hashutil -from .interfaces import repository as repositorymod release = lock.release @@ -332,6 +332,28 @@ return r +def _prependsourcehgrc(repo): + """ copies the source repo config and prepends it in current repo .hg/hgrc + on unshare. This is only done if the share was performed using share safe + method where we share config of source in shares""" + srcvfs = vfsmod.vfs(repo.sharedpath) + dstvfs = vfsmod.vfs(repo.path) + + if not srcvfs.exists(b'hgrc'): + return + + currentconfig = b'' + if dstvfs.exists(b'hgrc'): + currentconfig = dstvfs.read(b'hgrc') + + with dstvfs(b'hgrc', b'wb') as fp: + sourceconfig = srcvfs.read(b'hgrc') + fp.write(b"# Config copied from shared source\n") + fp.write(sourceconfig) + fp.write(b'\n') + fp.write(currentconfig) + + def unshare(ui, repo): """convert a shared repository to a normal one @@ -350,12 +372,17 @@ # fail destlock = copystore(ui, repo, repo.path) with destlock or util.nullcontextmanager(): + if requirements.SHARESAFE_REQUIREMENT in repo.requirements: + # we were sharing .hg/hgrc of the share source with the current + # repo. We need to copy that while unsharing otherwise it can + # disable hooks and other checks + _prependsourcehgrc(repo) sharefile = repo.vfs.join(b'sharedpath') util.rename(sharefile, sharefile + b'.old') - repo.requirements.discard(b'shared') - repo.requirements.discard(b'relshared') + repo.requirements.discard(requirements.SHARED_REQUIREMENT) + repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT) scmutil.writereporequirements(repo) # Removing share changes some fundamental properties of the repo instance.
@@ -388,7 +415,7 @@ if default: template = b'[paths]\ndefault = %s\n' destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default)) - if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements: + if requirements.NARROW_REQUIREMENT in sourcerepo.requirements: with destrepo.wlock(): narrowspec.copytoworkingcopy(destrepo) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/hgweb/common.py --- a/mercurial/hgweb/common.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/hgweb/common.py Fri Sep 18 10:48:43 2020 -0400 @@ -21,6 +21,7 @@ from .. import ( encoding, pycompat, + templater, util, ) @@ -178,7 +179,7 @@ return True -def staticfile(directory, fname, res): +def staticfile(templatepath, directory, fname, res): """return a file inside directory with guessed Content-Type header fname always uses '/' as directory separator and isn't allowed to @@ -190,24 +191,20 @@ if not ispathsafe(fname): return + if not directory: + tp = templatepath or templater.templatedir() + if tp is not None: + directory = os.path.join(tp, b'static') + fpath = os.path.join(*fname.split(b'/')) - if isinstance(directory, bytes): - directory = [directory] - for d in directory: - path = os.path.join(d, fpath) - if os.path.exists(path): - break + ct = pycompat.sysbytes( + mimetypes.guess_type(pycompat.fsdecode(fpath))[0] or r"text/plain" + ) + path = os.path.join(directory, fpath) try: os.stat(path) - ct = pycompat.sysbytes( - mimetypes.guess_type(pycompat.fsdecode(path))[0] or r"text/plain" - ) with open(path, b'rb') as fh: data = fh.read() - - res.headers[b'Content-Type'] = ct - res.setbodybytes(data) - return res except TypeError: raise ErrorResponse(HTTP_SERVER_ERROR, b'illegal filename') except OSError as err: @@ -218,6 +215,10 @@ HTTP_SERVER_ERROR, encoding.strtolocal(err.strerror) ) + res.headers[b'Content-Type'] = ct + res.setbodybytes(data) + return res + def paritygen(stripecount, offset=0): """count parity of horizontal stripes for easier reading""" diff -r bd5b2b29b82d -r e3df1f560d9a 
mercurial/hgweb/hgweb_mod.py --- a/mercurial/hgweb/hgweb_mod.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/hgweb/hgweb_mod.py Fri Sep 18 10:48:43 2020 -0400 @@ -53,7 +53,36 @@ configfn(b'web', b'style'), b'paper', ) - return styles, templater.stylemap(styles, templatepath) + return styles, _stylemap(styles, templatepath) + + +def _stylemap(styles, path=None): + """Return path to mapfile for a given style. + + Searches mapfile in the following locations: + 1. templatepath/style/map + 2. templatepath/map-style + 3. templatepath/map + """ + + for style in styles: + # only plain name is allowed to honor template paths + if ( + not style + or style in (pycompat.oscurdir, pycompat.ospardir) + or pycompat.ossep in style + or pycompat.osaltsep + and pycompat.osaltsep in style + ): + continue + locations = (os.path.join(style, b'map'), b'map-' + style, b'map') + + for location in locations: + mapfile, fp = templater.try_open_template(location, path) + if mapfile: + return style, mapfile, fp + + raise RuntimeError(b"No hgweb templates found in %r" % path) def makebreadcrumb(url, prefix=b''): @@ -117,23 +146,21 @@ self.csp, self.nonce = cspvalues(self.repo.ui) # Trust the settings from the .hg/hgrc files by default. 
- def config(self, section, name, default=uimod._unset, untrusted=True): - return self.repo.ui.config(section, name, default, untrusted=untrusted) + def config(self, *args, **kwargs): + kwargs.setdefault('untrusted', True) + return self.repo.ui.config(*args, **kwargs) - def configbool(self, section, name, default=uimod._unset, untrusted=True): - return self.repo.ui.configbool( - section, name, default, untrusted=untrusted - ) + def configbool(self, *args, **kwargs): + kwargs.setdefault('untrusted', True) + return self.repo.ui.configbool(*args, **kwargs) - def configint(self, section, name, default=uimod._unset, untrusted=True): - return self.repo.ui.configint( - section, name, default, untrusted=untrusted - ) + def configint(self, *args, **kwargs): + kwargs.setdefault('untrusted', True) + return self.repo.ui.configint(*args, **kwargs) - def configlist(self, section, name, default=uimod._unset, untrusted=True): - return self.repo.ui.configlist( - section, name, default, untrusted=untrusted - ) + def configlist(self, *args, **kwargs): + kwargs.setdefault('untrusted', True) + return self.repo.ui.configlist(*args, **kwargs) def archivelist(self, nodeid): return webutil.archivelist(self.repo.ui, nodeid) @@ -153,7 +180,9 @@ # figure out which style to use vars = {} - styles, (style, mapfile) = getstyle(req, self.config, self.templatepath) + styles, (style, mapfile, fp) = getstyle( + req, self.config, self.templatepath + ) if style == styles[0]: vars[b'style'] = style @@ -196,10 +225,9 @@ yield self.config(b'web', b'motd') tres = formatter.templateresources(self.repo.ui, self.repo) - tmpl = templater.templater.frommapfile( - mapfile, filters=filters, defaults=defaults, resources=tres + return templater.templater.frommapfile( + mapfile, fp=fp, filters=filters, defaults=defaults, resources=tres ) - return tmpl def sendtemplate(self, name, **kwargs): """Helper function to send a response generated from a template.""" diff -r bd5b2b29b82d -r e3df1f560d9a 
mercurial/hgweb/hgwebdir_mod.py --- a/mercurial/hgweb/hgwebdir_mod.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/hgweb/hgwebdir_mod.py Fri Sep 18 10:48:43 2020 -0400 @@ -413,13 +413,7 @@ else: fname = req.qsparams[b'static'] static = self.ui.config(b"web", b"static", untrusted=False) - if not static: - tp = self.templatepath or templater.templatepaths() - if isinstance(tp, bytes): - tp = [tp] - static = [os.path.join(p, b'static') for p in tp] - - staticfile(static, fname, res) + staticfile(self.templatepath, static, fname, res) return res.sendresponse() # top-level index @@ -538,11 +532,12 @@ return res.sendresponse() def templater(self, req, nonce): - def config(section, name, default=uimod._unset, untrusted=True): - return self.ui.config(section, name, default, untrusted) + def config(*args, **kwargs): + kwargs.setdefault('untrusted', True) + return self.ui.config(*args, **kwargs) vars = {} - styles, (style, mapfile) = hgweb_mod.getstyle( + styles, (style, mapfile, fp) = hgweb_mod.getstyle( req, config, self.templatepath ) if style == styles[0]: @@ -577,5 +572,6 @@ else: yield config(b'web', b'motd') - tmpl = templater.templater.frommapfile(mapfile, defaults=defaults) - return tmpl + return templater.templater.frommapfile( + mapfile, fp=fp, defaults=defaults + ) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/hgweb/webcommands.py --- a/mercurial/hgweb/webcommands.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/hgweb/webcommands.py Fri Sep 18 10:48:43 2020 -0400 @@ -36,7 +36,6 @@ revsetlang, scmutil, smartset, - templater, templateutil, ) @@ -1318,13 +1317,7 @@ # a repo owner may set web.static in .hg/hgrc to get any file # readable by the user running the CGI script static = web.config(b"web", b"static", untrusted=False) - if not static: - tp = web.templatepath or templater.templatepaths() - if isinstance(tp, bytes): - tp = [tp] - static = [os.path.join(p, b'static') for p in tp] - - staticfile(static, fname, web.res) + staticfile(web.templatepath, static, 
fname, web.res) return web.res.sendresponse() diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/interfaces/repository.py --- a/mercurial/interfaces/repository.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/interfaces/repository.py Fri Sep 18 10:48:43 2020 -0400 @@ -11,10 +11,6 @@ from .. import error from . import util as interfaceutil -# When narrowing is finalized and no longer subject to format changes, -# we should move this to just "narrow" or similar. -NARROW_REQUIREMENT = b'narrowhg-experimental' - # Local repository feature string. # Revlogs are being used for file storage. diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/localrepo.py --- a/mercurial/localrepo.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/localrepo.py Fri Sep 18 10:48:43 2020 -0400 @@ -8,6 +8,7 @@ from __future__ import absolute_import import errno +import functools import os import random import sys @@ -32,6 +33,7 @@ bundle2, changegroup, color, + commit, context, dirstate, dirstateguard, @@ -46,7 +48,6 @@ match as matchmod, mergestate as mergestatemod, mergeutil, - metadata, namespaces, narrowspec, obsolete, @@ -56,6 +57,7 @@ pycompat, rcutil, repoview, + requirements as requirementsmod, revset, revsetlang, scmutil, @@ -192,6 +194,7 @@ def unfilteredmethod(orig): """decorate method that always need to be run on unfiltered version""" + @functools.wraps(orig) def wrapper(repo, *args, **kwargs): return orig(repo.unfiltered(), *args, **kwargs) @@ -425,30 +428,6 @@ # End of baselegacywirecommands interface. -# Increment the sub-version when the revlog v2 format changes to lock out old -# clients. -REVLOGV2_REQUIREMENT = b'exp-revlogv2.1' - -# A repository with the sparserevlog feature will have delta chains that -# can spread over a larger span. Sparse reading cuts these large spans into -# pieces, so that each piece isn't too big. 
-# Without the sparserevlog capability, reading from the repository could use -# huge amounts of memory, because the whole span would be read at once, -# including all the intermediate revisions that aren't pertinent for the chain. -# This is why once a repository has enabled sparse-read, it becomes required. -SPARSEREVLOG_REQUIREMENT = b'sparserevlog' - -# A repository with the sidedataflag requirement will allow to store extra -# information for revision without altering their original hashes. -SIDEDATA_REQUIREMENT = b'exp-sidedata-flag' - -# A repository with the the copies-sidedata-changeset requirement will store -# copies related information in changeset's sidedata. -COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset' - -# The repository use persistent nodemap for the changelog and the manifest. -NODEMAP_REQUIREMENT = b'persistent-nodemap' - # Functions receiving (ui, features) that extensions can register to impact # the ability to load repositories with custom requirements. Only # functions defined in loaded extensions are called. @@ -459,6 +438,50 @@ featuresetupfuncs = set() +def _getsharedvfs(hgvfs, requirements): + """ returns the vfs object pointing to root of shared source + repo for a shared repository + + hgvfs is vfs pointing at .hg/ of current repo (shared one) + requirements is a set of requirements of current repo (shared one) + """ + # The ``shared`` or ``relshared`` requirements indicate the + # store lives in the path contained in the ``.hg/sharedpath`` file. + # This is an absolute path for ``shared`` and relative to + # ``.hg/`` for ``relshared``. 
+ sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n') + if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements: + sharedpath = hgvfs.join(sharedpath) + + sharedvfs = vfsmod.vfs(sharedpath, realpath=True) + + if not sharedvfs.exists(): + raise error.RepoError( + _(b'.hg/sharedpath points to nonexistent directory %s') + % sharedvfs.base + ) + return sharedvfs + + +def _readrequires(vfs, allowmissing): + """ reads the require file present at root of this vfs + and return a set of requirements + + If allowmissing is True, we suppress ENOENT if raised""" + # requires file contains a newline-delimited list of + # features/capabilities the opener (us) must have in order to use + # the repository. This file was introduced in Mercurial 0.9.2, + # which means very old repositories may not have one. We assume + # a missing file translates to no requirements. + try: + requirements = set(vfs.read(b'requires').splitlines()) + except IOError as e: + if not (allowmissing and e.errno == errno.ENOENT): + raise + requirements = set() + return requirements + + def makelocalrepository(baseui, path, intents=None): """Create a local repository object. @@ -500,6 +523,10 @@ # Main VFS for .hg/ directory. hgpath = wdirvfs.join(b'.hg') hgvfs = vfsmod.vfs(hgpath, cacheaudited=True) + # Whether this repository is shared one or not + shared = False + # If this repository is shared, vfs pointing to shared repo + sharedvfs = None # The .hg/ path should exist and should be a directory. All other # cases are errors. @@ -517,22 +544,32 @@ raise error.RepoError(_(b'repository %s not found') % path) - # .hg/requires file contains a newline-delimited list of - # features/capabilities the opener (us) must have in order to use - # the repository. This file was introduced in Mercurial 0.9.2, - # which means very old repositories may not have one. We assume - # a missing file translates to no requirements. 
- try: - requirements = set(hgvfs.read(b'requires').splitlines()) - except IOError as e: - if e.errno != errno.ENOENT: - raise - requirements = set() + requirements = _readrequires(hgvfs, True) + shared = ( + requirementsmod.SHARED_REQUIREMENT in requirements + or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements + ) + if shared: + sharedvfs = _getsharedvfs(hgvfs, requirements) + + # if .hg/requires contains the sharesafe requirement, it means + # there exists a `.hg/store/requires` too and we should read it + # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement + # is present. We never write SHARESAFE_REQUIREMENT for a repo if store + # is not present, refer checkrequirementscompat() for that + if requirementsmod.SHARESAFE_REQUIREMENT in requirements: + if shared: + # This is a shared repo + storevfs = vfsmod.vfs(sharedvfs.join(b'store')) + else: + storevfs = vfsmod.vfs(hgvfs.join(b'store')) + + requirements |= _readrequires(storevfs, False) # The .hg/hgrc file may load extensions or contain config options # that influence repository construction. Attempt to load it and # process any new extensions that it may have pulled in. - if loadhgrc(ui, wdirvfs, hgvfs, requirements): + if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs): afterhgrcload(ui, wdirvfs, hgvfs, requirements) extensions.loadall(ui) extensions.populateui(ui) @@ -567,27 +604,13 @@ features = set() # The "store" part of the repository holds versioned data. How it is - # accessed is determined by various requirements. The ``shared`` or - # ``relshared`` requirements indicate the store lives in the path contained - # in the ``.hg/sharedpath`` file. This is an absolute path for - # ``shared`` and relative to ``.hg/`` for ``relshared``. 
- if b'shared' in requirements or b'relshared' in requirements: - sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n') - if b'relshared' in requirements: - sharedpath = hgvfs.join(sharedpath) - - sharedvfs = vfsmod.vfs(sharedpath, realpath=True) - - if not sharedvfs.exists(): - raise error.RepoError( - _(b'.hg/sharedpath points to nonexistent directory %s') - % sharedvfs.base - ) - - features.add(repository.REPO_FEATURE_SHARED_STORAGE) - + # accessed is determined by various requirements. If `shared` or + # `relshared` requirements are present, this indicates current repository + # is a share and store exists in path mentioned in `.hg/sharedpath` + if shared: storebasepath = sharedvfs.base cachepath = sharedvfs.join(b'cache') + features.add(repository.REPO_FEATURE_SHARED_STORAGE) else: storebasepath = hgvfs.base cachepath = hgvfs.join(b'cache') @@ -674,7 +697,7 @@ ) -def loadhgrc(ui, wdirvfs, hgvfs, requirements): +def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None): """Load hgrc files/content into a ui instance. This is called during repository opening to load any additional @@ -685,9 +708,20 @@ Extensions should monkeypatch this function to modify how per-repo configs are loaded. For example, an extension may wish to pull in configs from alternate files or sources. + + sharedvfs is vfs object pointing to source repo if the current one is a + shared one """ if not rcutil.use_repo_hgrc(): return False + + # first load config from shared source if we have to + if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs: + try: + ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base) + except IOError: + pass + try: + ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base) + return True @@ -790,7 +824,10 @@ ``error.RepoError`` should be raised on failure.
""" - if b'exp-sparse' in requirements and not sparse.enabled: + if ( + requirementsmod.SPARSE_REQUIREMENT in requirements + and not sparse.enabled + ): raise error.RepoError( _( b'repository is using sparse feature but ' @@ -820,7 +857,7 @@ """ options = {} - if b'treemanifest' in requirements: + if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements: options[b'treemanifest'] = True # experimental config: format.manifestcachesize @@ -833,12 +870,15 @@ # This revlog format is super old and we don't bother trying to parse # opener options for it because those options wouldn't do anything # meaningful on such old repos. - if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements: + if ( + b'revlogv1' in requirements + or requirementsmod.REVLOGV2_REQUIREMENT in requirements + ): options.update(resolverevlogstorevfsoptions(ui, requirements, features)) else: # explicitly mark repo as using revlogv0 options[b'revlogv0'] = True - if COPIESSDC_REQUIREMENT in requirements: + if requirementsmod.COPIESSDC_REQUIREMENT in requirements: options[b'copies-storage'] = b'changeset-sidedata' else: writecopiesto = ui.config(b'experimental', b'copies.write-to') @@ -857,7 +897,7 @@ if b'revlogv1' in requirements: options[b'revlogv1'] = True - if REVLOGV2_REQUIREMENT in requirements: + if requirementsmod.REVLOGV2_REQUIREMENT in requirements: options[b'revlogv2'] = True if b'generaldelta' in requirements: @@ -901,12 +941,12 @@ options[b'sparse-read-density-threshold'] = srdensitythres options[b'sparse-read-min-gap-size'] = srmingapsize - sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements + sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements options[b'sparse-revlog'] = sparserevlog if sparserevlog: options[b'generaldelta'] = True - sidedata = SIDEDATA_REQUIREMENT in requirements + sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements options[b'side-data'] = sidedata maxchainlen = None @@ -937,12 +977,12 @@ msg = _(b'invalid value for 
`storage.revlog.zstd.level` config: %d') raise error.Abort(msg % options[b'zstd.level']) - if repository.NARROW_REQUIREMENT in requirements: + if requirementsmod.NARROW_REQUIREMENT in requirements: options[b'enableellipsis'] = True if ui.configbool(b'experimental', b'rust.index'): options[b'rust.index'] = True - if NODEMAP_REQUIREMENT in requirements: + if requirementsmod.NODEMAP_REQUIREMENT in requirements: options[b'persistent-nodemap'] = True if ui.configbool(b'storage', b'revlog.nodemap.mmap'): options[b'persistent-nodemap.mmap'] = True @@ -986,7 +1026,7 @@ features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE) features.add(repository.REPO_FEATURE_STREAM_CLONE) - if repository.NARROW_REQUIREMENT in requirements: + if requirementsmod.NARROW_REQUIREMENT in requirements: return revlognarrowfilestorage else: return revlogfilestorage @@ -1027,22 +1067,23 @@ supportedformats = { b'revlogv1', b'generaldelta', - b'treemanifest', - COPIESSDC_REQUIREMENT, - REVLOGV2_REQUIREMENT, - SIDEDATA_REQUIREMENT, - SPARSEREVLOG_REQUIREMENT, - NODEMAP_REQUIREMENT, + requirementsmod.TREEMANIFEST_REQUIREMENT, + requirementsmod.COPIESSDC_REQUIREMENT, + requirementsmod.REVLOGV2_REQUIREMENT, + requirementsmod.SIDEDATA_REQUIREMENT, + requirementsmod.SPARSEREVLOG_REQUIREMENT, + requirementsmod.NODEMAP_REQUIREMENT, bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT, + requirementsmod.SHARESAFE_REQUIREMENT, } _basesupported = supportedformats | { b'store', b'fncache', - b'shared', - b'relshared', + requirementsmod.SHARED_REQUIREMENT, + requirementsmod.RELATIVE_SHARED_REQUIREMENT, b'dotencode', - b'exp-sparse', - b'internal-phase', + requirementsmod.SPARSE_REQUIREMENT, + requirementsmod.INTERNAL_PHASE_REQUIREMENT, } # list of prefix for file which can be written without 'wlock' @@ -1211,7 +1252,7 @@ self._extrafilterid = repoview.extrafilter(ui) self.filecopiesmode = None - if COPIESSDC_REQUIREMENT in self.requirements: + if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements: 
self.filecopiesmode = b'changeset-sidedata' def _getvfsward(self, origfunc): @@ -1236,7 +1277,12 @@ msg = b'accessing cache with vfs instead of cachevfs: "%s"' repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs") # path prefixes covered by 'lock' - vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/') + vfs_path_prefixes = ( + b'journal.', + b'undo.', + b'strip-backup/', + b'cache/', + ) if any(path.startswith(prefix) for prefix in vfs_path_prefixes): if repo._currentlock(repo._lockref) is None: repo.ui.develwarn( @@ -1503,14 +1549,14 @@ @storecache(narrowspec.FILENAME) def _storenarrowmatch(self): - if repository.NARROW_REQUIREMENT not in self.requirements: + if requirementsmod.NARROW_REQUIREMENT not in self.requirements: return matchmod.always() include, exclude = self.narrowpats return narrowspec.match(self.root, include=include, exclude=exclude) @storecache(narrowspec.FILENAME) def _narrowmatch(self): - if repository.NARROW_REQUIREMENT not in self.requirements: + if requirementsmod.NARROW_REQUIREMENT not in self.requirements: return matchmod.always() narrowspec.checkworkingcopynarrowspec(self) include, exclude = self.narrowpats @@ -1551,7 +1597,7 @@ def _quick_access_changeid_wc(self): # also fast path access to the working copy parents # however, only do it for filter that ensure wc is visible. - quick = {} + quick = self._quick_access_changeid_null.copy() cl = self.unfiltered().changelog for node in self.dirstate.parents(): if node == nullid: @@ -1590,11 +1636,9 @@ This contains a list of symbol we can recognise right away without further processing. 
""" - mapping = self._quick_access_changeid_null if self.filtername in repoview.filter_has_wc: - mapping = mapping.copy() - mapping.update(self._quick_access_changeid_wc) - return mapping + return self._quick_access_changeid_wc + return self._quick_access_changeid_null def __getitem__(self, changeid): # dealing with special cases @@ -2472,7 +2516,7 @@ ui.status( _(b'working directory now based on revision %d\n') % parents ) - mergestatemod.mergestate.clean(self, self[b'.'].node()) + mergestatemod.mergestate.clean(self) # TODO: if we know which new heads may result from this rollback, pass # them to destroy(), which will prevent the branchhead cache from being @@ -2771,140 +2815,6 @@ """Returns the wlock if it's held, or None if it's not.""" return self._currentlock(self._wlockref) - def _filecommit( - self, - fctx, - manifest1, - manifest2, - linkrev, - tr, - changelist, - includecopymeta, - ): - """ - commit an individual file as part of a larger transaction - - input: - - fctx: a file context with the content we are trying to commit - manifest1: manifest of changeset first parent - manifest2: manifest of changeset second parent - linkrev: revision number of the changeset being created - tr: current transation - changelist: list of file being changed (modified inplace) - individual: boolean, set to False to skip storing the copy data - (only used by the Google specific feature of using - changeset extra as copy source of truth). 
- - output: - - The resulting filenode - """ - - fname = fctx.path() - fparent1 = manifest1.get(fname, nullid) - fparent2 = manifest2.get(fname, nullid) - if isinstance(fctx, context.filectx): - node = fctx.filenode() - if node in [fparent1, fparent2]: - self.ui.debug(b'reusing %s filelog entry\n' % fname) - if ( - fparent1 != nullid - and manifest1.flags(fname) != fctx.flags() - ) or ( - fparent2 != nullid - and manifest2.flags(fname) != fctx.flags() - ): - changelist.append(fname) - return node - - flog = self.file(fname) - meta = {} - cfname = fctx.copysource() - if cfname and cfname != fname: - # Mark the new revision of this file as a copy of another - # file. This copy data will effectively act as a parent - # of this new revision. If this is a merge, the first - # parent will be the nullid (meaning "look up the copy data") - # and the second one will be the other parent. For example: - # - # 0 --- 1 --- 3 rev1 changes file foo - # \ / rev2 renames foo to bar and changes it - # \- 2 -/ rev3 should have bar with all changes and - # should record that bar descends from - # bar in rev2 and foo in rev1 - # - # this allows this merge to succeed: - # - # 0 --- 1 --- 3 rev4 reverts the content change from rev2 - # \ / merging rev3 and rev4 should use bar@rev2 - # \- 2 --- 4 as the merge base - # - - cnode = manifest1.get(cfname) - newfparent = fparent2 - - if manifest2: # branch merge - if fparent2 == nullid or cnode is None: # copied on remote side - if cfname in manifest2: - cnode = manifest2[cfname] - newfparent = fparent1 - - # Here, we used to search backwards through history to try to find - # where the file copy came from if the source of a copy was not in - # the parent directory. However, this doesn't actually make sense to - # do (what does a copy from something not in your working copy even - # mean?) and it causes bugs (eg, issue4476). 
Instead, we will warn - # the user that copy information was dropped, so if they didn't - # expect this outcome it can be fixed, but this is the correct - # behavior in this circumstance. - - if cnode: - self.ui.debug( - b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)) - ) - if includecopymeta: - meta[b"copy"] = cfname - meta[b"copyrev"] = hex(cnode) - fparent1, fparent2 = nullid, newfparent - else: - self.ui.warn( - _( - b"warning: can't find ancestor for '%s' " - b"copied from '%s'!\n" - ) - % (fname, cfname) - ) - - elif fparent1 == nullid: - fparent1, fparent2 = fparent2, nullid - elif fparent2 != nullid: - # is one parent an ancestor of the other? - fparentancestors = flog.commonancestorsheads(fparent1, fparent2) - if fparent1 in fparentancestors: - fparent1, fparent2 = fparent2, nullid - elif fparent2 in fparentancestors: - fparent2 = nullid - elif not fparentancestors: - # TODO: this whole if-else might be simplified much more - ms = mergestatemod.mergestate.read(self) - if ( - fname in ms - and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER - ): - fparent1, fparent2 = fparent2, nullid - - # is the file changed? - text = fctx.data() - if fparent2 != nullid or meta or flog.cmp(fparent1, text): - changelist.append(fname) - return flog.add(text, meta, tr, linkrev, fparent1, fparent2) - # are just the flags changed during merge? - elif fname in manifest1 and manifest1.flags(fname) != fctx.flags(): - changelist.append(fname) - - return fparent1 - def checkcommitpatterns(self, wctx, match, status, fail): """check for commit arguments that aren't committable""" if match.isexact() or match.prefix(): @@ -3062,203 +2972,7 @@ @unfilteredmethod def commitctx(self, ctx, error=False, origctx=None): - """Add a new revision to current repository. - Revision information is passed via the context argument. - - ctx.files() should list all files involved in this commit, i.e. - modified/added/removed files. 
On merge, it may be wider than the - ctx.files() to be committed, since any file nodes derived directly - from p1 or p2 are excluded from the committed ctx.files(). - - origctx is for convert to work around the problem that bug - fixes to the files list in changesets change hashes. For - convert to be the identity, it can pass an origctx and this - function will use the same files list when it makes sense to - do so. - """ - - p1, p2 = ctx.p1(), ctx.p2() - user = ctx.user() - - if self.filecopiesmode == b'changeset-sidedata': - writechangesetcopy = True - writefilecopymeta = True - writecopiesto = None - else: - writecopiesto = self.ui.config(b'experimental', b'copies.write-to') - writefilecopymeta = writecopiesto != b'changeset-only' - writechangesetcopy = writecopiesto in ( - b'changeset-only', - b'compatibility', - ) - p1copies, p2copies = None, None - if writechangesetcopy: - p1copies = ctx.p1copies() - p2copies = ctx.p2copies() - filesadded, filesremoved = None, None - with self.lock(), self.transaction(b"commit") as tr: - trp = weakref.proxy(tr) - - if ctx.manifestnode(): - # reuse an existing manifest revision - self.ui.debug(b'reusing known manifest\n') - mn = ctx.manifestnode() - files = ctx.files() - if writechangesetcopy: - filesadded = ctx.filesadded() - filesremoved = ctx.filesremoved() - elif ctx.files(): - m1ctx = p1.manifestctx() - m2ctx = p2.manifestctx() - mctx = m1ctx.copy() - - m = mctx.read() - m1 = m1ctx.read() - m2 = m2ctx.read() - - # check in files - added = [] - changed = [] - removed = list(ctx.removed()) - linkrev = len(self) - self.ui.note(_(b"committing files:\n")) - uipathfn = scmutil.getuipathfn(self) - for f in sorted(ctx.modified() + ctx.added()): - self.ui.note(uipathfn(f) + b"\n") - try: - fctx = ctx[f] - if fctx is None: - removed.append(f) - else: - added.append(f) - m[f] = self._filecommit( - fctx, - m1, - m2, - linkrev, - trp, - changed, - writefilecopymeta, - ) - m.setflag(f, fctx.flags()) - except OSError: - self.ui.warn( - 
_(b"trouble committing %s!\n") % uipathfn(f) - ) - raise - except IOError as inst: - errcode = getattr(inst, 'errno', errno.ENOENT) - if error or errcode and errcode != errno.ENOENT: - self.ui.warn( - _(b"trouble committing %s!\n") % uipathfn(f) - ) - raise - - # update manifest - removed = [f for f in removed if f in m1 or f in m2] - drop = sorted([f for f in removed if f in m]) - for f in drop: - del m[f] - if p2.rev() != nullrev: - rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2)) - removed = [f for f in removed if not rf(f)] - - files = changed + removed - md = None - if not files: - # if no "files" actually changed in terms of the changelog, - # try hard to detect unmodified manifest entry so that the - # exact same commit can be reproduced later on convert. - md = m1.diff(m, scmutil.matchfiles(self, ctx.files())) - if not files and md: - self.ui.debug( - b'not reusing manifest (no file change in ' - b'changelog, but manifest differs)\n' - ) - if files or md: - self.ui.note(_(b"committing manifest\n")) - # we're using narrowmatch here since it's already applied at - # other stages (such as dirstate.walk), so we're already - # ignoring things outside of narrowspec in most cases. The - # one case where we might have files outside the narrowspec - # at this point is merges, and we already error out in the - # case where the merge has files outside of the narrowspec, - # so this is safe. 
- mn = mctx.write( - trp, - linkrev, - p1.manifestnode(), - p2.manifestnode(), - added, - drop, - match=self.narrowmatch(), - ) - - if writechangesetcopy: - filesadded = [ - f for f in changed if not (f in m1 or f in m2) - ] - filesremoved = removed - else: - self.ui.debug( - b'reusing manifest from p1 (listed files ' - b'actually unchanged)\n' - ) - mn = p1.manifestnode() - else: - self.ui.debug(b'reusing manifest from p1 (no file change)\n') - mn = p1.manifestnode() - files = [] - - if writecopiesto == b'changeset-only': - # If writing only to changeset extras, use None to indicate that - # no entry should be written. If writing to both, write an empty - # entry to prevent the reader from falling back to reading - # filelogs. - p1copies = p1copies or None - p2copies = p2copies or None - filesadded = filesadded or None - filesremoved = filesremoved or None - - if origctx and origctx.manifestnode() == mn: - files = origctx.files() - - # update changelog - self.ui.note(_(b"committing changelog\n")) - self.changelog.delayupdate(tr) - n = self.changelog.add( - mn, - files, - ctx.description(), - trp, - p1.node(), - p2.node(), - user, - ctx.date(), - ctx.extra().copy(), - p1copies, - p2copies, - filesadded, - filesremoved, - ) - xp1, xp2 = p1.hex(), p2 and p2.hex() or b'' - self.hook( - b'pretxncommit', - throw=True, - node=hex(n), - parent1=xp1, - parent2=xp2, - ) - # set the new commit is proper phase - targetphase = subrepoutil.newcommitphase(self.ui, ctx) - if targetphase: - # retract boundary do not alter parent changeset. 
- # if a parent have higher the resulting phase will - # be compliant anyway - # - # if minimal phase was 0 we don't need to retract anything - phases.registernew(self, tr, targetphase, [n]) - return n + return commit.commitctx(self, ctx, error=error, origctx=origctx) @unfilteredmethod def destroying(self): @@ -3553,9 +3267,9 @@ if b'sharedrepo' in createopts: requirements = set(createopts[b'sharedrepo'].requirements) if createopts.get(b'sharedrelative'): - requirements.add(b'relshared') + requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT) else: - requirements.add(b'shared') + requirements.add(requirementsmod.SHARED_REQUIREMENT) return requirements @@ -3608,30 +3322,30 @@ if scmutil.gdinitconfig(ui): requirements.add(b'generaldelta') if ui.configbool(b'format', b'sparse-revlog'): - requirements.add(SPARSEREVLOG_REQUIREMENT) + requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT) # experimental config: format.exp-use-side-data if ui.configbool(b'format', b'exp-use-side-data'): - requirements.add(SIDEDATA_REQUIREMENT) + requirements.add(requirementsmod.SIDEDATA_REQUIREMENT) # experimental config: format.exp-use-copies-side-data-changeset if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'): - requirements.add(SIDEDATA_REQUIREMENT) - requirements.add(COPIESSDC_REQUIREMENT) + requirements.add(requirementsmod.SIDEDATA_REQUIREMENT) + requirements.add(requirementsmod.COPIESSDC_REQUIREMENT) if ui.configbool(b'experimental', b'treemanifest'): - requirements.add(b'treemanifest') + requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT) revlogv2 = ui.config(b'experimental', b'revlogv2') if revlogv2 == b'enable-unstable-format-and-corrupt-my-data': requirements.remove(b'revlogv1') # generaldelta is implied by revlogv2. 
requirements.discard(b'generaldelta') - requirements.add(REVLOGV2_REQUIREMENT) + requirements.add(requirementsmod.REVLOGV2_REQUIREMENT) # experimental config: format.internal-phase if ui.configbool(b'format', b'internal-phase'): - requirements.add(b'internal-phase') + requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT) if createopts.get(b'narrowfiles'): - requirements.add(repository.NARROW_REQUIREMENT) + requirements.add(requirementsmod.NARROW_REQUIREMENT) if createopts.get(b'lfs'): requirements.add(b'lfs') @@ -3640,11 +3354,59 @@ requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) if ui.configbool(b'format', b'use-persistent-nodemap'): - requirements.add(NODEMAP_REQUIREMENT) + requirements.add(requirementsmod.NODEMAP_REQUIREMENT) + + # if share-safe is enabled, let's create the new repository with the new + # requirement + if ui.configbool(b'format', b'exp-share-safe'): + requirements.add(requirementsmod.SHARESAFE_REQUIREMENT) return requirements +def checkrequirementscompat(ui, requirements): + """ Checks compatibility of repository requirements enabled and disabled. + + Returns a set of requirements which needs to be dropped because dependend + requirements are not enabled. 
Also warns users about it """ + + dropped = set() + + if b'store' not in requirements: + if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements: + ui.warn( + _( + b'ignoring enabled \'format.bookmarks-in-store\' config ' + b'beacuse it is incompatible with disabled ' + b'\'format.usestore\' config\n' + ) + ) + dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) + + if ( + requirementsmod.SHARED_REQUIREMENT in requirements + or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements + ): + raise error.Abort( + _( + b"cannot create shared repository as source was created" + b" with 'format.usestore' config disabled" + ) + ) + + if requirementsmod.SHARESAFE_REQUIREMENT in requirements: + ui.warn( + _( + b"ignoring enabled 'format.exp-share-safe' config because " + b"it is incompatible with disabled 'format.usestore'" + b" config\n" + ) + ) + dropped.add(requirementsmod.SHARESAFE_REQUIREMENT) + + return dropped + + def filterknowncreateopts(ui, createopts): """Filters a dict of repo creation options against options that are known. @@ -3719,6 +3481,7 @@ ) requirements = newreporequirements(ui, createopts=createopts) + requirements -= checkrequirementscompat(ui, requirements) wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True) @@ -3765,7 +3528,17 @@ b'layout', ) - scmutil.writerequires(hgvfs, requirements) + # Filter the requirements into working copy and store ones + wcreq, storereq = scmutil.filterrequirements(requirements) + # write working copy ones + scmutil.writerequires(hgvfs, wcreq) + # If there are store requirements and the current repository + # is not a shared one, write stored requirements + # For new shared repository, we don't need to write the store + # requirements as they are already present in store requires + if storereq and b'sharedrepo' not in createopts: + storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True) + scmutil.writerequires(storevfs, storereq) # Write out file telling readers where to find the shared store. 
if b'sharedrepo' in createopts: diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/logcmdutil.py --- a/mercurial/logcmdutil.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/logcmdutil.py Fri Sep 18 10:48:43 2020 -0400 @@ -603,12 +603,11 @@ def templatespec(tmpl, mapfile): - if pycompat.ispy3: - assert not isinstance(tmpl, str), b'tmpl must not be a str' + assert not (tmpl and mapfile) if mapfile: - return formatter.templatespec(b'changeset', tmpl, mapfile) + return formatter.mapfile_templatespec(b'changeset', mapfile) else: - return formatter.templatespec(b'', tmpl, None) + return formatter.literal_templatespec(tmpl) def _lookuptemplate(ui, tmpl, style): @@ -621,19 +620,20 @@ if not tmpl and not style: # template are stronger than style tmpl = ui.config(b'ui', b'logtemplate') if tmpl: - return templatespec(templater.unquotestring(tmpl), None) + return formatter.literal_templatespec(templater.unquotestring(tmpl)) else: style = util.expandpath(ui.config(b'ui', b'style')) if not tmpl and style: mapfile = style + fp = None if not os.path.split(mapfile)[0]: - mapname = templater.templatepath( + (mapname, fp) = templater.try_open_template( b'map-cmdline.' 
+ mapfile - ) or templater.templatepath(mapfile) + ) or templater.try_open_template(mapfile) if mapname: mapfile = mapname - return templatespec(None, mapfile) + return formatter.mapfile_templatespec(b'changeset', mapfile, fp) return formatter.lookuptemplate(ui, b'changeset', tmpl) @@ -641,7 +641,7 @@ def maketemplater(ui, repo, tmpl, buffered=False): """Create a changesettemplater from a literal template 'tmpl' byte-string.""" - spec = templatespec(tmpl, None) + spec = formatter.literal_templatespec(tmpl) return changesettemplater(ui, repo, spec, buffered=buffered) @@ -691,39 +691,58 @@ slowpath = match.anypats() or (not match.always() and opts.get(b'removed')) if not slowpath: follow = opts.get(b'follow') or opts.get(b'follow_first') - startctxs = [] if follow and opts.get(b'rev'): + # There may be the case that a path doesn't exist in some (but + # not all) of the specified start revisions, but let's consider + # the path is valid. Missing files will be warned by the matcher. startctxs = [repo[r] for r in revs] - for f in match.files(): - if follow and startctxs: - # No idea if the path was a directory at that revision, so - # take the slow path. - if any(f not in c for c in startctxs): - slowpath = True - continue - elif follow and f not in wctx: - # If the file exists, it may be a directory, so let it - # take the slow path. - if os.path.exists(repo.wjoin(f)): - slowpath = True - continue - else: + for f in match.files(): + found = False + for c in startctxs: + if f in c: + found = True + elif c.hasdir(f): + # If a directory exists in any of the start revisions, + # take the slow path. + found = slowpath = True + if not found: raise error.Abort( _( - b'cannot follow file not in parent ' - b'revision: "%s"' + b'cannot follow file not in any of the specified ' + b'revisions: "%s"' ) % f ) - filelog = repo.file(f) - if not filelog: - # A zero count may be a directory or deleted file, so - # try to find matching entries on the slow path. 
- if follow: + elif follow: + for f in match.files(): + if f not in wctx: + # If the file exists, it may be a directory, so let it + # take the slow path. + if os.path.exists(repo.wjoin(f)): + slowpath = True + continue + else: + raise error.Abort( + _( + b'cannot follow file not in parent ' + b'revision: "%s"' + ) + % f + ) + filelog = repo.file(f) + if not filelog: + # A file exists in wdir but not in history, which means + # the file isn't committed yet. raise error.Abort( _(b'cannot follow nonexistent file: "%s"') % f ) - slowpath = True + else: + for f in match.files(): + filelog = repo.file(f) + if not filelog: + # A zero count may be a directory or deleted file, so + # try to find matching entries on the slow path. + slowpath = True # We decided to fall back to the slowpath because at least one # of the paths was not a file. Check to see if at least one of them diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/manifest.py --- a/mercurial/manifest.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/manifest.py Fri Sep 18 10:48:43 2020 -0400 @@ -315,16 +315,9 @@ b"Manifest values must be a tuple of (node, flags)." ) hashval = value[0] - # hashes are either 20 or 32 bytes (sha1 or its replacement), - # and allow one extra byte taht won't be persisted to disk but - # is sometimes used in memory. 
- if not isinstance(hashval, bytes) or not ( - 20 <= len(hashval) <= 22 or 32 <= len(hashval) <= 34 - ): + if not isinstance(hashval, bytes) or len(hashval) not in (20, 32): raise TypeError(b"node must be a 20-byte or 32-byte byte string") flags = value[1] - if len(hashval) == 22: - hashval = hashval[:-1] if not isinstance(flags, bytes) or len(flags) > 1: raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags) needle, found = self.bsearch2(key) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/merge.py --- a/mercurial/merge.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/merge.py Fri Sep 18 10:48:43 2020 -0400 @@ -7,6 +7,7 @@ from __future__ import absolute_import +import collections import errno import stat import struct @@ -126,7 +127,7 @@ return None -def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce): +def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce): """ Considers any actions that care about the presence of conflicting unknown files. For some actions, the result is to abort; for others, it is to @@ -150,20 +151,23 @@ warnconflicts.update(conflicts) checkunknowndirs = _unknowndirschecker() - for f, (m, args, msg) in pycompat.iteritems(actions): - if m in ( + for f in mresult.files( + ( mergestatemod.ACTION_CREATED, mergestatemod.ACTION_DELETED_CHANGED, - ): - if _checkunknownfile(repo, wctx, mctx, f): - fileconflicts.add(f) - elif pathconfig and f not in wctx: - path = checkunknowndirs(repo, wctx, f) - if path is not None: - pathconflicts.add(path) - elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET: - if _checkunknownfile(repo, wctx, mctx, f, args[0]): - fileconflicts.add(f) + ) + ): + if _checkunknownfile(repo, wctx, mctx, f): + fileconflicts.add(f) + elif pathconfig and f not in wctx: + path = checkunknowndirs(repo, wctx, f) + if path is not None: + pathconflicts.add(path) + for f, args, msg in mresult.getactions( + [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET] + ): + if _checkunknownfile(repo, wctx, mctx, f, 
args[0]): + fileconflicts.add(f) allconflicts = fileconflicts | pathconflicts ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)} @@ -171,49 +175,50 @@ collectconflicts(ignoredconflicts, ignoredconfig) collectconflicts(unknownconflicts, unknownconfig) else: - for f, (m, args, msg) in pycompat.iteritems(actions): - if m == mergestatemod.ACTION_CREATED_MERGE: - fl2, anc = args - different = _checkunknownfile(repo, wctx, mctx, f) - if repo.dirstate._ignore(f): - config = ignoredconfig - else: - config = unknownconfig + for f, args, msg in list( + mresult.getactions([mergestatemod.ACTION_CREATED_MERGE]) + ): + fl2, anc = args + different = _checkunknownfile(repo, wctx, mctx, f) + if repo.dirstate._ignore(f): + config = ignoredconfig + else: + config = unknownconfig - # The behavior when force is True is described by this table: - # config different mergeforce | action backup - # * n * | get n - # * y y | merge - - # abort y n | merge - (1) - # warn y n | warn + get y - # ignore y n | get y - # - # (1) this is probably the wrong behavior here -- we should - # probably abort, but some actions like rebases currently - # don't like an abort happening in the middle of - # merge.update. 
- if not different: - actions[f] = ( - mergestatemod.ACTION_GET, - (fl2, False), - b'remote created', - ) - elif mergeforce or config == b'abort': - actions[f] = ( - mergestatemod.ACTION_MERGE, - (f, f, None, False, anc), - b'remote differs from untracked local', - ) - elif config == b'abort': - abortconflicts.add(f) - else: - if config == b'warn': - warnconflicts.add(f) - actions[f] = ( - mergestatemod.ACTION_GET, - (fl2, True), - b'remote created', - ) + # The behavior when force is True is described by this table: + # config different mergeforce | action backup + # * n * | get n + # * y y | merge - + # abort y n | merge - (1) + # warn y n | warn + get y + # ignore y n | get y + # + # (1) this is probably the wrong behavior here -- we should + # probably abort, but some actions like rebases currently + # don't like an abort happening in the middle of + # merge.update. + if not different: + mresult.addfile( + f, + mergestatemod.ACTION_GET, + (fl2, False), + b'remote created', + ) + elif mergeforce or config == b'abort': + mresult.addfile( + f, + mergestatemod.ACTION_MERGE, + (f, f, None, False, anc), + b'remote differs from untracked local', + ) + elif config == b'abort': + abortconflicts.add(f) + else: + if config == b'warn': + warnconflicts.add(f) + mresult.addfile( + f, mergestatemod.ACTION_GET, (fl2, True), b'remote created', + ) for f in sorted(abortconflicts): warn = repo.ui.warn @@ -238,18 +243,19 @@ else: repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f) - for f, (m, args, msg) in pycompat.iteritems(actions): - if m == mergestatemod.ACTION_CREATED: - backup = ( - f in fileconflicts - or f in pathconflicts - or any(p in pathconflicts for p in pathutil.finddirs(f)) - ) - (flags,) = args - actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg) + for f, args, msg in list( + mresult.getactions([mergestatemod.ACTION_CREATED]) + ): + backup = ( + f in fileconflicts + or f in pathconflicts + or any(p in pathconflicts for p in 
pathutil.finddirs(f)) + ) + (flags,) = args + mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg) -def _forgetremoved(wctx, mctx, branchmerge): +def _forgetremoved(wctx, mctx, branchmerge, mresult): """ Forget removed files @@ -264,27 +270,22 @@ as removed. """ - actions = {} m = mergestatemod.ACTION_FORGET if branchmerge: m = mergestatemod.ACTION_REMOVE for f in wctx.deleted(): if f not in mctx: - actions[f] = m, None, b"forget deleted" + mresult.addfile(f, m, None, b"forget deleted") if not branchmerge: for f in wctx.removed(): if f not in mctx: - actions[f] = ( - mergestatemod.ACTION_FORGET, - None, - b"forget removed", + mresult.addfile( + f, mergestatemod.ACTION_FORGET, None, b"forget removed", ) - return actions - -def _checkcollision(repo, wmf, actions): +def _checkcollision(repo, wmf, mresult): """ Check for case-folding collisions. """ @@ -292,39 +293,38 @@ narrowmatch = repo.narrowmatch() if not narrowmatch.always(): pmmf = set(wmf.walk(narrowmatch)) - if actions: - narrowactions = {} - for m, actionsfortype in pycompat.iteritems(actions): - narrowactions[m] = [] - for (f, args, msg) in actionsfortype: - if narrowmatch(f): - narrowactions[m].append((f, args, msg)) - actions = narrowactions + if mresult: + for f in list(mresult.files()): + if not narrowmatch(f): + mresult.removefile(f) else: # build provisional merged manifest up pmmf = set(wmf) - if actions: + if mresult: # KEEP and EXEC are no-op - for m in ( - mergestatemod.ACTION_ADD, - mergestatemod.ACTION_ADD_MODIFIED, - mergestatemod.ACTION_FORGET, - mergestatemod.ACTION_GET, - mergestatemod.ACTION_CHANGED_DELETED, - mergestatemod.ACTION_DELETED_CHANGED, + for f in mresult.files( + ( + mergestatemod.ACTION_ADD, + mergestatemod.ACTION_ADD_MODIFIED, + mergestatemod.ACTION_FORGET, + mergestatemod.ACTION_GET, + mergestatemod.ACTION_CHANGED_DELETED, + mergestatemod.ACTION_DELETED_CHANGED, + ) ): - for f, args, msg in actions[m]: - pmmf.add(f) - for f, args, msg in 
actions[mergestatemod.ACTION_REMOVE]: + pmmf.add(f) + for f in mresult.files((mergestatemod.ACTION_REMOVE,)): pmmf.discard(f) - for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]: + for f, args, msg in mresult.getactions( + [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL] + ): f2, flags = args pmmf.discard(f2) pmmf.add(f) - for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]: + for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)): pmmf.add(f) - for f, args, msg in actions[mergestatemod.ACTION_MERGE]: + for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]): f1, f2, fa, move, anc = args if move: pmmf.discard(f1) @@ -382,7 +382,7 @@ break -def checkpathconflicts(repo, wctx, mctx, actions): +def checkpathconflicts(repo, wctx, mctx, mresult): """ Check if any actions introduce path conflicts in the repository, updating actions to record or handle the path conflict accordingly. @@ -407,30 +407,33 @@ # The set of files deleted by all the actions. deletedfiles = set() - for f, (m, args, msg) in actions.items(): - if m in ( + for f in mresult.files( + ( mergestatemod.ACTION_CREATED, mergestatemod.ACTION_DELETED_CHANGED, mergestatemod.ACTION_MERGE, mergestatemod.ACTION_CREATED_MERGE, - ): - # This action may create a new local file. - createdfiledirs.update(pathutil.finddirs(f)) - if mf.hasdir(f): - # The file aliases a local directory. This might be ok if all - # the files in the local directory are being deleted. This - # will be checked once we know what all the deleted files are. - remoteconflicts.add(f) - # Track the names of all deleted files. - if m == mergestatemod.ACTION_REMOVE: - deletedfiles.add(f) - if m == mergestatemod.ACTION_MERGE: - f1, f2, fa, move, anc = args - if move: - deletedfiles.add(f1) - if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL: - f2, flags = args - deletedfiles.add(f2) + ) + ): + # This action may create a new local file. 
+ createdfiledirs.update(pathutil.finddirs(f)) + if mf.hasdir(f): + # The file aliases a local directory. This might be ok if all + # the files in the local directory are being deleted. This + # will be checked once we know what all the deleted files are. + remoteconflicts.add(f) + # Track the names of all deleted files. + for f in mresult.files((mergestatemod.ACTION_REMOVE,)): + deletedfiles.add(f) + for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)): + f1, f2, fa, move, anc = args + if move: + deletedfiles.add(f1) + for (f, args, msg) in mresult.getactions( + (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,) + ): + f2, flags = args + deletedfiles.add(f2) # Check all directories that contain created files for path conflicts. for p in createdfiledirs: @@ -444,7 +447,8 @@ # A file is in a directory which aliases a local file. # We will need to rename the local file. localconflicts.add(p) - if p in actions and actions[p][0] in ( + pd = mresult.getfile(p) + if pd and pd[0] in ( mergestatemod.ACTION_CREATED, mergestatemod.ACTION_DELETED_CHANGED, mergestatemod.ACTION_MERGE, @@ -459,14 +463,16 @@ for p in localconflicts: if p not in deletedfiles: ctxname = bytes(wctx).rstrip(b'+') - pnew = util.safename(p, ctxname, wctx, set(actions.keys())) + pnew = util.safename(p, ctxname, wctx, set(mresult.files())) porig = wctx[p].copysource() or p - actions[pnew] = ( + mresult.addfile( + pnew, mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, (p, porig), b'local path conflict', ) - actions[p] = ( + mresult.addfile( + p, mergestatemod.ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict', @@ -477,23 +483,25 @@ ctxname = bytes(mctx).rstrip(b'+') for f, p in _filesindirs(repo, mf, remoteconflicts): if f not in deletedfiles: - m, args, msg = actions[p] - pnew = util.safename(p, ctxname, wctx, set(actions.keys())) + m, args, msg = mresult.getfile(p) + pnew = util.safename(p, ctxname, wctx, set(mresult.files())) if m in ( mergestatemod.ACTION_DELETED_CHANGED, 
mergestatemod.ACTION_MERGE, ): # Action was merge, just update target. - actions[pnew] = (m, args, msg) + mresult.addfile(pnew, m, args, msg) else: # Action was create, change to renamed get action. fl = args[0] - actions[pnew] = ( + mresult.addfile( + pnew, mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, (p, fl), b'remote path conflict', ) - actions[p] = ( + mresult.addfile( + p, mergestatemod.ACTION_PATH_CONFLICT, (pnew, mergestatemod.ACTION_REMOVE), b'path conflict', @@ -507,24 +515,33 @@ raise error.Abort(_(b"destination manifest contains path conflicts")) -def _filternarrowactions(narrowmatch, branchmerge, actions): +def _filternarrowactions(narrowmatch, branchmerge, mresult): """ Filters out actions that can ignored because the repo is narrowed. Raise an exception if the merge cannot be completed because the repo is narrowed. """ - nooptypes = {b'k'} # TODO: handle with nonconflicttypes - nonconflicttypes = set(b'a am c cm f g gs r e'.split()) + # TODO: handle with nonconflicttypes + nonconflicttypes = { + mergestatemod.ACTION_ADD, + mergestatemod.ACTION_ADD_MODIFIED, + mergestatemod.ACTION_CREATED, + mergestatemod.ACTION_CREATED_MERGE, + mergestatemod.ACTION_FORGET, + mergestatemod.ACTION_GET, + mergestatemod.ACTION_REMOVE, + mergestatemod.ACTION_EXEC, + } # We mutate the items in the dict during iteration, so iterate # over a copy. - for f, action in list(actions.items()): + for f, action in mresult.filemap(): if narrowmatch(f): pass elif not branchmerge: - del actions[f] # just updating, ignore changes outside clone - elif action[0] in nooptypes: - del actions[f] # merge does not affect file + mresult.removefile(f) # just updating, ignore changes outside clone + elif action[0] in mergeresult.NO_OP_ACTIONS: + mresult.removefile(f) # merge does not affect file elif action[0] in nonconflicttypes: raise error.Abort( _( @@ -540,6 +557,176 @@ ) +class mergeresult(object): + ''''An object representing result of merging manifests. 
+ + It has information about what actions need to be performed on dirstate + mapping of divergent renames and other such cases. ''' + + NO_OP_ACTIONS = ( + mergestatemod.ACTION_KEEP, + mergestatemod.ACTION_KEEP_ABSENT, + ) + + def __init__(self): + """ + filemapping: dict of filename as keys and action related info as values + diverge: mapping of source name -> list of dest name for + divergent renames + renamedelete: mapping of source name -> list of destinations for files + deleted on one side and renamed on other. + commitinfo: dict containing data which should be used on commit + contains a filename -> info mapping + actionmapping: dict of action names as keys and values are dict of + filename as key and related data as values + """ + self._filemapping = {} + self._diverge = {} + self._renamedelete = {} + self._commitinfo = collections.defaultdict(dict) + self._actionmapping = collections.defaultdict(dict) + + def updatevalues(self, diverge, renamedelete): + self._diverge = diverge + self._renamedelete = renamedelete + + def addfile(self, filename, action, data, message): + """ adds a new file to the mergeresult object + + filename: file which we are adding + action: one of mergestatemod.ACTION_* + data: a tuple of information like fctx and ctx related to this merge + message: a message about the merge + """ + # if the file already existed, we need to delete it's old + # entry form _actionmapping too + if filename in self._filemapping: + a, d, m = self._filemapping[filename] + del self._actionmapping[a][filename] + + self._filemapping[filename] = (action, data, message) + self._actionmapping[action][filename] = (data, message) + + def getfile(self, filename, default_return=None): + """ returns (action, args, msg) about this file + + returns default_return if the file is not present """ + if filename in self._filemapping: + return self._filemapping[filename] + return default_return + + def files(self, actions=None): + """ returns files on which provided action 
needs to perfromed + + If actions is None, all files are returned + """ + # TODO: think whether we should return renamedelete and + # diverge filenames also + if actions is None: + for f in self._filemapping: + yield f + + else: + for a in actions: + for f in self._actionmapping[a]: + yield f + + def removefile(self, filename): + """ removes a file from the mergeresult object as the file might + not merging anymore """ + action, data, message = self._filemapping[filename] + del self._filemapping[filename] + del self._actionmapping[action][filename] + + def getactions(self, actions, sort=False): + """ get list of files which are marked with these actions + if sort is true, files for each action is sorted and then added + + Returns a list of tuple of form (filename, data, message) + """ + for a in actions: + if sort: + for f in sorted(self._actionmapping[a]): + args, msg = self._actionmapping[a][f] + yield f, args, msg + else: + for f, (args, msg) in pycompat.iteritems( + self._actionmapping[a] + ): + yield f, args, msg + + def len(self, actions=None): + """ returns number of files which needs actions + + if actions is passed, total of number of files in that action + only is returned """ + + if actions is None: + return len(self._filemapping) + + return sum(len(self._actionmapping[a]) for a in actions) + + def filemap(self, sort=False): + if sorted: + for key, val in sorted(pycompat.iteritems(self._filemapping)): + yield key, val + else: + for key, val in pycompat.iteritems(self._filemapping): + yield key, val + + def addcommitinfo(self, filename, key, value): + """ adds key-value information about filename which will be required + while committing this merge """ + self._commitinfo[filename][key] = value + + @property + def diverge(self): + return self._diverge + + @property + def renamedelete(self): + return self._renamedelete + + @property + def commitinfo(self): + return self._commitinfo + + @property + def actionsdict(self): + """ returns a dictionary of actions 
to be perfomed with action as key + and a list of files and related arguments as values """ + res = collections.defaultdict(list) + for a, d in pycompat.iteritems(self._actionmapping): + for f, (args, msg) in pycompat.iteritems(d): + res[a].append((f, args, msg)) + return res + + def setactions(self, actions): + self._filemapping = actions + self._actionmapping = collections.defaultdict(dict) + for f, (act, data, msg) in pycompat.iteritems(self._filemapping): + self._actionmapping[act][f] = data, msg + + def hasconflicts(self): + """ tells whether this merge resulted in some actions which can + result in conflicts or not """ + for a in self._actionmapping.keys(): + if ( + a + not in ( + mergestatemod.ACTION_GET, + mergestatemod.ACTION_EXEC, + mergestatemod.ACTION_REMOVE, + mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, + ) + and self._actionmapping[a] + and a not in self.NO_OP_ACTIONS + ): + return True + + return False + + def manifestmerge( repo, wctx, @@ -559,13 +746,9 @@ matcher = matcher to filter file lists acceptremote = accept the incoming changes without prompting - Returns: - - actions: dict of filename as keys and action related info as values - diverge: mapping of source name -> list of dest name for divergent renames - renamedelete: mapping of source name -> list of destinations for files - deleted on one side and renamed on other. 
+ Returns an object of mergeresult class """ + mresult = mergeresult() if matcher is not None and matcher.always(): matcher = None @@ -578,6 +761,9 @@ branch_copies1 = copies.branch_copies() branch_copies2 = copies.branch_copies() diverge = {} + # information from merge which is needed at commit time + # for example choosing filelog of which parent to commit + # TODO: use specific constants in future for this mapping if followcopies: branch_copies1, branch_copies2, diverge = copies.mergecopies( repo, wctx, p2, pa @@ -626,7 +812,6 @@ diff = m1.diff(m2, match=matcher) - actions = {} for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff): if n1 and n2: # file exists on both local and remote side if f not in ma: @@ -634,59 +819,60 @@ fa = branch_copies1.copy.get( f, None ) or branch_copies2.copy.get(f, None) + args, msg = None, None if fa is not None: - actions[f] = ( - mergestatemod.ACTION_MERGE, - (f, f, fa, False, pa.node()), - b'both renamed from %s' % fa, - ) + args = (f, f, fa, False, pa.node()) + msg = b'both renamed from %s' % fa else: - actions[f] = ( - mergestatemod.ACTION_MERGE, - (f, f, None, False, pa.node()), - b'both created', - ) + args = (f, f, None, False, pa.node()) + msg = b'both created' + mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg) else: a = ma[f] fla = ma.flags(f) nol = b'l' not in fl1 + fl2 + fla if n2 == a and fl2 == fla: - actions[f] = ( - mergestatemod.ACTION_KEEP, - (), - b'remote unchanged', + mresult.addfile( + f, mergestatemod.ACTION_KEEP, (), b'remote unchanged', ) elif n1 == a and fl1 == fla: # local unchanged - use remote if n1 == n2: # optimization: keep local content - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_EXEC, (fl2,), b'update permissions', ) else: - actions[f] = ( - mergestatemod.ACTION_GET_OTHER_AND_STORE - if branchmerge - else mergestatemod.ACTION_GET, + mresult.addfile( + f, + mergestatemod.ACTION_GET, (fl2, False), b'remote is newer', ) + if branchmerge: + mresult.addcommitinfo( + f, 
b'filenode-source', b'other' + ) elif nol and n2 == a: # remote only changed 'x' - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_EXEC, (fl2,), b'update permissions', ) elif nol and n1 == a: # local only changed 'x' - actions[f] = ( - mergestatemod.ACTION_GET_OTHER_AND_STORE - if branchmerge - else mergestatemod.ACTION_GET, + mresult.addfile( + f, + mergestatemod.ACTION_GET, (fl1, False), b'remote is newer', ) + if branchmerge: + mresult.addcommitinfo(f, b'filenode-source', b'other') else: # both changed something - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_MERGE, (f, f, f, False, pa.node()), b'versions differ', @@ -699,20 +885,23 @@ ): # directory rename, move local f2 = branch_copies1.movewithdir[f] if f2 in m2: - actions[f2] = ( + mresult.addfile( + f2, mergestatemod.ACTION_MERGE, (f, f2, None, True, pa.node()), b'remote directory rename, both created', ) else: - actions[f2] = ( + mresult.addfile( + f2, mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1), b'remote directory rename - move from %s' % f, ) elif f in branch_copies1.copy: f2 = branch_copies1.copy[f] - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_MERGE, (f, f2, f2, False, pa.node()), b'local copied/moved from %s' % f2, @@ -720,13 +909,15 @@ elif f in ma: # clean, a different, no remote if n1 != ma[f]: if acceptremote: - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_REMOVE, None, b'remote delete', ) else: - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_CHANGED_DELETED, (f, None, f, False, pa.node()), b'prompt changed/deleted', @@ -734,48 +925,50 @@ elif n1 == addednodeid: # This file was locally added. We should forget it instead of # deleting it. 
- actions[f] = ( - mergestatemod.ACTION_FORGET, - None, - b'remote deleted', + mresult.addfile( + f, mergestatemod.ACTION_FORGET, None, b'remote deleted', ) else: - actions[f] = ( - mergestatemod.ACTION_REMOVE, - None, - b'other deleted', + mresult.addfile( + f, mergestatemod.ACTION_REMOVE, None, b'other deleted', ) + else: # file not in ancestor, not in remote + mresult.addfile( + f, + mergestatemod.ACTION_KEEP, + None, + b'ancestor missing, remote missing', + ) + elif n2: # file exists only on remote side if f in copied1: pass # we'll deal with it on m1 side elif f in branch_copies2.movewithdir: f2 = branch_copies2.movewithdir[f] if f2 in m1: - actions[f2] = ( + mresult.addfile( + f2, mergestatemod.ACTION_MERGE, (f2, f, None, False, pa.node()), b'local directory rename, both created', ) else: - actions[f2] = ( + mresult.addfile( + f2, mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, (f, fl2), b'local directory rename - get from %s' % f, ) elif f in branch_copies2.copy: f2 = branch_copies2.copy[f] + msg, args = None, None if f2 in m2: - actions[f] = ( - mergestatemod.ACTION_MERGE, - (f2, f, f2, False, pa.node()), - b'remote copied from %s' % f2, - ) + args = (f2, f, f2, False, pa.node()) + msg = b'remote copied from %s' % f2 else: - actions[f] = ( - mergestatemod.ACTION_MERGE, - (f2, f, f2, True, pa.node()), - b'remote moved from %s' % f2, - ) + args = (f2, f, f2, True, pa.node()) + msg = b'remote moved from %s' % f2 + mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg) elif f not in ma: # local unknown, remote created: the logic is described by the # following table: @@ -789,19 +982,22 @@ # Checking whether the files are different is expensive, so we # don't do that when we can avoid it. 
if not force: - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_CREATED, (fl2,), b'remote created', ) elif not branchmerge: - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_CREATED, (fl2,), b'remote created', ) else: - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_CREATED_MERGE, (fl2, pa.node()), b'remote created, get or merge', @@ -814,60 +1010,67 @@ df = branch_copies1.dirmove[d] + f[len(d) :] break if df is not None and df in m1: - actions[df] = ( + mresult.addfile( + df, mergestatemod.ACTION_MERGE, (df, f, f, False, pa.node()), b'local directory rename - respect move ' b'from %s' % f, ) elif acceptremote: - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_CREATED, (fl2,), b'remote recreating', ) else: - actions[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_DELETED_CHANGED, (None, f, f, False, pa.node()), b'prompt deleted/changed', ) + else: + mresult.addfile( + f, + mergestatemod.ACTION_KEEP_ABSENT, + None, + b'local not present, remote unchanged', + ) if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'): # If we are merging, look for path conflicts. - checkpathconflicts(repo, wctx, p2, actions) + checkpathconflicts(repo, wctx, p2, mresult) narrowmatch = repo.narrowmatch() if not narrowmatch.always(): # Updates "actions" in place - _filternarrowactions(narrowmatch, branchmerge, actions) + _filternarrowactions(narrowmatch, branchmerge, mresult) renamedelete = branch_copies1.renamedelete renamedelete.update(branch_copies2.renamedelete) - return actions, diverge, renamedelete + mresult.updatevalues(diverge, renamedelete) + return mresult -def _resolvetrivial(repo, wctx, mctx, ancestor, actions): +def _resolvetrivial(repo, wctx, mctx, ancestor, mresult): """Resolves false conflicts where the nodeid changed but the content remained the same.""" # We force a copy of actions.items() because we're going to mutate # actions as we resolve trivial conflicts. 
- for f, (m, args, msg) in list(actions.items()): - if ( - m == mergestatemod.ACTION_CHANGED_DELETED - and f in ancestor - and not wctx[f].cmp(ancestor[f]) - ): + for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))): + if f in ancestor and not wctx[f].cmp(ancestor[f]): # local did change but ended up with same content - actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same' - elif ( - m == mergestatemod.ACTION_DELETED_CHANGED - and f in ancestor - and not mctx[f].cmp(ancestor[f]) - ): + mresult.addfile( + f, mergestatemod.ACTION_REMOVE, None, b'prompt same' + ) + + for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))): + if f in ancestor and not mctx[f].cmp(ancestor[f]): # remote did change but ended up with same content - del actions[f] # don't get = keep local deleted + mresult.removefile(f) # don't get = keep local deleted def calculateupdates( @@ -891,13 +1094,14 @@ Also filters out actions which are unrequired if repository is sparse. - Returns same 3 element tuple as manifestmerge(). + Returns mergeresult object same as manifestmerge(). """ # Avoid cycle. from . 
import sparse + mresult = None if len(ancestors) == 1: # default - actions, diverge, renamedelete = manifestmerge( + mresult = manifestmerge( repo, wctx, mctx, @@ -908,7 +1112,7 @@ acceptremote, followcopies, ) - _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce) + _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce) else: # only when merge.preferancestor=* - the default repo.ui.note( @@ -920,14 +1124,17 @@ ) ) - # Call for bids - fbids = ( - {} - ) # mapping filename to bids (action method to list af actions) + # mapping filename to bids (action method to list af actions) + # {FILENAME1 : BID1, FILENAME2 : BID2} + # BID is another dictionary which contains + # mapping of following form: + # {ACTION_X : [info, ..], ACTION_Y : [info, ..]} + fbids = {} + mresult = mergeresult() diverge, renamedelete = None, None for ancestor in ancestors: repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor) - actions, diverge1, renamedelete1 = manifestmerge( + mresult1 = manifestmerge( repo, wctx, mctx, @@ -939,19 +1146,25 @@ followcopies, forcefulldiff=True, ) - _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce) + _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce) # Track the shortest set of warning on the theory that bid # merge will correctly incorporate more information - if diverge is None or len(diverge1) < len(diverge): - diverge = diverge1 - if renamedelete is None or len(renamedelete) < len(renamedelete1): - renamedelete = renamedelete1 + if diverge is None or len(mresult1.diverge) < len(diverge): + diverge = mresult1.diverge + if renamedelete is None or len(renamedelete) < len( + mresult1.renamedelete + ): + renamedelete = mresult1.renamedelete - for f, a in sorted(pycompat.iteritems(actions)): + # blindly update final mergeresult commitinfo with what we get + # from mergeresult object for each ancestor + # TODO: some commitinfo depends on what bid merge choose and hence + # we will need to make 
commitinfo also depend on bid merge logic + mresult._commitinfo.update(mresult1._commitinfo) + + for f, a in mresult1.filemap(sort=True): m, args, msg = a - if m == mergestatemod.ACTION_GET_OTHER_AND_STORE: - m = mergestatemod.ACTION_GET repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m)) if f in fbids: d = fbids[f] @@ -962,29 +1175,42 @@ else: fbids[f] = {m: [a]} + # Call for bids # Pick the best bid for each file - repo.ui.note(_(b'\nauction for merging merge bids\n')) - actions = {} + repo.ui.note( + _(b'\nauction for merging merge bids (%d ancestors)\n') + % len(ancestors) + ) for f, bids in sorted(fbids.items()): + if repo.ui.debugflag: + repo.ui.debug(b" list of bids for %s:\n" % f) + for m, l in sorted(bids.items()): + for _f, args, msg in l: + repo.ui.debug(b' %s -> %s\n' % (msg, m)) # bids is a mapping from action method to list af actions # Consensus? if len(bids) == 1: # all bids are the same kind of method m, l = list(bids.items())[0] if all(a == l[0] for a in l[1:]): # len(bids) is > 1 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m)) - actions[f] = l[0] + mresult.addfile(f, *l[0]) continue # If keep is an option, just do it. if mergestatemod.ACTION_KEEP in bids: repo.ui.note(_(b" %s: picking 'keep' action\n") % f) - actions[f] = bids[mergestatemod.ACTION_KEEP][0] + mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0]) + continue + # If keep absent is an option, just do that + if mergestatemod.ACTION_KEEP_ABSENT in bids: + repo.ui.note(_(b" %s: picking 'keep absent' action\n") % f) + mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP_ABSENT][0]) continue # If there are gets and they all agree [how could they not?], do it. 
if mergestatemod.ACTION_GET in bids: ga0 = bids[mergestatemod.ACTION_GET][0] if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]): repo.ui.note(_(b" %s: picking 'get' action\n") % f) - actions[f] = ga0 + mresult.addfile(f, *ga0) continue # TODO: Consider other simple actions such as mode changes # Handle inefficient democrazy. @@ -997,20 +1223,18 @@ repo.ui.warn( _(b' %s: ambiguous merge - picked %s action\n') % (f, m) ) - actions[f] = l[0] + mresult.addfile(f, *l[0]) continue repo.ui.note(_(b'end of auction\n\n')) + mresult.updatevalues(diverge, renamedelete) if wctx.rev() is None: - fractions = _forgetremoved(wctx, mctx, branchmerge) - actions.update(fractions) + _forgetremoved(wctx, mctx, branchmerge, mresult) - prunedactions = sparse.filterupdatesactions( - repo, wctx, mctx, branchmerge, actions - ) - _resolvetrivial(repo, wctx, mctx, ancestors[0], actions) + sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult) + _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult) - return prunedactions, diverge, renamedelete + return mresult def _getcwd(): @@ -1117,34 +1341,26 @@ yield True, filedata -def _prefetchfiles(repo, ctx, actions): +def _prefetchfiles(repo, ctx, mresult): """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict of merge actions. ``ctx`` is the context being merged in.""" # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they # don't touch the context to be merged in. 'cd' is skipped, because # changed/deleted never resolves to something from the remote side. 
- oplist = [ - actions[a] - for a in ( + files = mresult.files( + [ mergestatemod.ACTION_GET, mergestatemod.ACTION_DELETED_CHANGED, mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, mergestatemod.ACTION_MERGE, - ) - ] + ] + ) + prefetch = scmutil.prefetchfiles matchfiles = scmutil.matchfiles prefetch( - repo, - [ - ( - ctx.rev(), - matchfiles( - repo, [f for sublist in oplist for f, args, msg in sublist] - ), - ) - ], + repo, [(ctx.rev(), matchfiles(repo, files),)], ) @@ -1164,35 +1380,12 @@ ) -def emptyactions(): - """create an actions dict, to be populated and passed to applyupdates()""" - return { - m: [] - for m in ( - mergestatemod.ACTION_ADD, - mergestatemod.ACTION_ADD_MODIFIED, - mergestatemod.ACTION_FORGET, - mergestatemod.ACTION_GET, - mergestatemod.ACTION_CHANGED_DELETED, - mergestatemod.ACTION_DELETED_CHANGED, - mergestatemod.ACTION_REMOVE, - mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL, - mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, - mergestatemod.ACTION_MERGE, - mergestatemod.ACTION_EXEC, - mergestatemod.ACTION_KEEP, - mergestatemod.ACTION_PATH_CONFLICT, - mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, - mergestatemod.ACTION_GET_OTHER_AND_STORE, - ) - } - - def applyupdates( - repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None + repo, mresult, wctx, mctx, overwrite, wantfiledata, labels=None, ): """apply the merge action list to the working directory + mresult is a mergeresult object representing result of the merge wctx is the working copy context mctx is the context to be merged into the working copy @@ -1202,25 +1395,177 @@ batchget. 
""" - _prefetchfiles(repo, mctx, actions) + _prefetchfiles(repo, mctx, mresult) updated, merged, removed = 0, 0, 0 - ms = mergestatemod.mergestate.clean( - repo, wctx.p1().node(), mctx.node(), labels + ms = wctx.mergestate(clean=True) + ms.start(wctx.p1().node(), mctx.node(), labels) + + for f, op in pycompat.iteritems(mresult.commitinfo): + # the other side of filenode was choosen while merging, store this in + # mergestate so that it can be reused on commit + ms.addcommitinfo(f, op) + + numupdates = mresult.len() - mresult.len(mergeresult.NO_OP_ACTIONS) + progress = repo.ui.makeprogress( + _(b'updating'), unit=_(b'files'), total=numupdates ) - # add ACTION_GET_OTHER_AND_STORE to mergestate - for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]: - ms.addmergedother(e[0]) + if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]: + subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) + + # record path conflicts + for f, args, msg in mresult.getactions( + [mergestatemod.ACTION_PATH_CONFLICT], sort=True + ): + f1, fo = args + s = repo.ui.status + s( + _( + b"%s: path conflict - a file or link has the same name as a " + b"directory\n" + ) + % f + ) + if fo == b'l': + s(_(b"the local file has been renamed to %s\n") % f1) + else: + s(_(b"the remote file has been renamed to %s\n") % f1) + s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f) + ms.addpathconflict(f, f1, fo) + progress.increment(item=f) + + # When merging in-memory, we can't support worker processes, so set the + # per-item cost at 0 in that case. 
+ cost = 0 if wctx.isinmemory() else 0.001 + + # remove in parallel (must come before resolving path conflicts and getting) + prog = worker.worker( + repo.ui, + cost, + batchremove, + (repo, wctx), + list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)), + ) + for i, item in prog: + progress.increment(step=i, item=item) + removed = mresult.len((mergestatemod.ACTION_REMOVE,)) + + # resolve path conflicts (must come before getting) + for f, args, msg in mresult.getactions( + [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True + ): + repo.ui.debug(b" %s: %s -> pr\n" % (f, msg)) + (f0, origf0) = args + if wctx[f0].lexists(): + repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) + wctx[f].audit() + wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags()) + wctx[f0].remove() + progress.increment(item=f) + + # get in parallel. + threadsafe = repo.ui.configbool( + b'experimental', b'worker.wdir-get-thread-safe' + ) + prog = worker.worker( + repo.ui, + cost, + batchget, + (repo, mctx, wctx, wantfiledata), + list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)), + threadsafe=threadsafe, + hasretval=True, + ) + getfiledata = {} + for final, res in prog: + if final: + getfiledata = res + else: + i, item = res + progress.increment(step=i, item=item) + + if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]: + subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) + + # forget (manifest only, just log it) (must come first) + for f, args, msg in mresult.getactions( + (mergestatemod.ACTION_FORGET,), sort=True + ): + repo.ui.debug(b" %s: %s -> f\n" % (f, msg)) + progress.increment(item=f) + + # re-add (manifest only, just log it) + for f, args, msg in mresult.getactions( + (mergestatemod.ACTION_ADD,), sort=True + ): + repo.ui.debug(b" %s: %s -> a\n" % (f, msg)) + progress.increment(item=f) + + # re-add/mark as modified (manifest only, just log it) + for f, args, msg in mresult.getactions( + 
(mergestatemod.ACTION_ADD_MODIFIED,), sort=True + ): + repo.ui.debug(b" %s: %s -> am\n" % (f, msg)) + progress.increment(item=f) + + # keep (noop, just log it) + for f, args, msg in mresult.getactions( + (mergestatemod.ACTION_KEEP,), sort=True + ): + repo.ui.debug(b" %s: %s -> k\n" % (f, msg)) + # no progress + for f, args, msg in mresult.getactions( + (mergestatemod.ACTION_KEEP_ABSENT,), sort=True + ): + repo.ui.debug(b" %s: %s -> ka\n" % (f, msg)) + # no progress + + # directory rename, move local + for f, args, msg in mresult.getactions( + (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True + ): + repo.ui.debug(b" %s: %s -> dm\n" % (f, msg)) + progress.increment(item=f) + f0, flags = args + repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) + wctx[f].audit() + wctx[f].write(wctx.filectx(f0).data(), flags) + wctx[f0].remove() + + # local directory rename, get + for f, args, msg in mresult.getactions( + (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True + ): + repo.ui.debug(b" %s: %s -> dg\n" % (f, msg)) + progress.increment(item=f) + f0, flags = args + repo.ui.note(_(b"getting %s to %s\n") % (f0, f)) + wctx[f].write(mctx.filectx(f0).data(), flags) + + # exec + for f, args, msg in mresult.getactions( + (mergestatemod.ACTION_EXEC,), sort=True + ): + repo.ui.debug(b" %s: %s -> e\n" % (f, msg)) + progress.increment(item=f) + (flags,) = args + wctx[f].audit() + wctx[f].setflags(b'l' in flags, b'x' in flags) moves = [] - for m, l in actions.items(): - l.sort() # 'cd' and 'dc' actions are treated like other merge conflicts - mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED]) - mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED])) - mergeactions.extend(actions[mergestatemod.ACTION_MERGE]) + mergeactions = list( + mresult.getactions( + [ + mergestatemod.ACTION_CHANGED_DELETED, + mergestatemod.ACTION_DELETED_CHANGED, + mergestatemod.ACTION_MERGE, + ], + sort=True, + ) + ) for f, args, msg in mergeactions: f1, f2, fa, move, anc 
= args if f == b'.hgsubstate': # merged internally @@ -1251,150 +1596,21 @@ wctx[f].audit() wctx[f].remove() - numupdates = sum( - len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP - ) - progress = repo.ui.makeprogress( - _(b'updating'), unit=_(b'files'), total=numupdates - ) - - if [ - a - for a in actions[mergestatemod.ACTION_REMOVE] - if a[0] == b'.hgsubstate' - ]: - subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) - - # record path conflicts - for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]: - f1, fo = args - s = repo.ui.status - s( - _( - b"%s: path conflict - a file or link has the same name as a " - b"directory\n" - ) - % f + # these actions updates the file + updated = mresult.len( + ( + mergestatemod.ACTION_GET, + mergestatemod.ACTION_EXEC, + mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, + mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL, ) - if fo == b'l': - s(_(b"the local file has been renamed to %s\n") % f1) - else: - s(_(b"the remote file has been renamed to %s\n") % f1) - s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f) - ms.addpathconflict(f, f1, fo) - progress.increment(item=f) - - # When merging in-memory, we can't support worker processes, so set the - # per-item cost at 0 in that case. 
- cost = 0 if wctx.isinmemory() else 0.001 - - # remove in parallel (must come before resolving path conflicts and getting) - prog = worker.worker( - repo.ui, - cost, - batchremove, - (repo, wctx), - actions[mergestatemod.ACTION_REMOVE], - ) - for i, item in prog: - progress.increment(step=i, item=item) - removed = len(actions[mergestatemod.ACTION_REMOVE]) - - # resolve path conflicts (must come before getting) - for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]: - repo.ui.debug(b" %s: %s -> pr\n" % (f, msg)) - (f0, origf0) = args - if wctx[f0].lexists(): - repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) - wctx[f].audit() - wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags()) - wctx[f0].remove() - progress.increment(item=f) - - # get in parallel. - threadsafe = repo.ui.configbool( - b'experimental', b'worker.wdir-get-thread-safe' ) - prog = worker.worker( - repo.ui, - cost, - batchget, - (repo, mctx, wctx, wantfiledata), - actions[mergestatemod.ACTION_GET], - threadsafe=threadsafe, - hasretval=True, - ) - getfiledata = {} - for final, res in prog: - if final: - getfiledata = res - else: - i, item = res - progress.increment(step=i, item=item) - updated = len(actions[mergestatemod.ACTION_GET]) - - if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']: - subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) - - # forget (manifest only, just log it) (must come first) - for f, args, msg in actions[mergestatemod.ACTION_FORGET]: - repo.ui.debug(b" %s: %s -> f\n" % (f, msg)) - progress.increment(item=f) - - # re-add (manifest only, just log it) - for f, args, msg in actions[mergestatemod.ACTION_ADD]: - repo.ui.debug(b" %s: %s -> a\n" % (f, msg)) - progress.increment(item=f) - - # re-add/mark as modified (manifest only, just log it) - for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]: - repo.ui.debug(b" %s: %s -> am\n" % (f, msg)) - progress.increment(item=f) - - # keep (noop, just log it) - for 
f, args, msg in actions[mergestatemod.ACTION_KEEP]: - repo.ui.debug(b" %s: %s -> k\n" % (f, msg)) - # no progress - - # directory rename, move local - for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]: - repo.ui.debug(b" %s: %s -> dm\n" % (f, msg)) - progress.increment(item=f) - f0, flags = args - repo.ui.note(_(b"moving %s to %s\n") % (f0, f)) - wctx[f].audit() - wctx[f].write(wctx.filectx(f0).data(), flags) - wctx[f0].remove() - updated += 1 - - # local directory rename, get - for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]: - repo.ui.debug(b" %s: %s -> dg\n" % (f, msg)) - progress.increment(item=f) - f0, flags = args - repo.ui.note(_(b"getting %s to %s\n") % (f0, f)) - wctx[f].write(mctx.filectx(f0).data(), flags) - updated += 1 - - # exec - for f, args, msg in actions[mergestatemod.ACTION_EXEC]: - repo.ui.debug(b" %s: %s -> e\n" % (f, msg)) - progress.increment(item=f) - (flags,) = args - wctx[f].audit() - wctx[f].setflags(b'l' in flags, b'x' in flags) - updated += 1 - # the ordering is important here -- ms.mergedriver will raise if the merge # driver has changed, and we want to be able to bypass it when overwrite is # True usemergedriver = not overwrite and mergeactions and ms.mergedriver if usemergedriver: - if wctx.isinmemory(): - raise error.InMemoryMergeConflictsError( - b"in-memory merge does not support mergedriver" - ) ms.commit() proceed = driverpreprocess(repo, ms, wctx, labels=labels) # the driver might leave some files unresolved @@ -1458,9 +1674,12 @@ extraactions = ms.actions() if extraactions: - mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]} + mfiles = { + a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,)) + } for k, acts in pycompat.iteritems(extraactions): - actions[k].extend(acts) + for a in acts: + mresult.addfile(a[0], k, *a[1:]) if k == mergestatemod.ACTION_GET and wantfiledata: # no filedata until mergestate is updated to provide it for a in acts: @@ -1483,13 +1702,13 
@@ # those lists aren't consulted again. mfiles.difference_update(a[0] for a in acts) - actions[mergestatemod.ACTION_MERGE] = [ - a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles - ] + for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))): + if a[0] not in mfiles: + mresult.removefile(a[0]) progress.complete() assert len(getfiledata) == ( - len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0 + mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0 ) return updateresult(updated, merged, removed, unresolved), getfiledata @@ -1509,6 +1728,15 @@ fsmonitorthreshold = repo.ui.configint( b'fsmonitor', b'warn_update_file_count' ) + # avoid cycle dirstate -> sparse -> merge -> dirstate + from . import dirstate + + if dirstate.rustmod is not None: + # When using rust status, fsmonitor becomes necessary at higher sizes + fsmonitorthreshold = repo.ui.configint( + b'fsmonitor', b'warn_update_file_count_rust', + ) + try: # avoid cycle: extensions -> cmdutil -> merge from . 
import extensions @@ -1663,7 +1891,7 @@ if not overwrite: if len(pl) > 1: raise error.Abort(_(b"outstanding uncommitted merge")) - ms = mergestatemod.mergestate.read(repo) + ms = wc.mergestate() if list(ms.unresolved()): raise error.Abort( _(b"outstanding merge conflicts"), @@ -1734,7 +1962,7 @@ followcopies = False ### calculate phase - actionbyfile, diverge, renamedelete = calculateupdates( + mresult = calculateupdates( repo, wc, p2, @@ -1748,25 +1976,18 @@ ) if updatecheck == UPDATECHECK_NO_CONFLICT: - for f, (m, args, msg) in pycompat.iteritems(actionbyfile): - if m not in ( - mergestatemod.ACTION_GET, - mergestatemod.ACTION_KEEP, - mergestatemod.ACTION_EXEC, - mergestatemod.ACTION_REMOVE, - mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, - mergestatemod.ACTION_GET_OTHER_AND_STORE, - ): - msg = _(b"conflicting changes") - hint = _(b"commit or update --clean to discard changes") - raise error.Abort(msg, hint=hint) + if mresult.hasconflicts(): + msg = _(b"conflicting changes") + hint = _(b"commit or update --clean to discard changes") + raise error.Abort(msg, hint=hint) # Prompt and create actions. Most of this is in the resolve phase # already, but we can't handle .hgsubstate in filemerge or # subrepoutil.submerge yet so we have to keep prompting for it. 
- if b'.hgsubstate' in actionbyfile: + vals = mresult.getfile(b'.hgsubstate') + if vals: f = b'.hgsubstate' - m, args, msg = actionbyfile[f] + m, args, msg = vals prompts = filemerge.partextras(labels) prompts[b'f'] = f if m == mergestatemod.ACTION_CHANGED_DELETED: @@ -1779,22 +2000,19 @@ % prompts, 0, ): - actionbyfile[f] = ( - mergestatemod.ACTION_REMOVE, - None, - b'prompt delete', + mresult.addfile( + f, mergestatemod.ACTION_REMOVE, None, b'prompt delete', ) elif f in p1: - actionbyfile[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_ADD_MODIFIED, None, b'prompt keep', ) else: - actionbyfile[f] = ( - mergestatemod.ACTION_ADD, - None, - b'prompt keep', + mresult.addfile( + f, mergestatemod.ACTION_ADD, None, b'prompt keep', ) elif m == mergestatemod.ACTION_DELETED_CHANGED: f1, f2, fa, move, anc = args @@ -1811,24 +2029,14 @@ ) == 0 ): - actionbyfile[f] = ( + mresult.addfile( + f, mergestatemod.ACTION_GET, (flags, False), b'prompt recreating', ) else: - del actionbyfile[f] - - # Convert to dictionary-of-lists format - actions = emptyactions() - for f, (m, args, msg) in pycompat.iteritems(actionbyfile): - if m not in actions: - actions[m] = [] - actions[m].append((f, args, msg)) - - # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate - for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]: - actions[mergestatemod.ACTION_GET].append(e) + mresult.removefile(f) if not util.fscasesensitive(repo.path): # check collision between files only in p2 for clean update @@ -1837,10 +2045,10 @@ ): _checkcollision(repo, p2.manifest(), None) else: - _checkcollision(repo, wc.manifest(), actions) + _checkcollision(repo, wc.manifest(), mresult) # divergent renames - for f, fl in sorted(pycompat.iteritems(diverge)): + for f, fl in sorted(pycompat.iteritems(mresult.diverge)): repo.ui.warn( _( b"note: possible conflict - %s was renamed " @@ -1852,7 +2060,7 @@ repo.ui.warn(b" %s\n" % nf) # rename and delete - for f, fl in 
sorted(pycompat.iteritems(renamedelete)): + for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)): repo.ui.warn( _( b"note: possible conflict - %s was deleted " @@ -1876,19 +2084,19 @@ repo.vfs.write(b'updatestate', p2.hex()) _advertisefsmonitor( - repo, len(actions[mergestatemod.ACTION_GET]), p1.node() + repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node() ) wantfiledata = updatedirstate and not branchmerge stats, getfiledata = applyupdates( - repo, actions, wc, p2, overwrite, wantfiledata, labels=labels + repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels, ) if updatedirstate: with repo.dirstate.parentchange(): repo.setparents(fp1, fp2) mergestatemod.recordupdates( - repo, actions, branchmerge, getfiledata + repo, mresult.actionsdict, branchmerge, getfiledata ) # update completed, clear state util.unlink(repo.vfs.join(b'updatestate')) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/mergestate.py --- a/mercurial/mergestate.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/mergestate.py Fri Sep 18 10:48:43 2020 -0400 @@ -1,5 +1,6 @@ from __future__ import absolute_import +import collections import errno import shutil import struct @@ -11,7 +12,6 @@ nullhex, nullid, ) -from .pycompat import delattr from . import ( error, filemerge, @@ -80,6 +80,8 @@ MERGE_RECORD_DRIVER_RESOLVED = b'd' # represents that the file was automatically merged in favor # of other version. This info is used on commit. +# This is now deprecated and commit related information is now +# stored in RECORD_FILE_VALUES MERGE_RECORD_MERGED_OTHER = b'o' ##### @@ -117,13 +119,15 @@ ACTION_LOCAL_DIR_RENAME_GET = b'dg' ACTION_DIR_RENAME_MOVE_LOCAL = b'dm' ACTION_KEEP = b'k' +# the file was absent on local side before merge and we should +# keep it absent (absent means file not present, it can be a result +# of file deletion, rename etc.) 
+ACTION_KEEP_ABSENT = b'ka' ACTION_EXEC = b'e' ACTION_CREATED_MERGE = b'cm' -# GET the other/remote side and store this info in mergestate -ACTION_GET_OTHER_AND_STORE = b'gs' -class mergestate(object): +class _mergestate_base(object): '''track 3-way merge state of individual files The merge state is stored on disk when needed. Two files are used: one with @@ -168,112 +172,18 @@ 'pu' and 'pr' for path conflicts. ''' - statepathv1 = b'merge/state' - statepathv2 = b'merge/state2' - - @staticmethod - def clean(repo, node=None, other=None, labels=None): - """Initialize a brand new merge state, removing any existing state on - disk.""" - ms = mergestate(repo) - ms.reset(node, other, labels) - return ms - - @staticmethod - def read(repo): - """Initialize the merge state, reading it from disk.""" - ms = mergestate(repo) - ms._read() - return ms - def __init__(self, repo): """Initialize the merge state. Do not use this directly! Instead call read() or clean().""" self._repo = repo - self._dirty = False - self._labels = None - - def reset(self, node=None, other=None, labels=None): self._state = {} - self._stateextras = {} - self._local = None - self._other = None - self._labels = labels - for var in ('localctx', 'otherctx'): - if var in vars(self): - delattr(self, var) - if node: - self._local = node - self._other = other - self._readmergedriver = None - if self.mergedriver: - self._mdstate = MERGE_DRIVER_STATE_SUCCESS - else: - self._mdstate = MERGE_DRIVER_STATE_UNMARKED - shutil.rmtree(self._repo.vfs.join(b'merge'), True) - self._results = {} - self._dirty = False - - def _read(self): - """Analyse each record content to restore a serialized state from disk - - This function process "record" entry produced by the de-serialization - of on disk file. 
- """ - self._state = {} - self._stateextras = {} + self._stateextras = collections.defaultdict(dict) self._local = None self._other = None - for var in ('localctx', 'otherctx'): - if var in vars(self): - delattr(self, var) + self._labels = None self._readmergedriver = None - self._mdstate = MERGE_DRIVER_STATE_SUCCESS - unsupported = set() - records = self._readrecords() - for rtype, record in records: - if rtype == RECORD_LOCAL: - self._local = bin(record) - elif rtype == RECORD_OTHER: - self._other = bin(record) - elif rtype == RECORD_MERGE_DRIVER_STATE: - bits = record.split(b'\0', 1) - mdstate = bits[1] - if len(mdstate) != 1 or mdstate not in ( - MERGE_DRIVER_STATE_UNMARKED, - MERGE_DRIVER_STATE_MARKED, - MERGE_DRIVER_STATE_SUCCESS, - ): - # the merge driver should be idempotent, so just rerun it - mdstate = MERGE_DRIVER_STATE_UNMARKED - - self._readmergedriver = bits[0] - self._mdstate = mdstate - elif rtype in ( - RECORD_MERGED, - RECORD_CHANGEDELETE_CONFLICT, - RECORD_PATH_CONFLICT, - RECORD_MERGE_DRIVER_MERGE, - LEGACY_RECORD_RESOLVED_OTHER, - ): - bits = record.split(b'\0') - self._state[bits[0]] = bits[1:] - elif rtype == RECORD_FILE_VALUES: - filename, rawextras = record.split(b'\0', 1) - extraparts = rawextras.split(b'\0') - extras = {} - i = 0 - while i < len(extraparts): - extras[extraparts[i]] = extraparts[i + 1] - i += 2 - - self._stateextras[filename] = extras - elif rtype == RECORD_LABELS: - labels = record.split(b'\0', 2) - self._labels = [l for l in labels if len(l) > 0] - elif not rtype.islower(): - unsupported.add(rtype) + self._mdstate = MERGE_DRIVER_STATE_UNMARKED # contains a mapping of form: # {filename : (merge_return_value, action_to_be_performed} # these are results of re-running merge process @@ -282,118 +192,15 @@ self._results = {} self._dirty = False - if unsupported: - raise error.UnsupportedMergeRecords(unsupported) - - def _readrecords(self): - """Read merge state from disk and return a list of record (TYPE, data) - - We read 
data from both v1 and v2 files and decide which one to use. - - V1 has been used by version prior to 2.9.1 and contains less data than - v2. We read both versions and check if no data in v2 contradicts - v1. If there is not contradiction we can safely assume that both v1 - and v2 were written at the same time and use the extract data in v2. If - there is contradiction we ignore v2 content as we assume an old version - of Mercurial has overwritten the mergestate file and left an old v2 - file around. - - returns list of record [(TYPE, data), ...]""" - v1records = self._readrecordsv1() - v2records = self._readrecordsv2() - if self._v1v2match(v1records, v2records): - return v2records - else: - # v1 file is newer than v2 file, use it - # we have to infer the "other" changeset of the merge - # we cannot do better than that with v1 of the format - mctx = self._repo[None].parents()[-1] - v1records.append((RECORD_OTHER, mctx.hex())) - # add place holder "other" file node information - # nobody is using it yet so we do no need to fetch the data - # if mctx was wrong `mctx[bits[-2]]` may fails. - for idx, r in enumerate(v1records): - if r[0] == RECORD_MERGED: - bits = r[1].split(b'\0') - bits.insert(-2, b'') - v1records[idx] = (r[0], b'\0'.join(bits)) - return v1records - - def _v1v2match(self, v1records, v2records): - oldv2 = set() # old format version of v2 record - for rec in v2records: - if rec[0] == RECORD_LOCAL: - oldv2.add(rec) - elif rec[0] == RECORD_MERGED: - # drop the onode data (not contained in v1) - oldv2.add((RECORD_MERGED, _droponode(rec[1]))) - for rec in v1records: - if rec not in oldv2: - return False - else: - return True - - def _readrecordsv1(self): - """read on disk merge state for version 1 file - - returns list of record [(TYPE, data), ...] 
+ def reset(self): + pass - Note: the "F" data from this file are one entry short - (no "other file node" entry) - """ - records = [] - try: - f = self._repo.vfs(self.statepathv1) - for i, l in enumerate(f): - if i == 0: - records.append((RECORD_LOCAL, l[:-1])) - else: - records.append((RECORD_MERGED, l[:-1])) - f.close() - except IOError as err: - if err.errno != errno.ENOENT: - raise - return records - - def _readrecordsv2(self): - """read on disk merge state for version 2 file - - This format is a list of arbitrary records of the form: - - [type][length][content] - - `type` is a single character, `length` is a 4 byte integer, and - `content` is an arbitrary byte sequence of length `length`. - - Mercurial versions prior to 3.7 have a bug where if there are - unsupported mandatory merge records, attempting to clear out the merge - state with hg update --clean or similar aborts. The 't' record type - works around that by writing out what those versions treat as an - advisory record, but later versions interpret as special: the first - character is the 'real' record type and everything onwards is the data. 
- - Returns list of records [(TYPE, data), ...].""" - records = [] - try: - f = self._repo.vfs(self.statepathv2) - data = f.read() - off = 0 - end = len(data) - while off < end: - rtype = data[off : off + 1] - off += 1 - length = _unpack(b'>I', data[off : (off + 4)])[0] - off += 4 - record = data[off : (off + length)] - off += length - if rtype == RECORD_OVERRIDE: - rtype, record = record[0:1], record[1:] - records.append((rtype, record)) - f.close() - except IOError as err: - if err.errno != errno.ENOENT: - raise - return records + def start(self, node, other, labels=None): + self._local = node + self._other = other + self._labels = labels + if self.mergedriver: + self._mdstate = MERGE_DRIVER_STATE_SUCCESS @util.propertycache def mergedriver(self): @@ -451,98 +258,6 @@ def commit(self): """Write current state on disk (if necessary)""" - if self._dirty: - records = self._makerecords() - self._writerecords(records) - self._dirty = False - - def _makerecords(self): - records = [] - records.append((RECORD_LOCAL, hex(self._local))) - records.append((RECORD_OTHER, hex(self._other))) - if self.mergedriver: - records.append( - ( - RECORD_MERGE_DRIVER_STATE, - b'\0'.join([self.mergedriver, self._mdstate]), - ) - ) - # Write out state items. In all cases, the value of the state map entry - # is written as the contents of the record. The record type depends on - # the type of state that is stored, and capital-letter records are used - # to prevent older versions of Mercurial that do not support the feature - # from loading them. - for filename, v in pycompat.iteritems(self._state): - if v[0] == MERGE_RECORD_DRIVER_RESOLVED: - # Driver-resolved merge. These are stored in 'D' records. - records.append( - (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v)) - ) - elif v[0] in ( - MERGE_RECORD_UNRESOLVED_PATH, - MERGE_RECORD_RESOLVED_PATH, - ): - # Path conflicts. These are stored in 'P' records. The current - # resolution state ('pu' or 'pr') is stored within the record. 
- records.append( - (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v)) - ) - elif v[0] == MERGE_RECORD_MERGED_OTHER: - records.append((RECORD_MERGED, b'\0'.join([filename] + v))) - elif v[1] == nullhex or v[6] == nullhex: - # Change/Delete or Delete/Change conflicts. These are stored in - # 'C' records. v[1] is the local file, and is nullhex when the - # file is deleted locally ('dc'). v[6] is the remote file, and - # is nullhex when the file is deleted remotely ('cd'). - records.append( - (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v)) - ) - else: - # Normal files. These are stored in 'F' records. - records.append((RECORD_MERGED, b'\0'.join([filename] + v))) - for filename, extras in sorted(pycompat.iteritems(self._stateextras)): - rawextras = b'\0'.join( - b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras) - ) - records.append( - (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras)) - ) - if self._labels is not None: - labels = b'\0'.join(self._labels) - records.append((RECORD_LABELS, labels)) - return records - - def _writerecords(self, records): - """Write current state on disk (both v1 and v2)""" - self._writerecordsv1(records) - self._writerecordsv2(records) - - def _writerecordsv1(self, records): - """Write current state on disk in a version 1 file""" - f = self._repo.vfs(self.statepathv1, b'wb') - irecords = iter(records) - lrecords = next(irecords) - assert lrecords[0] == RECORD_LOCAL - f.write(hex(self._local) + b'\n') - for rtype, data in irecords: - if rtype == RECORD_MERGED: - f.write(b'%s\n' % _droponode(data)) - f.close() - - def _writerecordsv2(self, records): - """Write current state on disk in a version 2 file - - See the docstring for _readrecordsv2 for why we use 't'.""" - # these are the records that all version 2 clients can read - allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED) - f = self._repo.vfs(self.statepathv2, b'wb') - for key, data in records: - assert len(key) == 1 - if key not in allowlist: - key, data = 
RECORD_OVERRIDE, b'%s%s' % (key, data) - format = b'>sI%is' % len(data) - f.write(_pack(format, key, len(data), data)) - f.close() @staticmethod def getlocalkey(path): @@ -551,6 +266,12 @@ return hex(hashutil.sha1(path).digest()) + def _make_backup(self, fctx, localkey): + raise NotImplementedError() + + def _restore_backup(self, fctx, localkey, flags): + raise NotImplementedError() + def add(self, fcl, fco, fca, fd): """add a new (potentially?) conflicting file the merge state fcl: file context for local, @@ -564,7 +285,7 @@ localkey = nullhex else: localkey = mergestate.getlocalkey(fcl.path()) - self._repo.vfs.write(b'merge/' + localkey, fcl.data()) + self._make_backup(fcl, localkey) self._state[fd] = [ MERGE_RECORD_UNRESOLVED, localkey, @@ -587,8 +308,10 @@ self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin] self._dirty = True - def addmergedother(self, path): - self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex] + def addcommitinfo(self, path, data): + """ stores information which is required at commit + into _stateextras """ + self._stateextras[path].update(data) self._dirty = True def __contains__(self, dfile): @@ -628,7 +351,7 @@ yield f def extras(self, filename): - return self._stateextras.setdefault(filename, {}) + return self._stateextras[filename] def _resolve(self, preresolve, dfile, wctx): """rerun merge process for file path `dfile`. 
@@ -637,8 +360,6 @@ """ if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED): return True, 0 - if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER: - return True, 0 stateentry = self._state[dfile] state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry octx = self._repo[self._other] @@ -670,9 +391,7 @@ if preresolve: # restore local if localkey != nullhex: - f = self._repo.vfs(b'merge/' + localkey) - wctx[dfile].write(f.read(), flags) - f.close() + self._restore_backup(wctx[dfile], localkey, flags) else: wctx[dfile].remove(ignoremissing=True) complete, merge_ret, deleted = filemerge.premerge( @@ -790,6 +509,323 @@ self._results[f] = 0, ACTION_GET +class mergestate(_mergestate_base): + + statepathv1 = b'merge/state' + statepathv2 = b'merge/state2' + + @staticmethod + def clean(repo): + """Initialize a brand new merge state, removing any existing state on + disk.""" + ms = mergestate(repo) + ms.reset() + return ms + + @staticmethod + def read(repo): + """Initialize the merge state, reading it from disk.""" + ms = mergestate(repo) + ms._read() + return ms + + def _read(self): + """Analyse each record content to restore a serialized state from disk + + This function process "record" entry produced by the de-serialization + of on disk file. 
+ """ + self._mdstate = MERGE_DRIVER_STATE_SUCCESS + unsupported = set() + records = self._readrecords() + for rtype, record in records: + if rtype == RECORD_LOCAL: + self._local = bin(record) + elif rtype == RECORD_OTHER: + self._other = bin(record) + elif rtype == RECORD_MERGE_DRIVER_STATE: + bits = record.split(b'\0', 1) + mdstate = bits[1] + if len(mdstate) != 1 or mdstate not in ( + MERGE_DRIVER_STATE_UNMARKED, + MERGE_DRIVER_STATE_MARKED, + MERGE_DRIVER_STATE_SUCCESS, + ): + # the merge driver should be idempotent, so just rerun it + mdstate = MERGE_DRIVER_STATE_UNMARKED + + self._readmergedriver = bits[0] + self._mdstate = mdstate + elif rtype in ( + RECORD_MERGED, + RECORD_CHANGEDELETE_CONFLICT, + RECORD_PATH_CONFLICT, + RECORD_MERGE_DRIVER_MERGE, + LEGACY_RECORD_RESOLVED_OTHER, + ): + bits = record.split(b'\0') + # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated + # and we now store related information in _stateextras, so + # lets write to _stateextras directly + if bits[1] == MERGE_RECORD_MERGED_OTHER: + self._stateextras[bits[0]][b'filenode-source'] = b'other' + else: + self._state[bits[0]] = bits[1:] + elif rtype == RECORD_FILE_VALUES: + filename, rawextras = record.split(b'\0', 1) + extraparts = rawextras.split(b'\0') + extras = {} + i = 0 + while i < len(extraparts): + extras[extraparts[i]] = extraparts[i + 1] + i += 2 + + self._stateextras[filename] = extras + elif rtype == RECORD_LABELS: + labels = record.split(b'\0', 2) + self._labels = [l for l in labels if len(l) > 0] + elif not rtype.islower(): + unsupported.add(rtype) + + if unsupported: + raise error.UnsupportedMergeRecords(unsupported) + + def _readrecords(self): + """Read merge state from disk and return a list of record (TYPE, data) + + We read data from both v1 and v2 files and decide which one to use. + + V1 has been used by version prior to 2.9.1 and contains less data than + v2. We read both versions and check if no data in v2 contradicts + v1. 
If there is not contradiction we can safely assume that both v1 + and v2 were written at the same time and use the extract data in v2. If + there is contradiction we ignore v2 content as we assume an old version + of Mercurial has overwritten the mergestate file and left an old v2 + file around. + + returns list of record [(TYPE, data), ...]""" + v1records = self._readrecordsv1() + v2records = self._readrecordsv2() + if self._v1v2match(v1records, v2records): + return v2records + else: + # v1 file is newer than v2 file, use it + # we have to infer the "other" changeset of the merge + # we cannot do better than that with v1 of the format + mctx = self._repo[None].parents()[-1] + v1records.append((RECORD_OTHER, mctx.hex())) + # add place holder "other" file node information + # nobody is using it yet so we do no need to fetch the data + # if mctx was wrong `mctx[bits[-2]]` may fails. + for idx, r in enumerate(v1records): + if r[0] == RECORD_MERGED: + bits = r[1].split(b'\0') + bits.insert(-2, b'') + v1records[idx] = (r[0], b'\0'.join(bits)) + return v1records + + def _v1v2match(self, v1records, v2records): + oldv2 = set() # old format version of v2 record + for rec in v2records: + if rec[0] == RECORD_LOCAL: + oldv2.add(rec) + elif rec[0] == RECORD_MERGED: + # drop the onode data (not contained in v1) + oldv2.add((RECORD_MERGED, _droponode(rec[1]))) + for rec in v1records: + if rec not in oldv2: + return False + else: + return True + + def _readrecordsv1(self): + """read on disk merge state for version 1 file + + returns list of record [(TYPE, data), ...] 
+ + Note: the "F" data from this file are one entry short + (no "other file node" entry) + """ + records = [] + try: + f = self._repo.vfs(self.statepathv1) + for i, l in enumerate(f): + if i == 0: + records.append((RECORD_LOCAL, l[:-1])) + else: + records.append((RECORD_MERGED, l[:-1])) + f.close() + except IOError as err: + if err.errno != errno.ENOENT: + raise + return records + + def _readrecordsv2(self): + """read on disk merge state for version 2 file + + This format is a list of arbitrary records of the form: + + [type][length][content] + + `type` is a single character, `length` is a 4 byte integer, and + `content` is an arbitrary byte sequence of length `length`. + + Mercurial versions prior to 3.7 have a bug where if there are + unsupported mandatory merge records, attempting to clear out the merge + state with hg update --clean or similar aborts. The 't' record type + works around that by writing out what those versions treat as an + advisory record, but later versions interpret as special: the first + character is the 'real' record type and everything onwards is the data. 
+ + Returns list of records [(TYPE, data), ...].""" + records = [] + try: + f = self._repo.vfs(self.statepathv2) + data = f.read() + off = 0 + end = len(data) + while off < end: + rtype = data[off : off + 1] + off += 1 + length = _unpack(b'>I', data[off : (off + 4)])[0] + off += 4 + record = data[off : (off + length)] + off += length + if rtype == RECORD_OVERRIDE: + rtype, record = record[0:1], record[1:] + records.append((rtype, record)) + f.close() + except IOError as err: + if err.errno != errno.ENOENT: + raise + return records + + def commit(self): + if self._dirty: + records = self._makerecords() + self._writerecords(records) + self._dirty = False + + def _makerecords(self): + records = [] + records.append((RECORD_LOCAL, hex(self._local))) + records.append((RECORD_OTHER, hex(self._other))) + if self.mergedriver: + records.append( + ( + RECORD_MERGE_DRIVER_STATE, + b'\0'.join([self.mergedriver, self._mdstate]), + ) + ) + # Write out state items. In all cases, the value of the state map entry + # is written as the contents of the record. The record type depends on + # the type of state that is stored, and capital-letter records are used + # to prevent older versions of Mercurial that do not support the feature + # from loading them. + for filename, v in pycompat.iteritems(self._state): + if v[0] == MERGE_RECORD_DRIVER_RESOLVED: + # Driver-resolved merge. These are stored in 'D' records. + records.append( + (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v)) + ) + elif v[0] in ( + MERGE_RECORD_UNRESOLVED_PATH, + MERGE_RECORD_RESOLVED_PATH, + ): + # Path conflicts. These are stored in 'P' records. The current + # resolution state ('pu' or 'pr') is stored within the record. + records.append( + (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v)) + ) + elif v[1] == nullhex or v[6] == nullhex: + # Change/Delete or Delete/Change conflicts. These are stored in + # 'C' records. v[1] is the local file, and is nullhex when the + # file is deleted locally ('dc'). 
v[6] is the remote file, and + # is nullhex when the file is deleted remotely ('cd'). + records.append( + (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v)) + ) + else: + # Normal files. These are stored in 'F' records. + records.append((RECORD_MERGED, b'\0'.join([filename] + v))) + for filename, extras in sorted(pycompat.iteritems(self._stateextras)): + rawextras = b'\0'.join( + b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras) + ) + records.append( + (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras)) + ) + if self._labels is not None: + labels = b'\0'.join(self._labels) + records.append((RECORD_LABELS, labels)) + return records + + def _writerecords(self, records): + """Write current state on disk (both v1 and v2)""" + self._writerecordsv1(records) + self._writerecordsv2(records) + + def _writerecordsv1(self, records): + """Write current state on disk in a version 1 file""" + f = self._repo.vfs(self.statepathv1, b'wb') + irecords = iter(records) + lrecords = next(irecords) + assert lrecords[0] == RECORD_LOCAL + f.write(hex(self._local) + b'\n') + for rtype, data in irecords: + if rtype == RECORD_MERGED: + f.write(b'%s\n' % _droponode(data)) + f.close() + + def _writerecordsv2(self, records): + """Write current state on disk in a version 2 file + + See the docstring for _readrecordsv2 for why we use 't'.""" + # these are the records that all version 2 clients can read + allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED) + f = self._repo.vfs(self.statepathv2, b'wb') + for key, data in records: + assert len(key) == 1 + if key not in allowlist: + key, data = RECORD_OVERRIDE, b'%s%s' % (key, data) + format = b'>sI%is' % len(data) + f.write(_pack(format, key, len(data), data)) + f.close() + + def _make_backup(self, fctx, localkey): + self._repo.vfs.write(b'merge/' + localkey, fctx.data()) + + def _restore_backup(self, fctx, localkey, flags): + with self._repo.vfs(b'merge/' + localkey) as f: + fctx.write(f.read(), flags) + + def reset(self): + 
shutil.rmtree(self._repo.vfs.join(b'merge'), True) + + +class memmergestate(_mergestate_base): + def __init__(self, repo): + super(memmergestate, self).__init__(repo) + self._backups = {} + + def _make_backup(self, fctx, localkey): + self._backups[localkey] = fctx.data() + + def _restore_backup(self, fctx, localkey, flags): + fctx.write(self._backups[localkey], flags) + + @util.propertycache + def mergedriver(self): + configmergedriver = self._repo.ui.config( + b'experimental', b'mergedriver' + ) + if configmergedriver: + raise error.InMemoryMergeConflictsError( + b"in-memory merge does not support mergedriver" + ) + return None + + def recordupdates(repo, actions, branchmerge, getfiledata): """record merge actions to the dirstate""" # remove (must come first) @@ -832,6 +868,10 @@ for f, args, msg in actions.get(ACTION_KEEP, []): pass + # keep deleted + for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []): + pass + # get for f, args, msg in actions.get(ACTION_GET, []): if branchmerge: diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/metadata.py --- a/mercurial/metadata.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/metadata.py Fri Sep 18 10:48:43 2020 -0400 @@ -22,6 +22,79 @@ ) +class ChangingFiles(object): + """A class recording the changes made to a file by a revision + """ + + def __init__( + self, touched=(), added=(), removed=(), p1_copies=(), p2_copies=(), + ): + self._added = set(added) + self._removed = set(removed) + self._touched = set(touched) + self._touched.update(self._added) + self._touched.update(self._removed) + self._p1_copies = dict(p1_copies) + self._p2_copies = dict(p2_copies) + + @property + def added(self): + return frozenset(self._added) + + def mark_added(self, filename): + self._added.add(filename) + self._touched.add(filename) + + def update_added(self, filenames): + for f in filenames: + self.mark_added(f) + + @property + def removed(self): + return frozenset(self._removed) + + def mark_removed(self, filename): + 
self._removed.add(filename) + self._touched.add(filename) + + def update_removed(self, filenames): + for f in filenames: + self.mark_removed(f) + + @property + def touched(self): + return frozenset(self._touched) + + def mark_touched(self, filename): + self._touched.add(filename) + + def update_touched(self, filenames): + for f in filenames: + self.mark_touched(f) + + @property + def copied_from_p1(self): + return self._p1_copies.copy() + + def mark_copied_from_p1(self, source, dest): + self._p1_copies[dest] = source + + def update_copies_from_p1(self, copies): + for dest, source in copies.items(): + self.mark_copied_from_p1(source, dest) + + @property + def copied_from_p2(self): + return self._p2_copies.copy() + + def mark_copied_from_p2(self, source, dest): + self._p2_copies[dest] = source + + def update_copies_from_p2(self, copies): + for dest, source in copies.items(): + self.mark_copied_from_p2(source, dest) + + def computechangesetfilesadded(ctx): """return the list of files added in a changeset """ @@ -181,6 +254,30 @@ return None +def encode_copies_sidedata(files): + sortedfiles = sorted(files.touched) + sidedata = {} + p1copies = files.copied_from_p1 + if p1copies: + p1copies = encodecopies(sortedfiles, p1copies) + sidedata[sidedatamod.SD_P1COPIES] = p1copies + p2copies = files.copied_from_p2 + if p2copies: + p2copies = encodecopies(sortedfiles, p2copies) + sidedata[sidedatamod.SD_P2COPIES] = p2copies + filesadded = files.added + if filesadded: + filesadded = encodefileindices(sortedfiles, filesadded) + sidedata[sidedatamod.SD_FILESADDED] = filesadded + filesremoved = files.removed + if filesremoved: + filesremoved = encodefileindices(sortedfiles, filesremoved) + sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved + if not sidedata: + sidedata = None + return sidedata + + def _getsidedata(srcrepo, rev): ctx = srcrepo[rev] filescopies = computechangesetcopies(ctx) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/narrowspec.py --- a/mercurial/narrowspec.py 
Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/narrowspec.py Fri Sep 18 10:48:43 2020 -0400 @@ -9,12 +9,12 @@ from .i18n import _ from .pycompat import getattr -from .interfaces import repository from . import ( error, match as matchmod, merge, mergestate as mergestatemod, + requirements, scmutil, sparse, util, @@ -186,7 +186,7 @@ def savebackup(repo, backupname): - if repository.NARROW_REQUIREMENT not in repo.requirements: + if requirements.NARROW_REQUIREMENT not in repo.requirements: return svfs = repo.svfs svfs.tryunlink(backupname) @@ -194,13 +194,13 @@ def restorebackup(repo, backupname): - if repository.NARROW_REQUIREMENT not in repo.requirements: + if requirements.NARROW_REQUIREMENT not in repo.requirements: return util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME)) def savewcbackup(repo, backupname): - if repository.NARROW_REQUIREMENT not in repo.requirements: + if requirements.NARROW_REQUIREMENT not in repo.requirements: return vfs = repo.vfs vfs.tryunlink(backupname) @@ -212,7 +212,7 @@ def restorewcbackup(repo, backupname): - if repository.NARROW_REQUIREMENT not in repo.requirements: + if requirements.NARROW_REQUIREMENT not in repo.requirements: return # It may not exist in old repos if repo.vfs.exists(backupname): @@ -220,7 +220,7 @@ def clearwcbackup(repo, backupname): - if repository.NARROW_REQUIREMENT not in repo.requirements: + if requirements.NARROW_REQUIREMENT not in repo.requirements: return repo.vfs.tryunlink(backupname) @@ -272,15 +272,19 @@ def _writeaddedfiles(repo, pctx, files): - actions = merge.emptyactions() - addgaction = actions[mergestatemod.ACTION_GET].append + mresult = merge.mergeresult() mf = repo[b'.'].manifest() for f in files: if not repo.wvfs.exists(f): - addgaction((f, (mf.flags(f), False), b"narrowspec updated")) + mresult.addfile( + f, + mergestatemod.ACTION_GET, + (mf.flags(f), False), + b"narrowspec updated", + ) merge.applyupdates( repo, - actions, + mresult, wctx=repo[None], mctx=repo[b'.'], 
overwrite=False, diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/obsolete.py --- a/mercurial/obsolete.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/obsolete.py Fri Sep 18 10:48:43 2020 -0400 @@ -328,7 +328,7 @@ # # - remaining bytes: the metadata, each (key, value) pair after the other. _fm1version = 1 -_fm1fixed = b'>IdhHBBB20s' +_fm1fixed = b'>IdhHBBB' _fm1nodesha1 = b'20s' _fm1nodesha256 = b'32s' _fm1nodesha1size = _calcsize(_fm1nodesha1) @@ -360,48 +360,36 @@ while off < stop: # read fixed part o1 = off + fsize - t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1]) + t, secs, tz, flags, numsuc, numpar, nummeta = ufixed(data[off:o1]) if flags & sha2flag: - # FIXME: prec was read as a SHA1, needs to be amended + nodefmt = sha2fmt + nodesize = sha2size + else: + nodefmt = sha1fmt + nodesize = sha1size - # read 0 or more successors - if numsuc == 1: - o2 = o1 + sha2size - sucs = (data[o1:o2],) - else: - o2 = o1 + sha2size * numsuc - sucs = unpack(sha2fmt * numsuc, data[o1:o2]) + (prec,) = unpack(nodefmt, data[o1 : o1 + nodesize]) + o1 += nodesize - # read parents - if numpar == noneflag: - o3 = o2 - parents = None - elif numpar == 1: - o3 = o2 + sha2size - parents = (data[o2:o3],) - else: - o3 = o2 + sha2size * numpar - parents = unpack(sha2fmt * numpar, data[o2:o3]) + # read 0 or more successors + if numsuc == 1: + o2 = o1 + nodesize + sucs = (data[o1:o2],) else: - # read 0 or more successors - if numsuc == 1: - o2 = o1 + sha1size - sucs = (data[o1:o2],) - else: - o2 = o1 + sha1size * numsuc - sucs = unpack(sha1fmt * numsuc, data[o1:o2]) + o2 = o1 + nodesize * numsuc + sucs = unpack(nodefmt * numsuc, data[o1:o2]) - # read parents - if numpar == noneflag: - o3 = o2 - parents = None - elif numpar == 1: - o3 = o2 + sha1size - parents = (data[o2:o3],) - else: - o3 = o2 + sha1size * numpar - parents = unpack(sha1fmt * numpar, data[o2:o3]) + # read parents + if numpar == noneflag: + o3 = o2 + parents = None + elif numpar == 1: + o3 = o2 + nodesize 
+ parents = (data[o2:o3],) + else: + o3 = o2 + nodesize * numpar + parents = unpack(nodefmt * numpar, data[o2:o3]) # read metadata off = o3 + metasize * nummeta @@ -423,7 +411,7 @@ if flags & usingsha256: _fm1node = _fm1nodesha256 numsuc = len(sucs) - numextranodes = numsuc + numextranodes = 1 + numsuc if parents is None: numpar = _fm1parentnone else: @@ -624,6 +612,7 @@ return True if a new marker have been added, False if the markers already existed (no op). """ + flag = int(flag) if metadata is None: metadata = {} if date is None: @@ -636,11 +625,18 @@ date = dateutil.makedate() else: date = dateutil.makedate() - if len(prec) != 20: - raise ValueError(prec) - for succ in succs: - if len(succ) != 20: - raise ValueError(succ) + if flag & usingsha256: + if len(prec) != 32: + raise ValueError(prec) + for succ in succs: + if len(succ) != 32: + raise ValueError(succ) + else: + if len(prec) != 20: + raise ValueError(prec) + for succ in succs: + if len(succ) != 20: + raise ValueError(succ) if prec in succs: raise ValueError( 'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec)) @@ -659,7 +655,7 @@ % (pycompat.bytestr(k), pycompat.bytestr(v)) ) - marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents) + marker = (bytes(prec), tuple(succs), flag, metadata, date, parents) return bool(self.add(transaction, [marker])) def add(self, transaction, markers): diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/phases.py --- a/mercurial/phases.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/phases.py Fri Sep 18 10:48:43 2020 -0400 @@ -121,6 +121,7 @@ from . 
import ( error, pycompat, + requirements, smartset, txnutil, util, @@ -154,7 +155,7 @@ def supportinternal(repo): """True if the internal phase can be used on a repository""" - return b'internal-phase' in repo.requirements + return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements def _readroots(repo, phasedefaults=None): diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/registrar.py --- a/mercurial/registrar.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/registrar.py Fri Sep 18 10:48:43 2020 -0400 @@ -121,7 +121,7 @@ return self._docformat % (decl, doc) def _extrasetup(self, name, func): - """Execute exra setup for registered function, if needed + """Execute extra setup for registered function, if needed """ diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/repair.py --- a/mercurial/repair.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/repair.py Fri Sep 18 10:48:43 2020 -0400 @@ -26,6 +26,7 @@ pathutil, phases, pycompat, + requirements, util, ) from .utils import ( @@ -418,7 +419,7 @@ def manifestrevlogs(repo): yield repo.manifestlog.getstorage(b'') - if b'treemanifest' in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: # This logic is safe if treemanifest isn't enabled, but also # pointless, so we skip it if treemanifest isn't enabled. for unencoded, encoded, size in repo.store.datafiles(): @@ -476,7 +477,7 @@ progress.complete() - if b'treemanifest' in repo.requirements: + if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements: # This logic is safe if treemanifest isn't enabled, but also # pointless, so we skip it if treemanifest isn't enabled. 
for dir in pathutil.dirs(seenfiles): diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/requirements.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/requirements.py Fri Sep 18 10:48:43 2020 -0400 @@ -0,0 +1,75 @@ +# requirements.py - objects and functions related to repository requirements +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +# When narrowing is finalized and no longer subject to format changes, +# we should move this to just "narrow" or similar. +NARROW_REQUIREMENT = b'narrowhg-experimental' + +# Enables sparse working directory usage +SPARSE_REQUIREMENT = b'exp-sparse' + +# Enables the internal phase which is used to hide changesets instead +# of stripping them +INTERNAL_PHASE_REQUIREMENT = b'internal-phase' + +# Stores manifest in Tree structure +TREEMANIFEST_REQUIREMENT = b'treemanifest' + +# Increment the sub-version when the revlog v2 format changes to lock out old +# clients. +REVLOGV2_REQUIREMENT = b'exp-revlogv2.1' + +# A repository with the sparserevlog feature will have delta chains that +# can spread over a larger span. Sparse reading cuts these large spans into +# pieces, so that each piece isn't too big. +# Without the sparserevlog capability, reading from the repository could use +# huge amounts of memory, because the whole span would be read at once, +# including all the intermediate revisions that aren't pertinent for the chain. +# This is why once a repository has enabled sparse-read, it becomes required. +SPARSEREVLOG_REQUIREMENT = b'sparserevlog' + +# A repository with the sidedataflag requirement will allow to store extra +# information for revision without altering their original hashes. 
+SIDEDATA_REQUIREMENT = b'exp-sidedata-flag' + +# A repository with the the copies-sidedata-changeset requirement will store +# copies related information in changeset's sidedata. +COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset' + +# The repository use persistent nodemap for the changelog and the manifest. +NODEMAP_REQUIREMENT = b'persistent-nodemap' + +# Denotes that the current repository is a share +SHARED_REQUIREMENT = b'shared' + +# Denotes that current repository is a share and the shared source path is +# relative to the current repository root path +RELATIVE_SHARED_REQUIREMENT = b'relshared' + +# A repository with share implemented safely. The repository has different +# store and working copy requirements i.e. both `.hg/requires` and +# `.hg/store/requires` are present. +SHARESAFE_REQUIREMENT = b'exp-sharesafe' + +# List of requirements which are working directory specific +# These requirements cannot be shared between repositories if they +# share the same store +# * sparse is a working directory specific functionality and hence working +# directory specific requirement +# * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which +# represents that the current working copy/repository shares store of another +# repo. 
Hence both of them should be stored in working copy +# * SHARESAFE_REQUIREMENT needs to be stored in working dir to mark that rest of +# the requirements are stored in store's requires +WORKING_DIR_REQUIREMENTS = { + SPARSE_REQUIREMENT, + SHARED_REQUIREMENT, + RELATIVE_SHARED_REQUIREMENT, + SHARESAFE_REQUIREMENT, +} diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/revset.py --- a/mercurial/revset.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/revset.py Fri Sep 18 10:48:43 2020 -0400 @@ -411,7 +411,7 @@ """ # i18n: "adds" is a keyword pat = getstring(x, _(b"adds requires a pattern")) - return checkstatus(repo, subset, pat, 1) + return checkstatus(repo, subset, pat, 'added') @predicate(b'ancestor(*changeset)', safe=True, weight=0.5) @@ -681,12 +681,8 @@ def checkstatus(repo, subset, pat, field): """Helper for status-related revsets (adds, removes, modifies). - The field parameter says which kind is desired: - 0: modified - 1: added - 2: removed + The field parameter says which kind is desired. 
""" - label = {0: 'modified', 1: 'added', 2: 'removed'}[field] hasset = matchmod.patkind(pat) == b'set' mcache = [None] @@ -707,7 +703,7 @@ else: if not any(m(f) for f in c.files()): return False - files = getattr(repo.status(c.p1().node(), c.node()), label) + files = getattr(repo.status(c.p1().node(), c.node()), field) if fname is not None: if fname in files: return True @@ -715,7 +711,9 @@ if any(m(f) for f in files): return True - return subset.filter(matches, condrepr=(b'', field, pat)) + return subset.filter( + matches, condrepr=(b'', pycompat.sysbytes(field), pat) + ) def _children(repo, subset, parentset): @@ -1631,7 +1629,7 @@ """ # i18n: "modifies" is a keyword pat = getstring(x, _(b"modifies requires a pattern")) - return checkstatus(repo, subset, pat, 0) + return checkstatus(repo, subset, pat, 'modified') @predicate(b'named(namespace)') @@ -2090,7 +2088,7 @@ """ # i18n: "removes" is a keyword pat = getstring(x, _(b"removes requires a pattern")) - return checkstatus(repo, subset, pat, 2) + return checkstatus(repo, subset, pat, 'removed') @predicate(b'rev(number)', safe=True) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/rewriteutil.py --- a/mercurial/rewriteutil.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/rewriteutil.py Fri Sep 18 10:48:43 2020 -0400 @@ -7,16 +7,23 @@ from __future__ import absolute_import +import re + from .i18n import _ from . import ( error, node, obsolete, + obsutil, revset, + scmutil, ) +NODE_RE = re.compile(br'\b[0-9a-f]{6,64}\b') + + def precheck(repo, revs, action=b'rewrite'): """check if revs can be rewritten action is used to control the error message. @@ -70,3 +77,49 @@ ) % (command, empty_successor) ) + + +def update_hash_refs(repo, commitmsg, pending=None): + """Replace all obsolete commit hashes in the message with the current hash. + + If the obsolete commit was split or is divergent, the hash is not replaced + as there's no way to know which successor to choose. 
+ + For commands that update a series of commits in the current transaction, the + new obsolete markers can be considered by setting ``pending`` to a mapping + of ``pending[oldnode] = [successor_node1, successor_node2,..]``. + """ + if not pending: + pending = {} + cache = {} + hashes = re.findall(NODE_RE, commitmsg) + unfi = repo.unfiltered() + for h in hashes: + fullnode = scmutil.resolvehexnodeidprefix(unfi, h) + if fullnode is None: + continue + ctx = unfi[fullnode] + if not ctx.obsolete(): + successors = pending.get(fullnode) + if successors is None: + continue + # obsutil.successorssets() returns a list of list of nodes + successors = [successors] + else: + successors = obsutil.successorssets(repo, ctx.node(), cache=cache) + + # We can't make any assumptions about how to update the hash if the + # cset in question was split or diverged. + if len(successors) == 1 and len(successors[0]) == 1: + newhash = node.hex(successors[0][0]) + commitmsg = commitmsg.replace(h, newhash[: len(h)]) + else: + repo.ui.note( + _( + b'The stale commit message reference to %s could ' + b'not be updated\n' + ) + % h + ) + + return commitmsg diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/scmutil.py --- a/mercurial/scmutil.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/scmutil.py Fri Sep 18 10:48:43 2020 -0400 @@ -38,6 +38,7 @@ phases, policy, pycompat, + requirements as requirementsmod, revsetlang, similar, smartset, @@ -1470,11 +1471,34 @@ repo._quick_access_changeid_invalidate() +def filterrequirements(requirements): + """ filters the requirements into two sets: + + wcreq: requirements which should be written in .hg/requires + storereq: which should be written in .hg/store/requires + + Returns (wcreq, storereq) + """ + if requirementsmod.SHARESAFE_REQUIREMENT in requirements: + wc, store = set(), set() + for r in requirements: + if r in requirementsmod.WORKING_DIR_REQUIREMENTS: + wc.add(r) + else: + store.add(r) + return wc, store + return requirements, None + + def 
writereporequirements(repo, requirements=None): """ writes requirements for the repo to .hg/requires """ if requirements: repo.requirements = requirements - writerequires(repo.vfs, repo.requirements) + wcreq, storereq = filterrequirements(repo.requirements) + if wcreq is not None: + writerequires(repo.vfs, wcreq) + if storereq is not None: + writerequires(repo.svfs, storereq) def writerequires(opener, requirements): diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/shelve.py --- a/mercurial/shelve.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/shelve.py Fri Sep 18 10:48:43 2020 -0400 @@ -772,7 +772,7 @@ with ui.configoverride({(b'ui', b'quiet'): True}): hg.update(repo, wctx.node()) ui.pushbuffer(True) - cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents()) + cmdutil.revert(ui, repo, shelvectx) ui.popbuffer() diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/sparse.py --- a/mercurial/sparse.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/sparse.py Fri Sep 18 10:48:43 2020 -0400 @@ -21,11 +21,13 @@ mergestate as mergestatemod, pathutil, pycompat, + requirements, scmutil, util, ) from .utils import hashutil + # Whether sparse features are enabled. This variable is intended to be # temporary to facilitate porting sparse to core. It should eventually be # a per-repo option, possibly a repo requirement. 
@@ -269,19 +271,17 @@ sparsematch = matcher(repo, includetemp=False) dirstate = repo.dirstate - actions = [] + mresult = mergemod.mergeresult() dropped = [] tempincludes = readtemporaryincludes(repo) for file in tempincludes: if file in dirstate and not sparsematch(file): message = _(b'dropping temporarily included sparse files') - actions.append((file, None, message)) + mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message) dropped.append(file) - typeactions = mergemod.emptyactions() - typeactions[b'r'] = actions mergemod.applyupdates( - repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False + repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False ) # Fix dirstate @@ -366,16 +366,16 @@ return result -def filterupdatesactions(repo, wctx, mctx, branchmerge, actions): +def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult): """Filter updates to only lay out files that match the sparse rules.""" if not enabled: - return actions + return oldrevs = [pctx.rev() for pctx in wctx.parents()] oldsparsematch = matcher(repo, oldrevs) if oldsparsematch.always(): - return actions + return files = set() prunedactions = {} @@ -390,23 +390,29 @@ sparsematch = matcher(repo, [mctx.rev()]) temporaryfiles = [] - for file, action in pycompat.iteritems(actions): + for file, action in mresult.filemap(): type, args, msg = action files.add(file) if sparsematch(file): prunedactions[file] = action - elif type == b'm': + elif type == mergestatemod.ACTION_MERGE: temporaryfiles.append(file) prunedactions[file] = action elif branchmerge: - if type != b'k': + if type not in mergemod.mergeresult.NO_OP_ACTIONS: temporaryfiles.append(file) prunedactions[file] = action - elif type == b'f': + elif type == mergestatemod.ACTION_FORGET: prunedactions[file] = action elif file in wctx: - prunedactions[file] = (b'r', args, msg) + prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg) + # in case or rename on one side, it is possible that f1 might not + # 
be present in sparse checkout we should include it + # TODO: should we do the same for f2? + # exists as a separate check because file can be in sparse and hence + # if we try to club this condition in above `elif type == ACTION_MERGE` + # it won't be triggered if branchmerge and type == mergestatemod.ACTION_MERGE: f1, f2, fa, move, anc = args if not sparsematch(f1): @@ -423,22 +429,25 @@ addtemporaryincludes(repo, temporaryfiles) # Add the new files to the working copy so they can be merged, etc - actions = [] + tmresult = mergemod.mergeresult() message = b'temporarily adding to sparse checkout' wctxmanifest = repo[None].manifest() for file in temporaryfiles: if file in wctxmanifest: fctx = repo[None][file] - actions.append((file, (fctx.flags(), False), message)) + tmresult.addfile( + file, + mergestatemod.ACTION_GET, + (fctx.flags(), False), + message, + ) - typeactions = mergemod.emptyactions() - typeactions[b'g'] = actions mergemod.applyupdates( - repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False + repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False ) dirstate = repo.dirstate - for file, flags, msg in actions: + for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]): dirstate.normal(file) profiles = activeconfig(repo)[2] @@ -453,11 +462,15 @@ new = sparsematch(file) if not old and new: flags = mf.flags(file) - prunedactions[file] = (b'g', (flags, False), b'') + prunedactions[file] = ( + mergestatemod.ACTION_GET, + (flags, False), + b'', + ) elif old and not new: - prunedactions[file] = (b'r', [], b'') + prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'') - return prunedactions + mresult.setactions(prunedactions) def refreshwdir(repo, origstatus, origsparsematch, force=False): @@ -487,7 +500,7 @@ _(b'could not update sparseness due to pending changes') ) - # Calculate actions + # Calculate merge result dirstate = repo.dirstate ctx = repo[b'.'] added = [] @@ -495,8 +508,7 @@ dropped = [] mf = 
ctx.manifest() files = set(mf) - - actions = {} + mresult = mergemod.mergeresult() for file in files: old = origsparsematch(file) @@ -506,17 +518,19 @@ if (new and not old) or (old and new and not file in dirstate): fl = mf.flags(file) if repo.wvfs.exists(file): - actions[file] = (b'e', (fl,), b'') + mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'') lookup.append(file) else: - actions[file] = (b'g', (fl, False), b'') + mresult.addfile( + file, mergestatemod.ACTION_GET, (fl, False), b'' + ) added.append(file) # Drop files that are newly excluded, or that still exist in # the dirstate. elif (old and not new) or (not old and not new and file in dirstate): dropped.append(file) if file not in pending: - actions[file] = (b'r', [], b'') + mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'') # Verify there are no pending changes in newly included files abort = False @@ -540,13 +554,8 @@ if old and not new: dropped.append(file) - # Apply changes to disk - typeactions = mergemod.emptyactions() - for f, (m, args, msg) in pycompat.iteritems(actions): - typeactions[m].append((f, args, msg)) - mergemod.applyupdates( - repo, typeactions, repo[None], repo[b'.'], False, wantfiledata=False + repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False ) # Fix dirstate @@ -599,11 +608,11 @@ # updated. But this requires massive rework to matcher() and its # consumers. 
- if b'exp-sparse' in oldrequires and removing: - repo.requirements.discard(b'exp-sparse') + if requirements.SPARSE_REQUIREMENT in oldrequires and removing: + repo.requirements.discard(requirements.SPARSE_REQUIREMENT) scmutil.writereporequirements(repo) - elif b'exp-sparse' not in oldrequires: - repo.requirements.add(b'exp-sparse') + elif requirements.SPARSE_REQUIREMENT not in oldrequires: + repo.requirements.add(requirements.SPARSE_REQUIREMENT) scmutil.writereporequirements(repo) try: diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/state.py --- a/mercurial/state.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/state.py Fri Sep 18 10:48:43 2020 -0400 @@ -164,10 +164,17 @@ operation """ if not self._cmdhint: - return _(b"use 'hg %s --continue' or 'hg %s --abort'") % ( - self._opname, - self._opname, - ) + if not self._stopflag: + return _(b"use 'hg %s --continue' or 'hg %s --abort'") % ( + self._opname, + self._opname, + ) + else: + return _( + b"use 'hg %s --continue', 'hg %s --abort', " + b"or 'hg %s --stop'" + ) % (self._opname, self._opname, self._opname,) + return self._cmdhint def msg(self): diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/statichttprepo.py --- a/mercurial/statichttprepo.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/statichttprepo.py Fri Sep 18 10:48:43 2020 -0400 @@ -238,7 +238,12 @@ ) def lock(self, wait=True): - raise error.Abort(_(b'cannot lock static-http repository')) + raise error.LockUnavailable( + 0, + _(b'lock not available'), + b'lock', + _(b'cannot lock static-http repository'), + ) def _writecaches(self): pass # statichttprepository are read only diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/store.py --- a/mercurial/store.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/store.py Fri Sep 18 10:48:43 2020 -0400 @@ -373,10 +373,19 @@ return mode -_data = ( - b'bookmarks narrowspec data meta 00manifest.d 00manifest.i' - b' 00changelog.d 00changelog.i phaseroots obsstore' -) +_data = [ + b'bookmarks', + b'narrowspec', + b'data', + 
b'meta', + b'00manifest.d', + b'00manifest.i', + b'00changelog.d', + b'00changelog.i', + b'phaseroots', + b'obsstore', + b'requires', +] def isrevlog(f, kind, st): @@ -447,7 +456,7 @@ yield x def copylist(self): - return [b'requires'] + _data.split() + return _data def write(self, tr): pass @@ -494,9 +503,7 @@ return self.path + b'/' + encodefilename(f) def copylist(self): - return [b'requires', b'00changelog.i'] + [ - b'store/' + f for f in _data.split() - ] + return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data] class fncache(object): @@ -686,12 +693,21 @@ def copylist(self): d = ( - b'bookmarks narrowspec data meta dh fncache phaseroots obsstore' - b' 00manifest.d 00manifest.i 00changelog.d 00changelog.i' + b'bookmarks', + b'narrowspec', + b'data', + b'meta', + b'dh', + b'fncache', + b'phaseroots', + b'obsstore', + b'00manifest.d', + b'00manifest.i', + b'00changelog.d', + b'00changelog.i', + b'requires', ) - return [b'requires', b'00changelog.i'] + [ - b'store/' + f for f in d.split() - ] + return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d] def write(self, tr): self.fncache.write(tr) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/subrepo.py --- a/mercurial/subrepo.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/subrepo.py Fri Sep 18 10:48:43 2020 -0400 @@ -986,12 +986,11 @@ def filerevert(self, *pats, **opts): ctx = self._repo[opts['rev']] - parents = self._repo.dirstate.parents() if opts.get('all'): pats = [b'set:modified()'] else: pats = [] - cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts) + cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts) def shortid(self, revid): return revid[:12] diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/subrepoutil.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/tags.py --- a/mercurial/tags.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/tags.py Fri Sep 18 10:48:43 2020 -0400 @@ -838,7 +838,7 @@ repo = self._repo try: - lock = repo.wlock(wait=False) + lock = 
repo.lock(wait=False) except error.LockError: repo.ui.log( b'tagscache', diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templatekw.py --- a/mercurial/templatekw.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/templatekw.py Fri Sep 18 10:48:43 2020 -0400 @@ -422,7 +422,7 @@ from . import mergestate as mergestatemod mergestate = mergestatemod.mergestate.read(repo) - if mergestate.active(): + if mergestate.unresolvedcount(): merge_nodes = (mergestate.local, mergestate.other) else: merge_nodes = () diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templater.py --- a/mercurial/templater.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/templater.py Fri Sep 18 10:48:43 2020 -0400 @@ -800,10 +800,10 @@ def stylelist(): - paths = templatepaths() - if not paths: + path = templatedir() + if not path: return _(b'no templates found, try `hg debuginstall` for more info') - dirlist = os.listdir(paths[0]) + dirlist = os.listdir(path) stylelist = [] for file in dirlist: split = file.split(b".") @@ -814,17 +814,46 @@ return b", ".join(sorted(stylelist)) -def _readmapfile(mapfile): +def _open_mapfile(mapfile): + if os.path.exists(mapfile): + return util.posixfile(mapfile, b'rb') + raise error.Abort( + _(b"style '%s' not found") % mapfile, + hint=_(b"available styles: %s") % stylelist(), + ) + + +def _readmapfile(fp, mapfile): """Load template elements from the given map file""" - if not os.path.exists(mapfile): - raise error.Abort( - _(b"style '%s' not found") % mapfile, - hint=_(b"available styles: %s") % stylelist(), - ) + base = os.path.dirname(mapfile) + conf = config.config() - base = os.path.dirname(mapfile) - conf = config.config(includepaths=templatepaths()) - conf.read(mapfile, remap={b'': b'templates'}) + def include(rel, remap, sections): + subresource = None + if base: + abs = os.path.normpath(os.path.join(base, rel)) + if os.path.isfile(abs): + subresource = util.posixfile(abs, b'rb') + if not subresource: + if pycompat.ossep not in rel: + abs = rel + subresource = 
resourceutil.open_resource( + b'mercurial.templates', rel + ) + else: + dir = templatedir() + if dir: + abs = os.path.normpath(os.path.join(dir, rel)) + if os.path.isfile(abs): + subresource = util.posixfile(abs, b'rb') + if subresource: + data = subresource.read() + conf.parse( + abs, data, sections=sections, remap=remap, include=include, + ) + + data = fp.read() + conf.parse(mapfile, data, remap={b'': b'templates'}, include=include) cache = {} tmap = {} @@ -833,21 +862,22 @@ val = conf.get(b'templates', b'__base__') if val and val[0] not in b"'\"": # treat as a pointer to a base class for this style - path = util.normpath(os.path.join(base, val)) + path = os.path.normpath(os.path.join(base, val)) # fallback check in template paths if not os.path.exists(path): - for p in templatepaths(): - p2 = util.normpath(os.path.join(p, val)) + dir = templatedir() + if dir is not None: + p2 = os.path.normpath(os.path.join(dir, val)) if os.path.isfile(p2): path = p2 - break - p3 = util.normpath(os.path.join(p2, b"map")) - if os.path.isfile(p3): - path = p3 - break + else: + p3 = os.path.normpath(os.path.join(p2, b"map")) + if os.path.isfile(p3): + path = p3 - cache, tmap, aliases = _readmapfile(path) + fp = _open_mapfile(path) + cache, tmap, aliases = _readmapfile(fp, path) for key, val in conf[b'templates'].items(): if not val: @@ -883,7 +913,8 @@ """Get parsed tree for the given template name. 
Use a local cache.""" if t not in self.cache: try: - self.cache[t] = util.readfile(self._map[t]) + mapfile, fp = open_template(self._map[t]) + self.cache[t] = fp.read() except KeyError as inst: raise templateutil.TemplateNotFound( _(b'"%s" not in template map') % inst.args[0] @@ -975,6 +1006,7 @@ def frommapfile( cls, mapfile, + fp=None, filters=None, defaults=None, resources=None, @@ -984,7 +1016,9 @@ ): """Create templater from the specified map file""" t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk) - cache, tmap, aliases = _readmapfile(mapfile) + if not fp: + fp = _open_mapfile(mapfile) + cache, tmap, aliases = _readmapfile(fp, mapfile) t._loader.cache.update(cache) t._loader._map = tmap t._loader._aliasmap = _aliasrules.buildmap(aliases) @@ -1045,59 +1079,42 @@ return stream -def templatepaths(): - '''return locations used for template files.''' - pathsrel = [b'templates'] - paths = [ - os.path.normpath(os.path.join(resourceutil.datapath, f)) - for f in pathsrel - ] - return [p for p in paths if os.path.isdir(p)] - - -def templatepath(name): - '''return location of template file. returns None if not found.''' - for p in templatepaths(): - f = os.path.join(p, name) - if os.path.exists(f): - return f - return None +def templatedir(): + '''return the directory used for template files, or None.''' + path = os.path.normpath(os.path.join(resourceutil.datapath, b'templates')) + return path if os.path.isdir(path) else None -def stylemap(styles, paths=None): - """Return path to mapfile for a given style. +def open_template(name, templatepath=None): + '''returns a file-like object for the given template, and its full path - Searches mapfile in the following locations: - 1. templatepath/style/map - 2. templatepath/map-style - 3. 
templatepath/map - """ - - if paths is None: - paths = templatepaths() - elif isinstance(paths, bytes): - paths = [paths] - - if isinstance(styles, bytes): - styles = [styles] + If the name is a relative path and we're in a frozen binary, the template + will be read from the mercurial.templates package instead. The returned path + will then be the relative path. + ''' + # Does the name point directly to a map file? + if os.path.isfile(name) or os.path.isabs(name): + return name, open(name, mode='rb') - for style in styles: - # only plain name is allowed to honor template paths - if ( - not style - or style in (pycompat.oscurdir, pycompat.ospardir) - or pycompat.ossep in style - or pycompat.osaltsep - and pycompat.osaltsep in style - ): - continue - locations = [os.path.join(style, b'map'), b'map-' + style] - locations.append(b'map') + # Does the name point to a template in the provided templatepath, or + # in mercurial/templates/ if no path was provided? + if templatepath is None: + templatepath = templatedir() + if templatepath is not None: + f = os.path.join(templatepath, name) + return f, open(f, mode='rb') - for path in paths: - for location in locations: - mapfile = os.path.join(path, location) - if os.path.isfile(mapfile): - return style, mapfile + # Otherwise try to read it using the resources API + name_parts = name.split(b'/') + package_name = b'.'.join([b'mercurial', b'templates'] + name_parts[:-1]) + return ( + name, + resourceutil.open_resource(package_name, name_parts[-1]), + ) - raise RuntimeError(b"No hgweb templates found in %r" % paths) + +def try_open_template(name, templatepath=None): + try: + return open_template(name, templatepath) + except (EnvironmentError, ImportError): + return None, None diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/atom/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/coal/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a 
mercurial/templates/gitweb/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/json/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/monoblue/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/paper/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/raw/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/rss/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/spartan/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/templates/static/__init__.py diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/upgrade.py --- a/mercurial/upgrade.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/upgrade.py Fri Sep 18 10:48:43 2020 -0400 @@ -20,6 +20,7 @@ manifest, metadata, pycompat, + requirements, revlog, scmutil, util, @@ -31,7 +32,7 @@ # list of requirements that request a clone of all revlog if added/removed RECLONES_REQUIREMENTS = { b'generaldelta', - localrepo.SPARSEREVLOG_REQUIREMENT, + requirements.SPARSEREVLOG_REQUIREMENT, } @@ -58,12 +59,12 @@ return { # The upgrade code does not yet support these experimental features. # This is an artificial limitation. - b'treemanifest', + requirements.TREEMANIFEST_REQUIREMENT, # This was a precursor to generaldelta and was never enabled by default. # It should (hopefully) not exist in the wild. b'parentdelta', # Upgrade should operate on the actual store, not the shared link. - b'shared', + requirements.SHARED_REQUIREMENT, } @@ -75,10 +76,10 @@ to be allowed. 
""" supported = { - localrepo.SPARSEREVLOG_REQUIREMENT, - localrepo.SIDEDATA_REQUIREMENT, - localrepo.COPIESSDC_REQUIREMENT, - localrepo.NODEMAP_REQUIREMENT, + requirements.SPARSEREVLOG_REQUIREMENT, + requirements.SIDEDATA_REQUIREMENT, + requirements.COPIESSDC_REQUIREMENT, + requirements.NODEMAP_REQUIREMENT, } for name in compression.compengines: engine = compression.compengines[name] @@ -103,10 +104,11 @@ b'generaldelta', b'revlogv1', b'store', - localrepo.SPARSEREVLOG_REQUIREMENT, - localrepo.SIDEDATA_REQUIREMENT, - localrepo.COPIESSDC_REQUIREMENT, - localrepo.NODEMAP_REQUIREMENT, + requirements.SPARSEREVLOG_REQUIREMENT, + requirements.SIDEDATA_REQUIREMENT, + requirements.COPIESSDC_REQUIREMENT, + requirements.NODEMAP_REQUIREMENT, + requirements.SHARESAFE_REQUIREMENT, } for name in compression.compengines: engine = compression.compengines[name] @@ -131,10 +133,10 @@ b'dotencode', b'fncache', b'generaldelta', - localrepo.SPARSEREVLOG_REQUIREMENT, - localrepo.SIDEDATA_REQUIREMENT, - localrepo.COPIESSDC_REQUIREMENT, - localrepo.NODEMAP_REQUIREMENT, + requirements.SPARSEREVLOG_REQUIREMENT, + requirements.SIDEDATA_REQUIREMENT, + requirements.COPIESSDC_REQUIREMENT, + requirements.NODEMAP_REQUIREMENT, } for name in compression.compengines: engine = compression.compengines[name] @@ -338,7 +340,7 @@ class sparserevlog(requirementformatvariant): name = b'sparserevlog' - _requirement = localrepo.SPARSEREVLOG_REQUIREMENT + _requirement = requirements.SPARSEREVLOG_REQUIREMENT default = True @@ -364,7 +366,7 @@ class sidedata(requirementformatvariant): name = b'sidedata' - _requirement = localrepo.SIDEDATA_REQUIREMENT + _requirement = requirements.SIDEDATA_REQUIREMENT default = False @@ -380,7 +382,7 @@ class persistentnodemap(requirementformatvariant): name = b'persistent-nodemap' - _requirement = localrepo.NODEMAP_REQUIREMENT + _requirement = requirements.NODEMAP_REQUIREMENT default = False @@ -395,7 +397,7 @@ class copiessdc(requirementformatvariant): name = b'copies-sdc' - 
_requirement = localrepo.COPIESSDC_REQUIREMENT + _requirement = requirements.COPIESSDC_REQUIREMENT default = False @@ -725,7 +727,7 @@ sidedatacompanion = None removedreqs = srcrepo.requirements - dstrepo.requirements addedreqs = dstrepo.requirements - srcrepo.requirements - if localrepo.SIDEDATA_REQUIREMENT in removedreqs: + if requirements.SIDEDATA_REQUIREMENT in removedreqs: def sidedatacompanion(rl, rev): rl = getattr(rl, '_revlog', rl) @@ -733,9 +735,9 @@ return True, (), {} return False, (), {} - elif localrepo.COPIESSDC_REQUIREMENT in addedreqs: + elif requirements.COPIESSDC_REQUIREMENT in addedreqs: sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo) - elif localrepo.COPIESSDC_REQUIREMENT in removedreqs: + elif requirements.COPIESSDC_REQUIREMENT in removedreqs: sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo) return sidedatacompanion diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/utils/resourceutil.py --- a/mercurial/utils/resourceutil.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/utils/resourceutil.py Fri Sep 18 10:48:43 2020 -0400 @@ -55,6 +55,8 @@ try: + # importlib.resources exists from Python 3.7; see fallback in except clause + # further down from importlib import resources from .. import encoding @@ -78,6 +80,8 @@ except (ImportError, AttributeError): + # importlib.resources was not found (almost definitely because we're on a + # Python version before 3.7) def open_resource(package, name): path = os.path.join(_package_path(package), name) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/utils/storageutil.py --- a/mercurial/utils/storageutil.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/utils/storageutil.py Fri Sep 18 10:48:43 2020 -0400 @@ -180,9 +180,9 @@ ``fileid`` can be: - * A 20 byte binary node. + * A 20 or 32 byte binary node. * An integer revision number - * A 40 byte hex node. + * A 40 or 64 byte hex node. * A bytes that can be parsed as an integer representing a revision number. 
``identifier`` is used to populate ``error.LookupError`` with an identifier @@ -198,14 +198,14 @@ b'%d' % fileid, identifier, _(b'no match found') ) - if len(fileid) == 20: + if len(fileid) in (20, 32): try: store.rev(fileid) return fileid except error.LookupError: pass - if len(fileid) == 40: + if len(fileid) in (40, 64): try: rawnode = bin(fileid) store.rev(rawnode) diff -r bd5b2b29b82d -r e3df1f560d9a mercurial/worker.py --- a/mercurial/worker.py Sun Sep 13 15:59:23 2020 +0900 +++ b/mercurial/worker.py Fri Sep 18 10:48:43 2020 -0400 @@ -71,8 +71,12 @@ def __init__(self, wrapped): self._wrapped = wrapped - def __getattr__(self, attr): - return getattr(self._wrapped, attr) + # Do NOT implement readinto() by making it delegate to + # _wrapped.readinto(), since that is unbuffered. The unpickler is fine + # with just read() and readline(), so we don't need to implement it. + + def readline(self): + return self._wrapped.readline() # issue multiple reads until size is fulfilled def read(self, size=-1): @@ -91,7 +95,7 @@ del view del buf[pos:] - return buf + return bytes(buf) else: @@ -211,7 +215,7 @@ parentpid = os.getpid() pipes = [] retval = {} - for pargs in partition(args, workers): + for pargs in partition(args, min(workers, len(args))): # Every worker gets its own pipe to send results on, so we don't have to # implement atomic writes larger than PIPE_BUF. Each forked process has # its own pipe's descriptors in the local variables, and the parent diff -r bd5b2b29b82d -r e3df1f560d9a relnotes/next --- a/relnotes/next Sun Sep 13 15:59:23 2020 +0900 +++ b/relnotes/next Fri Sep 18 10:48:43 2020 -0400 @@ -1,5 +1,9 @@ == New Features == + * `hg mv -A` can now be used with `--at-rev`. It behaves just like + `hg cp -A --at-rev`, i.e. it marks the destination as a copy of the + source whether or not the source still exists (but the source must + exist in the parent revision). 
== New Experimental Features == diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/dirstate/dirstate_map.rs --- a/rust/hg-core/src/dirstate/dirstate_map.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/dirstate/dirstate_map.rs Fri Sep 18 10:48:43 2020 -0400 @@ -364,11 +364,17 @@ return Ok(None); } - let parents = parse_dirstate( - &mut self.state_map, - &mut self.copy_map, - file_contents, - )?; + let (parents, entries, copies) = parse_dirstate(file_contents)?; + self.state_map.extend( + entries + .into_iter() + .map(|(path, entry)| (path.to_owned(), entry)), + ); + self.copy_map.extend( + copies + .into_iter() + .map(|(path, copy)| (path.to_owned(), copy.to_owned())), + ); if !self.dirty_parents { self.set_parents(&parents); diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/dirstate/parsers.rs --- a/rust/hg-core/src/dirstate/parsers.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/dirstate/parsers.rs Fri Sep 18 10:48:43 2020 -0400 @@ -19,17 +19,21 @@ /// Dirstate entries have a static part of 8 + 32 + 32 + 32 + 32 bits. const MIN_ENTRY_SIZE: usize = 17; -// TODO parse/pack: is mutate-on-loop better for performance? 
+type ParseResult<'a> = ( + DirstateParents, + Vec<(&'a HgPath, DirstateEntry)>, + Vec<(&'a HgPath, &'a HgPath)>, +); #[timed] pub fn parse_dirstate( - state_map: &mut StateMap, - copy_map: &mut CopyMap, contents: &[u8], -) -> Result { +) -> Result { if contents.len() < PARENT_SIZE * 2 { return Err(DirstateParseError::TooLittleData); } + let mut copies = vec![]; + let mut entries = vec![]; let mut curr_pos = PARENT_SIZE * 2; let parents = DirstateParents { @@ -63,24 +67,21 @@ }; if let Some(copy_path) = copy { - copy_map.insert( - HgPath::new(path).to_owned(), - HgPath::new(copy_path).to_owned(), - ); + copies.push((HgPath::new(path), HgPath::new(copy_path))); }; - state_map.insert( - HgPath::new(path).to_owned(), + entries.push(( + HgPath::new(path), DirstateEntry { state, mode, size, mtime, }, - ); + )); curr_pos = curr_pos + MIN_ENTRY_SIZE + (path_len); } - Ok(parents) + Ok((parents, entries, copies)) } /// `now` is the duration in seconds since the Unix epoch @@ -285,14 +286,17 @@ pack_dirstate(&mut state_map, ©map, parents.clone(), now) .unwrap(); - let mut new_state_map: StateMap = FastHashMap::default(); - let mut new_copy_map: CopyMap = FastHashMap::default(); - let new_parents = parse_dirstate( - &mut new_state_map, - &mut new_copy_map, - result.as_slice(), - ) - .unwrap(); + let (new_parents, entries, copies) = + parse_dirstate(result.as_slice()).unwrap(); + let new_state_map: StateMap = entries + .into_iter() + .map(|(path, entry)| (path.to_owned(), entry)) + .collect(); + let new_copy_map: CopyMap = copies + .into_iter() + .map(|(path, copy)| (path.to_owned(), copy.to_owned())) + .collect(); + assert_eq!( (parents, state_map, copymap), (new_parents, new_state_map, new_copy_map) @@ -360,14 +364,17 @@ pack_dirstate(&mut state_map, ©map, parents.clone(), now) .unwrap(); - let mut new_state_map: StateMap = FastHashMap::default(); - let mut new_copy_map: CopyMap = FastHashMap::default(); - let new_parents = parse_dirstate( - &mut new_state_map, - &mut 
new_copy_map, - result.as_slice(), - ) - .unwrap(); + let (new_parents, entries, copies) = + parse_dirstate(result.as_slice()).unwrap(); + let new_state_map: StateMap = entries + .into_iter() + .map(|(path, entry)| (path.to_owned(), entry)) + .collect(); + let new_copy_map: CopyMap = copies + .into_iter() + .map(|(path, copy)| (path.to_owned(), copy.to_owned())) + .collect(); + assert_eq!( (parents, state_map, copymap), (new_parents, new_state_map, new_copy_map) @@ -403,14 +410,16 @@ pack_dirstate(&mut state_map, ©map, parents.clone(), now) .unwrap(); - let mut new_state_map: StateMap = FastHashMap::default(); - let mut new_copy_map: CopyMap = FastHashMap::default(); - let new_parents = parse_dirstate( - &mut new_state_map, - &mut new_copy_map, - result.as_slice(), - ) - .unwrap(); + let (new_parents, entries, copies) = + parse_dirstate(result.as_slice()).unwrap(); + let new_state_map: StateMap = entries + .into_iter() + .map(|(path, entry)| (path.to_owned(), entry)) + .collect(); + let new_copy_map: CopyMap = copies + .into_iter() + .map(|(path, copy)| (path.to_owned(), copy.to_owned())) + .collect(); assert_eq!( ( diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/dirstate/status.rs --- a/rust/hg-core/src/dirstate/status.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/dirstate/status.rs Fri Sep 18 10:48:43 2020 -0400 @@ -13,7 +13,6 @@ dirstate::SIZE_FROM_OTHER_PARENT, filepatterns::PatternFileWarning, matchers::{get_ignore_function, Matcher, VisitChildrenSet}, - operations::Operation, utils::{ files::{find_dirs, HgMetadata}, hg_path::{ diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/lib.rs --- a/rust/hg-core/src/lib.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/lib.rs Fri Sep 18 10:48:43 2020 -0400 @@ -57,6 +57,7 @@ pub enum DirstateParseError { TooLittleData, Overflow, + // TODO refactor to use bytes instead of String CorruptedEntry(String), Damaged, } diff -r bd5b2b29b82d -r e3df1f560d9a 
rust/hg-core/src/operations/dirstate_status.rs --- a/rust/hg-core/src/operations/dirstate_status.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/operations/dirstate_status.rs Fri Sep 18 10:48:43 2020 -0400 @@ -7,7 +7,6 @@ use crate::dirstate::status::{build_response, Dispatch, HgPathCow, Status}; use crate::matchers::Matcher; -use crate::operations::Operation; use crate::{DirstateStatus, StatusError}; /// A tuple of the paths that need to be checked in the filelog because it's @@ -15,10 +14,8 @@ /// files. pub type LookupAndStatus<'a> = (Vec>, DirstateStatus<'a>); -impl<'a, M: Matcher + Sync> Operation> for Status<'a, M> { - type Error = StatusError; - - fn run(&self) -> Result, Self::Error> { +impl<'a, M: Matcher + Sync> Status<'a, M> { + pub(crate) fn run(&self) -> Result, StatusError> { let (traversed_sender, traversed_receiver) = crossbeam::channel::unbounded(); diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/operations/find_root.rs --- a/rust/hg-core/src/operations/find_root.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/operations/find_root.rs Fri Sep 18 10:48:43 2020 -0400 @@ -1,4 +1,3 @@ -use super::Operation; use std::fmt; use std::path::{Path, PathBuf}; @@ -45,12 +44,8 @@ current_dir: Some(current_dir), } } -} -impl<'a> Operation for FindRoot<'a> { - type Error = FindRootError; - - fn run(&self) -> Result { + pub fn run(&self) -> Result { let current_dir = match self.current_dir { None => std::env::current_dir().or_else(|e| { Err(FindRootError { @@ -61,10 +56,10 @@ }; if current_dir.join(".hg").exists() { - return Ok(current_dir.into()); + return Ok(current_dir); } - let mut ancestors = current_dir.ancestors(); - while let Some(parent) = ancestors.next() { + let ancestors = current_dir.ancestors(); + for parent in ancestors { if parent.join(".hg").exists() { return Ok(parent.into()); } diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/operations/list_tracked_files.rs --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/rust/hg-core/src/operations/list_tracked_files.rs Fri Sep 18 10:48:43 2020 -0400 @@ -0,0 +1,94 @@ +// list_tracked_files.rs +// +// Copyright 2020 Antoine Cezar +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +use super::find_root; +use crate::dirstate::parsers::parse_dirstate; +use crate::utils::hg_path::HgPath; +use crate::{DirstateParseError, EntryState}; +use rayon::prelude::*; +use std::convert::From; +use std::fmt; +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +/// Kind of error encoutered by ListTrackedFiles +#[derive(Debug)] +pub enum ListTrackedFilesErrorKind { + ParseError(DirstateParseError), +} + +/// A ListTrackedFiles error +#[derive(Debug)] +pub struct ListTrackedFilesError { + /// Kind of error encoutered by ListTrackedFiles + pub kind: ListTrackedFilesErrorKind, +} + +impl std::error::Error for ListTrackedFilesError {} + +impl fmt::Display for ListTrackedFilesError { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + unimplemented!() + } +} + +impl From for ListTrackedFilesError { + fn from(kind: ListTrackedFilesErrorKind) -> Self { + ListTrackedFilesError { kind } + } +} + +/// List files under Mercurial control in the working directory +pub struct ListTrackedFiles { + root: PathBuf, +} + +impl ListTrackedFiles { + pub fn new() -> Result { + let root = find_root::FindRoot::new().run()?; + Ok(ListTrackedFiles { root }) + } + + /// Load the tracked files data from disk + pub fn load(&self) -> Result { + let dirstate = &self.root.join(".hg/dirstate"); + let content = fs::read(&dirstate)?; + Ok(ListDirstateTrackedFiles { content }) + } + + /// Returns the repository root directory + /// TODO I think this is a crutch that creates a dependency that should not + /// be there. Operations that need the root of the repository should get + /// it themselves, probably in a lazy fashion. 
But this would make the + /// current series even larger, so this is simplified for now. + pub fn get_root(&self) -> &Path { + &self.root + } +} + +/// List files under Mercurial control in the working directory +/// by reading the dirstate +pub struct ListDirstateTrackedFiles { + content: Vec, +} + +impl ListDirstateTrackedFiles { + pub fn run(&self) -> Result, ListTrackedFilesError> { + let (_, entries, _) = parse_dirstate(&self.content) + .map_err(ListTrackedFilesErrorKind::ParseError)?; + let mut files: Vec<&HgPath> = entries + .into_iter() + .filter_map(|(path, entry)| match entry.state { + EntryState::Removed => None, + _ => Some(path), + }) + .collect(); + files.par_sort_unstable(); + Ok(files) + } +} diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/operations/mod.rs --- a/rust/hg-core/src/operations/mod.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/operations/mod.rs Fri Sep 18 10:48:43 2020 -0400 @@ -1,13 +1,17 @@ +//! A distinction is made between operations and commands. +//! An operation is what can be done whereas a command is what is exposed by +//! the cli. A single command can use several operations to achieve its goal. + mod dirstate_status; mod find_root; +mod list_tracked_files; pub use find_root::{FindRoot, FindRootError, FindRootErrorKind}; +pub use list_tracked_files::{ + ListTrackedFiles, ListTrackedFilesError, ListTrackedFilesErrorKind, +}; -/// An interface for high-level hg operations. -/// -/// A distinction is made between operation and commands. -/// An operation is what can be done whereas a command is what is exposed by -/// the cli. A single command can use several operations to achieve its goal. -pub trait Operation { - type Error; - fn run(&self) -> Result; -} +// TODO add an `Operation` trait when GAT have landed (rust #44265): +// there is no way to currently define a trait which can both return +// references to `self` and to passed data, which is what we would need. 
+// Generic Associated Types may fix this and allow us to have a unified +// interface. diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-core/src/utils/files.rs --- a/rust/hg-core/src/utils/files.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-core/src/utils/files.rs Fri Sep 18 10:48:43 2020 -0400 @@ -16,7 +16,7 @@ }; use lazy_static::lazy_static; use same_file::is_same_file; -use std::borrow::ToOwned; +use std::borrow::{Cow, ToOwned}; use std::fs::Metadata; use std::iter::FusedIterator; use std::ops::Deref; @@ -248,6 +248,66 @@ } } +/// Returns the representation of the path relative to the current working +/// directory for display purposes. +/// +/// `cwd` is a `HgPath`, so it is considered relative to the root directory +/// of the repository. +/// +/// # Examples +/// +/// ``` +/// use hg::utils::hg_path::HgPath; +/// use hg::utils::files::relativize_path; +/// use std::borrow::Cow; +/// +/// let file = HgPath::new(b"nested/file"); +/// let cwd = HgPath::new(b""); +/// assert_eq!(relativize_path(file, cwd), Cow::Borrowed(b"nested/file")); +/// +/// let cwd = HgPath::new(b"nested"); +/// assert_eq!(relativize_path(file, cwd), Cow::Borrowed(b"file")); +/// +/// let cwd = HgPath::new(b"other"); +/// assert_eq!(relativize_path(file, cwd), Cow::Borrowed(b"../nested/file")); +/// ``` +pub fn relativize_path(path: &HgPath, cwd: impl AsRef) -> Cow<[u8]> { + if cwd.as_ref().is_empty() { + Cow::Borrowed(path.as_bytes()) + } else { + let mut res: Vec = Vec::new(); + let mut path_iter = path.as_bytes().split(|b| *b == b'/').peekable(); + let mut cwd_iter = + cwd.as_ref().as_bytes().split(|b| *b == b'/').peekable(); + loop { + match (path_iter.peek(), cwd_iter.peek()) { + (Some(a), Some(b)) if a == b => (), + _ => break, + } + path_iter.next(); + cwd_iter.next(); + } + let mut need_sep = false; + for _ in cwd_iter { + if need_sep { + res.extend(b"/") + } else { + need_sep = true + }; + res.extend(b".."); + } + for c in path_iter { + if need_sep { + res.extend(b"/") + } else { 
+ need_sep = true + }; + res.extend(c); + } + Cow::Owned(res) + } +} + #[cfg(test)] mod tests { use super::*; diff -r bd5b2b29b82d -r e3df1f560d9a rust/hg-cpython/src/parsers.rs --- a/rust/hg-cpython/src/parsers.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/hg-cpython/src/parsers.rs Fri Sep 18 10:48:43 2020 -0400 @@ -14,7 +14,7 @@ PythonObject, ToPyObject, }; use hg::{ - pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, + pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry, DirstatePackError, DirstateParents, DirstateParseError, FastHashMap, PARENT_SIZE, }; @@ -29,11 +29,17 @@ copymap: PyDict, st: PyBytes, ) -> PyResult { - let mut dirstate_map = FastHashMap::default(); - let mut copies = FastHashMap::default(); + match parse_dirstate(st.data(py)) { + Ok((parents, entries, copies)) => { + let dirstate_map: FastHashMap = entries + .into_iter() + .map(|(path, entry)| (path.to_owned(), entry)) + .collect(); + let copy_map: FastHashMap = copies + .into_iter() + .map(|(path, copy)| (path.to_owned(), copy.to_owned())) + .collect(); - match parse_dirstate(&mut dirstate_map, &mut copies, st.data(py)) { - Ok(parents) => { for (filename, entry) in &dirstate_map { dmap.set_item( py, @@ -41,7 +47,7 @@ make_dirstate_tuple(py, entry)?, )?; } - for (path, copy_path) in copies { + for (path, copy_path) in copy_map { copymap.set_item( py, PyBytes::new(py, path.as_bytes()), diff -r bd5b2b29b82d -r e3df1f560d9a rust/rhg/src/commands.rs --- a/rust/rhg/src/commands.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/rhg/src/commands.rs Fri Sep 18 10:48:43 2020 -0400 @@ -1,9 +1,11 @@ +pub mod files; pub mod root; use crate::error::CommandError; +use crate::ui::Ui; /// The common trait for rhg commands /// /// Normalize the interface of the commands provided by rhg pub trait Command { - fn run(&self) -> Result<(), CommandError>; + fn run(&self, ui: &Ui) -> Result<(), CommandError>; } diff -r bd5b2b29b82d -r e3df1f560d9a rust/rhg/src/commands/files.rs --- /dev/null Thu 
Jan 01 00:00:00 1970 +0000 +++ b/rust/rhg/src/commands/files.rs Fri Sep 18 10:48:43 2020 -0400 @@ -0,0 +1,58 @@ +use crate::commands::Command; +use crate::error::{CommandError, CommandErrorKind}; +use crate::ui::Ui; +use hg::operations::{ListTrackedFiles, ListTrackedFilesErrorKind}; +use hg::utils::files::{get_bytes_from_path, relativize_path}; +use hg::utils::hg_path::HgPathBuf; + +pub const HELP_TEXT: &str = " +List tracked files. + +Returns 0 on success. +"; + +pub struct FilesCommand {} + +impl FilesCommand { + pub fn new() -> Self { + FilesCommand {} + } +} + +impl Command for FilesCommand { + fn run(&self, ui: &Ui) -> Result<(), CommandError> { + let operation_builder = ListTrackedFiles::new()?; + let operation = operation_builder.load().map_err(|err| { + CommandErrorKind::Abort(Some( + [b"abort: ", err.to_string().as_bytes(), b"\n"] + .concat() + .to_vec(), + )) + })?; + let files = operation.run().map_err(|err| match err.kind { + ListTrackedFilesErrorKind::ParseError(_) => { + CommandErrorKind::Abort(Some( + // TODO find a better error message + b"abort: parse error\n".to_vec(), + )) + } + })?; + + let cwd = std::env::current_dir() + .or_else(|e| Err(CommandErrorKind::CurrentDirNotFound(e)))?; + let rooted_cwd = cwd + .strip_prefix(operation_builder.get_root()) + .expect("cwd was already checked within the repository"); + let rooted_cwd = HgPathBuf::from(get_bytes_from_path(rooted_cwd)); + + let mut stdout = ui.stdout_buffer(); + + for file in files { + stdout.write_all(relativize_path(file, &rooted_cwd).as_ref())?; + stdout.write_all(b"\n")?; + } + stdout.flush()?; + + Ok(()) + } +} diff -r bd5b2b29b82d -r e3df1f560d9a rust/rhg/src/commands/root.rs --- a/rust/rhg/src/commands/root.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/rhg/src/commands/root.rs Fri Sep 18 10:48:43 2020 -0400 @@ -1,9 +1,8 @@ use crate::commands::Command; -use crate::error::{CommandError, CommandErrorKind}; +use crate::error::CommandError; use crate::ui::Ui; -use 
hg::operations::{FindRoot, FindRootError, FindRootErrorKind, Operation}; +use hg::operations::FindRoot; use hg::utils::files::get_bytes_from_path; -use std::path::PathBuf; pub const HELP_TEXT: &str = " Print the root directory of the current repository. @@ -11,66 +10,23 @@ Returns 0 on success. "; -pub struct RootCommand { - ui: Ui, -} +pub struct RootCommand {} impl RootCommand { pub fn new() -> Self { - RootCommand { ui: Ui::new() } - } - - fn display_found_path( - &self, - path_buf: PathBuf, - ) -> Result<(), CommandError> { - let bytes = get_bytes_from_path(path_buf); - - // TODO use formating macro - self.ui.write_stdout(&[bytes.as_slice(), b"\n"].concat())?; - - Err(CommandErrorKind::Ok.into()) - } - - fn display_error(&self, error: FindRootError) -> Result<(), CommandError> { - match error.kind { - FindRootErrorKind::RootNotFound(path) => { - let bytes = get_bytes_from_path(path); - - // TODO use formating macro - self.ui.write_stderr( - &[ - b"abort: no repository found in '", - bytes.as_slice(), - b"' (.hg not found)!\n", - ] - .concat(), - )?; - - Err(CommandErrorKind::RootNotFound.into()) - } - FindRootErrorKind::GetCurrentDirError(e) => { - // TODO use formating macro - self.ui.write_stderr( - &[ - b"abort: error getting current working directory: ", - e.to_string().as_bytes(), - b"\n", - ] - .concat(), - )?; - - Err(CommandErrorKind::CurrentDirNotFound.into()) - } - } + RootCommand {} } } impl Command for RootCommand { - fn run(&self) -> Result<(), CommandError> { - match FindRoot::new().run() { - Ok(path_buf) => self.display_found_path(path_buf), - Err(e) => self.display_error(e), - } + fn run(&self, ui: &Ui) -> Result<(), CommandError> { + let path_buf = FindRoot::new().run()?; + + let bytes = get_bytes_from_path(path_buf); + + // TODO use formating macro + ui.write_stdout(&[bytes.as_slice(), b"\n"].concat())?; + + Ok(()) } } diff -r bd5b2b29b82d -r e3df1f560d9a rust/rhg/src/error.rs --- a/rust/rhg/src/error.rs Sun Sep 13 15:59:23 2020 +0900 +++ 
b/rust/rhg/src/error.rs Fri Sep 18 10:48:43 2020 -0400 @@ -1,45 +1,82 @@ use crate::exitcode; use crate::ui::UiError; +use hg::operations::{FindRootError, FindRootErrorKind}; +use hg::utils::files::get_bytes_from_path; use std::convert::From; +use std::path::PathBuf; /// The kind of command error -#[derive(Debug, PartialEq)] +#[derive(Debug)] pub enum CommandErrorKind { - /// The command finished without error - Ok, /// The root of the repository cannot be found - RootNotFound, + RootNotFound(PathBuf), /// The current directory cannot be found - CurrentDirNotFound, + CurrentDirNotFound(std::io::Error), /// The standard output stream cannot be written to StdoutError, /// The standard error stream cannot be written to StderrError, + /// The command aborted + Abort(Option>), } impl CommandErrorKind { pub fn get_exit_code(&self) -> exitcode::ExitCode { match self { - CommandErrorKind::Ok => exitcode::OK, - CommandErrorKind::RootNotFound => exitcode::ABORT, - CommandErrorKind::CurrentDirNotFound => exitcode::ABORT, + CommandErrorKind::RootNotFound(_) => exitcode::ABORT, + CommandErrorKind::CurrentDirNotFound(_) => exitcode::ABORT, CommandErrorKind::StdoutError => exitcode::ABORT, CommandErrorKind::StderrError => exitcode::ABORT, + CommandErrorKind::Abort(_) => exitcode::ABORT, + } + } + + /// Return the message corresponding to the error kind if any + pub fn get_error_message_bytes(&self) -> Option> { + match self { + // TODO use formating macro + CommandErrorKind::RootNotFound(path) => { + let bytes = get_bytes_from_path(path); + Some( + [ + b"abort: no repository found in '", + bytes.as_slice(), + b"' (.hg not found)!\n", + ] + .concat(), + ) + } + // TODO use formating macro + CommandErrorKind::CurrentDirNotFound(e) => Some( + [ + b"abort: error getting current working directory: ", + e.to_string().as_bytes(), + b"\n", + ] + .concat(), + ), + CommandErrorKind::Abort(message) => message.to_owned(), + _ => None, } } } /// The error type for the Command trait 
-#[derive(Debug, PartialEq)] +#[derive(Debug)] pub struct CommandError { pub kind: CommandErrorKind, } impl CommandError { /// Exist the process with the corresponding exit code. - pub fn exit(&self) -> () { + pub fn exit(&self) { std::process::exit(self.kind.get_exit_code()) } + + /// Return the message corresponding to the command error if any + pub fn get_error_message_bytes(&self) -> Option> { + self.kind.get_error_message_bytes() + } } impl From for CommandError { @@ -58,3 +95,16 @@ } } } + +impl From for CommandError { + fn from(err: FindRootError) -> Self { + match err.kind { + FindRootErrorKind::RootNotFound(path) => CommandError { + kind: CommandErrorKind::RootNotFound(path), + }, + FindRootErrorKind::GetCurrentDirError(e) => CommandError { + kind: CommandErrorKind::CurrentDirNotFound(e), + }, + } + } +} diff -r bd5b2b29b82d -r e3df1f560d9a rust/rhg/src/main.rs --- a/rust/rhg/src/main.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/rhg/src/main.rs Fri Sep 18 10:48:43 2020 -0400 @@ -16,15 +16,22 @@ .version("0.0.1") .subcommand( SubCommand::with_name("root").about(commands::root::HELP_TEXT), + ) + .subcommand( + SubCommand::with_name("files").about(commands::files::HELP_TEXT), ); - let matches = app.clone().get_matches_safe().unwrap_or_else(|_| { + let matches = app.clone().get_matches_safe().unwrap_or_else(|err| { + let _ = ui::Ui::new().writeln_stderr_str(&err.message); std::process::exit(exitcode::UNIMPLEMENTED_COMMAND) }); + let ui = ui::Ui::new(); + let command_result = match matches.subcommand_name() { Some(name) => match name { - "root" => commands::root::RootCommand::new().run(), + "root" => commands::root::RootCommand::new().run(&ui), + "files" => commands::files::FilesCommand::new().run(&ui), _ => std::process::exit(exitcode::UNIMPLEMENTED_COMMAND), }, _ => { @@ -37,6 +44,15 @@ match command_result { Ok(_) => std::process::exit(exitcode::OK), - Err(e) => e.exit(), + Err(e) => { + let message = e.get_error_message_bytes(); + if let Some(msg) = message 
{ + match ui.write_stderr(&msg) { + Ok(_) => (), + Err(_) => std::process::exit(exitcode::ABORT), + }; + }; + e.exit() + } } } diff -r bd5b2b29b82d -r e3df1f560d9a rust/rhg/src/ui.rs --- a/rust/rhg/src/ui.rs Sun Sep 13 15:59:23 2020 +0900 +++ b/rust/rhg/src/ui.rs Fri Sep 18 10:48:43 2020 -0400 @@ -1,7 +1,11 @@ use std::io; -use std::io::Write; +use std::io::{ErrorKind, Write}; -pub struct Ui {} +#[derive(Debug)] +pub struct Ui { + stdout: std::io::Stdout, + stderr: std::io::Stderr, +} /// The kind of user interface error pub enum UiError { @@ -14,41 +18,88 @@ /// The commandline user interface impl Ui { pub fn new() -> Self { - Ui {} + Ui { + stdout: std::io::stdout(), + stderr: std::io::stderr(), + } + } + + /// Returns a buffered handle on stdout for faster batch printing + /// operations. + pub fn stdout_buffer(&self) -> StdoutBuffer { + StdoutBuffer::new(self.stdout.lock()) } /// Write bytes to stdout pub fn write_stdout(&self, bytes: &[u8]) -> Result<(), UiError> { - let mut stdout = io::stdout(); - - self.write_stream(&mut stdout, bytes) - .or_else(|e| self.into_stdout_error(e))?; + let mut stdout = self.stdout.lock(); - stdout.flush().or_else(|e| self.into_stdout_error(e)) - } + stdout.write_all(bytes).or_else(handle_stdout_error)?; - fn into_stdout_error(&self, error: io::Error) -> Result<(), UiError> { - self.write_stderr( - &[b"abort: ", error.to_string().as_bytes(), b"\n"].concat(), - )?; - Err(UiError::StdoutError(error)) + stdout.flush().or_else(handle_stdout_error) } /// Write bytes to stderr pub fn write_stderr(&self, bytes: &[u8]) -> Result<(), UiError> { - let mut stderr = io::stderr(); + let mut stderr = self.stderr.lock(); + + stderr.write_all(bytes).or_else(handle_stderr_error)?; + + stderr.flush().or_else(handle_stderr_error) + } + + /// Write string line to stderr + pub fn writeln_stderr_str(&self, s: &str) -> Result<(), UiError> { + self.write_stderr(&format!("{}\n", s).as_bytes()) + } +} - self.write_stream(&mut stderr, bytes) - .or_else(|e| 
Err(UiError::StderrError(e)))?; +/// A buffered stdout writer for faster batch printing operations. +pub struct StdoutBuffer { + buf: io::BufWriter, +} - stderr.flush().or_else(|e| Err(UiError::StderrError(e))) +impl StdoutBuffer { + pub fn new(writer: W) -> Self { + let buf = io::BufWriter::new(writer); + Self { buf } + } + + /// Write bytes to stdout buffer + pub fn write_all(&mut self, bytes: &[u8]) -> Result<(), UiError> { + self.buf.write_all(bytes).or_else(handle_stdout_error) } - fn write_stream( - &self, - stream: &mut impl Write, - bytes: &[u8], - ) -> Result<(), io::Error> { - stream.write_all(bytes) + /// Flush bytes to stdout + pub fn flush(&mut self) -> Result<(), UiError> { + self.buf.flush().or_else(handle_stdout_error) } } + +/// Sometimes writing to stdout is not possible, try writing to stderr to +/// signal that failure, otherwise just bail. +fn handle_stdout_error(error: io::Error) -> Result<(), UiError> { + if let ErrorKind::BrokenPipe = error.kind() { + // This makes `| head` work for example + return Ok(()); + } + let mut stderr = io::stderr(); + + stderr + .write_all(&[b"abort: ", error.to_string().as_bytes(), b"\n"].concat()) + .map_err(UiError::StderrError)?; + + stderr.flush().map_err(UiError::StderrError)?; + + Err(UiError::StdoutError(error)) +} + +/// Sometimes writing to stderr is not possible. 
+fn handle_stderr_error(error: io::Error) -> Result<(), UiError> { + // A broken pipe should not result in a error + // like with `| head` for example + if let ErrorKind::BrokenPipe = error.kind() { + return Ok(()); + } + Err(UiError::StdoutError(error)) +} diff -r bd5b2b29b82d -r e3df1f560d9a setup.py --- a/setup.py Sun Sep 13 15:59:23 2020 +0900 +++ b/setup.py Fri Sep 18 10:48:43 2020 -0400 @@ -1268,6 +1268,7 @@ 'mercurial.hgweb', 'mercurial.interfaces', 'mercurial.pure', + 'mercurial.templates', 'mercurial.thirdparty', 'mercurial.thirdparty.attr', 'mercurial.thirdparty.zope', @@ -1292,6 +1293,13 @@ 'hgext3rd', 'hgdemandimport', ] + +for name in os.listdir(os.path.join('mercurial', 'templates')): + if name != '__pycache__' and os.path.isdir( + os.path.join('mercurial', 'templates', name) + ): + packages.append('mercurial.templates.%s' % name) + if sys.version_info[0] == 2: packages.extend( [ @@ -1614,11 +1622,8 @@ msvccompiler.MSVCCompiler = HackedMSVCCompiler packagedata = { - 'mercurial': [ - 'locale/*/LC_MESSAGES/hg.mo', - 'defaultrc/*.rc', - 'dummycert.pem', - ], + 'mercurial': ['locale/*/LC_MESSAGES/hg.mo', 'dummycert.pem',], + 'mercurial.defaultrc': ['*.rc',], 'mercurial.helptext': ['*.txt',], 'mercurial.helptext.internals': ['*.txt',], } @@ -1630,11 +1635,8 @@ for root in ('templates',): for curdir, dirs, files in os.walk(os.path.join('mercurial', root)): - curdir = curdir.split(os.sep, 1)[1] - dirs[:] = filter(ordinarypath, dirs) - for f in filter(ordinarypath, files): - f = os.path.join(curdir, f) - packagedata['mercurial'].append(f) + packagename = curdir.replace(os.sep, '.') + packagedata[packagename] = list(filter(ordinarypath, files)) datafiles = [] diff -r bd5b2b29b82d -r e3df1f560d9a tests/hghave.py --- a/tests/hghave.py Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/hghave.py Fri Sep 18 10:48:43 2020 -0400 @@ -886,8 +886,11 @@ return False -@check("virtualenv", "Python virtualenv support") -def has_virtualenv(): +@check("py2virtualenv", "Python2 
virtualenv support") +def has_py2virtualenv(): + if sys.version_info[0] != 2: + return False + try: import virtualenv diff -r bd5b2b29b82d -r e3df1f560d9a tests/lockdelay.py --- a/tests/lockdelay.py Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/lockdelay.py Fri Sep 18 10:48:43 2020 -0400 @@ -10,11 +10,11 @@ def reposetup(ui, repo): class delayedlockrepo(repo.__class__): - def lock(self): + def lock(self, wait=True): delay = float(os.environ.get('HGPRELOCKDELAY', '0.0')) if delay: time.sleep(delay) - res = super(delayedlockrepo, self).lock() + res = super(delayedlockrepo, self).lock(wait=wait) delay = float(os.environ.get('HGPOSTLOCKDELAY', '0.0')) if delay: time.sleep(delay) diff -r bd5b2b29b82d -r e3df1f560d9a tests/phabricator/phabsend-hash-fixes.json --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/phabricator/phabsend-hash-fixes.json Fri Sep 18 10:48:43 2020 -0400 @@ -0,0 +1,1096 @@ +{ + "version": 1, + "interactions": [ + { + "response": { + "body": { + "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:15 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], 
+ "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "183" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22callsigns%22%3A+%5B%22HG%22%5D%7D%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":{\"diffid\":22437,\"phid\":\"PHID-DIFF-q7y7rru5hbxnq2mtosrf\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/22437\\/\"},\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:15 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "1162" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + 
"application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22file.txt%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22file.txt%22%2C+%22delLines%22%3A+1%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22-mod3%5Cn%2Bcontent%5Cn%22%2C+%22delLines%22%3A+1%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+1%2C+%22oldOffset%22%3A+1%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%7D%2C+%22oldPath%22%3A+%22file.txt%22%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+2%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:16 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "connection": [ + "close" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "x-frame-options": [ + "Deny" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + 
"status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "482" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:17 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "594" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": 
"params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"base review (generate test for phabsend)\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"base review (generate test for phabsend)\"}]},\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:17 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "189" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": 
"params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22base+review+%28generate+test+for+phabsend%29%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":{\"object\":{\"id\":8945,\"phid\":\"PHID-DREV-suqt5s55kjw235uv2vcf\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-76klselssdel6vp\"},{\"phid\":\"PHID-XACT-DREV-atejrjnkqevgpnv\"},{\"phid\":\"PHID-XACT-DREV-wqkucxolugjm4yr\"},{\"phid\":\"PHID-XACT-DREV-pziu2ibzwaljzto\"},{\"phid\":\"PHID-XACT-DREV-k4o6ptid6jztdrx\"}]},\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:18 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "342" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-q7y7rru5hbxnq2mtosrf%22%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22base+review+%28generate+test+for+phabsend%29%22%7D%5D%7D&output=json&__conduit__=1", + "uri": 
"https://phab.mercurial-scm.org//api/differential.revision.edit", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":{\"diffid\":22438,\"phid\":\"PHID-DIFF-6lntv23mzadpzyeaizej\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/22438\\/\"},\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:19 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "1170" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": 
"params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22file.txt%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22file.txt%22%2C+%22delLines%22%3A+1%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22-content%5Cn%2Bmore+content%5Cn%22%2C+%22delLines%22%3A+1%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+1%2C+%22oldOffset%22%3A+1%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%7D%2C+%22oldPath%22%3A+%22file.txt%22%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+2%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:20 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + 
"headers": { + "content-length": [ + "482" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22c2874a398f7e0a139283fad3df053430dac536ff%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:20 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "594" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": 
"params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22c2874a398f7e0a139283fad3df053430dac536ff%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22c2874a398f7e0a139283fad3df053430dac536ff%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22133c1c6c64494d545ad3c8bc4c2e42af215760c1%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"133c1c6c6449 is my parent (generate test for phabsend)\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"133c1c6c6449 is my parent (generate test for phabsend)\"}]},\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:21 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "203" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + 
"application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22133c1c6c6449+is+my+parent+%28generate+test+for+phabsend%29%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":{\"object\":{\"id\":8946,\"phid\":\"PHID-DREV-ja6bdevg5fbykjrpghj4\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-pupj6v3uzdeb6iu\"},{\"phid\":\"PHID-XACT-DREV-czsnsiuaxsecqf4\"},{\"phid\":\"PHID-XACT-DREV-qs6vcl5qj4cqyu2\"},{\"phid\":\"PHID-XACT-DREV-qig4ohigvfnr4h2\"},{\"phid\":\"PHID-XACT-DREV-iv6asp4osxnslvs\"},{\"phid\":\"PHID-XACT-DREV-jn3ojiw6yt3mzuz\"}]},\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:22 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "458" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": 
"params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-6lntv23mzadpzyeaizej%22%7D%2C+%7B%22type%22%3A+%22parents.set%22%2C+%22value%22%3A+%5B%22PHID-DREV-suqt5s55kjw235uv2vcf%22%5D%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22133c1c6c6449+is+my+parent+%28generate+test+for+phabsend%29%22%7D%5D%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":[{\"id\":\"8946\",\"phid\":\"PHID-DREV-ja6bdevg5fbykjrpghj4\",\"title\":\"133c1c6c6449 is my parent (generate test for phabsend)\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D8946\",\"dateCreated\":\"1598307502\",\"dateModified\":\"1598307502\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":1},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-6lntv23mzadpzyeaizej\",\"diffs\":[\"22438\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-nf7kno6lkl3fjsmo5pyp\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-suqt5s55kjw235uv2vcf\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"},{\"id\":\"8945\",\"phid\":\"PHID-DREV-suqt5s55kjw235uv2vcf\",\"title\":\"base review (generate test for 
phabsend)\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D8945\",\"dateCreated\":\"1598307498\",\"dateModified\":\"1598307502\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":1},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-q7y7rru5hbxnq2mtosrf\",\"diffs\":[\"22437\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-nf7kno6lkl3fjsmo5pyp\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:23 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "154" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + 
"application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B8945%2C+8946%5D%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.query", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:23 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "482" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": 
"{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:24 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "594" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22e919cdf3d4fe9a926427b1961601eeaf4b4e2caf%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22437%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + 
"Mon, 24 Aug 2020 22:18:24 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + "code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "482" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%229c9290f945b15b9420fffd5f5fc59260c1cbbcf4%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + }, + { + "response": { + "body": { + "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}" + }, + "headers": { + "referrer-policy": [ + "no-referrer" + ], + "x-xss-protection": [ + "1; mode=block" + ], + "server": [ + "Apache/2.4.10 (Debian)" + ], + "cache-control": [ + "no-store" + ], + "date": [ + "Mon, 24 Aug 2020 22:18:25 GMT" + ], + "transfer-encoding": [ + "chunked" + ], + "expires": [ + "Sat, 01 Jan 2000 00:00:00 GMT" + ], + "x-frame-options": [ + "Deny" + ], + "content-type": [ + "application/json" + ], + "x-content-type-options": [ + "nosniff" + ], + "strict-transport-security": [ + "max-age=0; includeSubdomains; preload" + ] + }, + "status": { + "message": "OK", + 
"code": 200 + } + }, + "request": { + "headers": { + "content-length": [ + "594" + ], + "accept": [ + "application/mercurial-0.1" + ], + "host": [ + "phab.mercurial-scm.org" + ], + "user-agent": [ + "mercurial/proto-1.0 (Mercurial 5.5+173-3de55438d570+20200824)" + ], + "content-type": [ + "application/x-www-form-urlencoded" + ] + }, + "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%229c9290f945b15b9420fffd5f5fc59260c1cbbcf4%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%229c9290f945b15b9420fffd5f5fc59260c1cbbcf4%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22f444f060f4d648731890a4aee1ec5ce372170265%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+22438%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1", + "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", + "method": "POST" + } + } + ] +} \ No newline at end of file diff -r bd5b2b29b82d -r e3df1f560d9a tests/pullext.py --- a/tests/pullext.py Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/pullext.py Fri Sep 18 10:48:43 2020 -0400 @@ -13,8 +13,8 @@ error, extensions, localrepo, + requirements, ) -from mercurial.interfaces import repository def clonecommand(orig, ui, repo, *args, **kwargs): @@ -31,7 +31,7 @@ def featuresetup(ui, features): - features.add(repository.NARROW_REQUIREMENT) + features.add(requirements.NARROW_REQUIREMENT) def extsetup(ui): diff -r bd5b2b29b82d -r e3df1f560d9a tests/run-tests.py --- a/tests/run-tests.py Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/run-tests.py Fri Sep 18 10:48:43 2020 -0400 @@ -2336,7 +2336,6 @@ jobs=1, whitelist=None, blacklist=None, - retest=False, keywords=None, loop=False, runs_per_test=1, @@ -2364,9 +2363,6 @@ backwards compatible behavior which reports skipped tests as part of the results. - retest denotes whether to retest failed tests. 
This arguably belongs - outside of TestSuite. - keywords denotes key words that will be used to filter which tests to execute. This arguably belongs outside of TestSuite. @@ -2377,7 +2373,6 @@ self._jobs = jobs self._whitelist = whitelist self._blacklist = blacklist - self._retest = retest self._keywords = keywords self._loop = loop self._runs_per_test = runs_per_test @@ -2407,10 +2402,6 @@ result.addSkip(test, 'blacklisted') continue - if self._retest and not os.path.exists(test.errpath): - result.addIgnore(test, 'not retesting') - continue - if self._keywords: with open(test.path, 'rb') as f: t = f.read().lower() + test.bname.lower() @@ -3253,6 +3244,14 @@ tests.append({'path': t}) else: tests.append({'path': t}) + + if self.options.retest: + retest_args = [] + for test in tests: + errpath = self._geterrpath(test) + if os.path.exists(errpath): + retest_args.append(test) + tests = retest_args return tests def _runtests(self, testdescs): @@ -3269,13 +3268,7 @@ orig = list(testdescs) while testdescs: desc = testdescs[0] - # desc['path'] is a relative path - if 'case' in desc: - casestr = b'#'.join(desc['case']) - errpath = b'%s#%s.err' % (desc['path'], casestr) - else: - errpath = b'%s.err' % desc['path'] - errpath = os.path.join(self._outputdir, errpath) + errpath = self._geterrpath(desc) if os.path.exists(errpath): break testdescs.pop(0) @@ -3298,7 +3291,6 @@ jobs=jobs, whitelist=self.options.whitelisted, blacklist=self.options.blacklist, - retest=self.options.retest, keywords=kws, loop=self.options.loop, runs_per_test=self.options.runs_per_test, @@ -3346,6 +3338,19 @@ if failed: return 1 + def _geterrpath(self, test): + # test['path'] is a relative path + if 'case' in test: + # for multiple dimensions test cases + casestr = b'#'.join(test['case']) + errpath = b'%s#%s.err' % (test['path'], casestr) + else: + errpath = b'%s.err' % test['path'] + if self.options.outputdir: + self._outputdir = canonpath(_sys2bytes(self.options.outputdir)) + errpath = 
os.path.join(self._outputdir, errpath) + return errpath + def _getport(self, count): port = self._ports.get(count) # do we have a cached entry? if port is None: diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-absorb-unfinished.t --- a/tests/test-absorb-unfinished.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-absorb-unfinished.t Fri Sep 18 10:48:43 2020 -0400 @@ -25,6 +25,6 @@ $ hg --config extensions.rebase= absorb abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-amend.t --- a/tests/test-amend.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-amend.t Fri Sep 18 10:48:43 2020 -0400 @@ -93,6 +93,29 @@ nothing changed [1] +#if obsstore-on + $ hg init repo-merge-state + $ cd repo-merge-state + $ echo a > f + $ hg ci -Aqm a + $ echo b > f + $ hg ci -Aqm b + $ echo c > f + $ hg co -m '.^' + merging f + warning: conflicts while merging f! (edit, then use 'hg resolve --mark') + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges + [1] + $ echo d > f + $ hg resolve -m f + (no more unresolved files) + $ hg ci --amend --config experimental.evolution.allowunstable=True + 1 new orphan changesets + $ hg resolve -l + $ cd .. 
+#endif + Matcher and metadata options $ echo 3 > C diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-annotate.t --- a/tests/test-annotate.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-annotate.t Fri Sep 18 10:48:43 2020 -0400 @@ -479,26 +479,24 @@ $ cat > ../legacyrepo.py < from __future__ import absolute_import - > from mercurial import error, node - > def reposetup(ui, repo): - > class legacyrepo(repo.__class__): - > def _filecommit(self, fctx, manifest1, manifest2, - > linkrev, tr, changelist, includecopymeta): - > fname = fctx.path() - > text = fctx.data() - > flog = self.file(fname) - > fparent1 = manifest1.get(fname, node.nullid) - > fparent2 = manifest2.get(fname, node.nullid) - > meta = {} - > copy = fctx.copysource() - > if copy and copy != fname: - > raise error.Abort('copying is not supported') - > if fparent2 != node.nullid: - > changelist.append(fname) - > return flog.add(text, meta, tr, linkrev, - > fparent1, fparent2) - > raise error.Abort('only merging is supported') - > repo.__class__ = legacyrepo + > from mercurial import commit, error, extensions, node + > def _filecommit(orig, repo, fctx, manifest1, manifest2, + > linkrev, tr, includecopymeta, ms): + > fname = fctx.path() + > text = fctx.data() + > flog = repo.file(fname) + > fparent1 = manifest1.get(fname, node.nullid) + > fparent2 = manifest2.get(fname, node.nullid) + > meta = {} + > copy = fctx.copysource() + > if copy and copy != fname: + > raise error.Abort('copying is not supported') + > if fparent2 != node.nullid: + > return flog.add(text, meta, tr, linkrev, + > fparent1, fparent2), 'modified' + > raise error.Abort('only merging is supported') + > def uisetup(ui): + > extensions.wrapfunction(commit, '_filecommit', _filecommit) > EOF $ cat > baz <> $HGRCPATH << EOF > [fsmonitor] > warn_update_file_count = 2 + > warn_update_file_count_rust = 2 > EOF We should see a warning about no fsmonitor on supported platforms diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-completion.t --- 
a/tests/test-completion.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-completion.t Fri Sep 18 10:48:43 2020 -0400 @@ -258,7 +258,7 @@ cat: output, rev, decode, include, exclude, template clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos - config: untrusted, edit, local, global, template + config: untrusted, edit, local, shared, global, template continue: dry-run copy: forget, after, at-rev, force, include, exclude, dry-run debugancestor: @@ -353,7 +353,7 @@ push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure recover: verify remove: after, force, subrepos, include, exclude, dry-run - rename: after, force, include, exclude, dry-run + rename: after, at-rev, force, include, exclude, dry-run resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template revert: all, date, rev, no-backup, interactive, include, exclude, dry-run rollback: dry-run, force diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-contrib-emacs.t --- a/tests/test-contrib-emacs.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-contrib-emacs.t Fri Sep 18 10:48:43 2020 -0400 @@ -2,7 +2,7 @@ $ emacs -q -no-site-file -batch -l $TESTDIR/../contrib/hg-test-mode.el \ > -f ert-run-tests-batch-and-exit Running 1 tests (*) (glob) - passed 1/1 hg-test-mode--compilation-mode-support + passed 1/1 hg-test-mode--compilation-mode-support* (glob) - Ran 1 tests, 1 results as expected (*) (glob) + Ran 1 tests, 1 results as expected* (glob) diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-convert-identity.t --- a/tests/test-convert-identity.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-convert-identity.t Fri Sep 18 10:48:43 2020 -0400 @@ -8,9 +8,10 @@ > convert = > EOF $ cat <<'EOF' > changefileslist.py - > from mercurial import (changelog, extensions) + > from 
mercurial import (changelog, extensions, metadata) > def wrap(orig, clog, manifest, files, *args, **kwargs): - > return orig(clog, manifest, [b"a"], *args, **kwargs) + > files = metadata.ChangingFiles(touched=[b"a"]) + > return orig(clog, manifest, files, *args, **kwargs) > def extsetup(ui): > extensions.wrapfunction(changelog.changelog, 'add', wrap) > EOF diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-copies-chain-merge.t --- a/tests/test-copies-chain-merge.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-copies-chain-merge.t Fri Sep 18 10:48:43 2020 -0400 @@ -853,3 +853,144 @@ | o 0 i-0 initial commit: a b h + + +Comparing with merging with a deletion (and keeping the file) +------------------------------------------------------------- + +Merge: +- one removing a file (d) +- one updating that file +- the merge keep the modified version of the file (canceling the delete) + +In this case, the file keep on living after the merge. So we should not drop its +copy tracing chain. + + $ hg up 'desc("c-1")' + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg merge 'desc("g-1")' + file 'd' was deleted in local [working copy] but was modified in other [merge rev]. + You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved. + What do you want to do? u + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + [1] + $ hg resolve -t :other d + (no more unresolved files) + $ hg ci -m "mCGm-0" + created new head + + $ hg up 'desc("g-1")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("c-1")' + file 'd' was deleted in other [merge rev] but was modified in local [working copy]. + You can use (c)hanged version, (d)elete, or leave (u)nresolved. + What do you want to do? 
u + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + [1] + $ hg resolve -t :local d + (no more unresolved files) + $ hg ci -m "mGCm-0" + created new head + + $ hg log -G --rev '::(desc("mCGm")+desc("mGCm"))' + @ 31 mGCm-0 + |\ + +---o 30 mCGm-0 + | |/ + | o 25 g-1: update d + | | + o | 6 c-1 delete d + |/ + o 2 i-2: c -move-> d + | + o 1 i-1: a -move-> c + | + o 0 i-0 initial commit: a b h + + +BROKEN: 'a' should be the the source of 'd' in the changeset centric algorithm too + + $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCGm-0")' + A d + a (filelog !) + R a + $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGCm-0")' + A d + a (filelog !) + R a + $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCGm-0")' + A d + $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mGCm-0")' + A d + $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mCGm-0")' + $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGCm-0")' + + +Comparing with merge restoring an untouched deleted file +-------------------------------------------------------- + +Merge: +- one removing a file (d) +- one leaving the file untouched +- the merge actively restore the file to the same content. + +In this case, the file keep on living after the merge. So we should not drop its +copy tracing chain. 
+ + $ hg up 'desc("c-1")' + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg merge 'desc("b-1")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg revert --rev 'desc("b-1")' d + $ hg ci -m "mCB-revert-m-0" + created new head + + $ hg up 'desc("b-1")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("c-1")' + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg revert --rev 'desc("b-1")' d + $ hg ci -m "mBC-revert-m-0" + created new head + + $ hg log -G --rev '::(desc("mCB-revert-m")+desc("mBC-revert-m"))' + @ 33 mBC-revert-m-0 + |\ + +---o 32 mCB-revert-m-0 + | |/ + | o 6 c-1 delete d + | | + o | 5 b-1: b update + |/ + o 2 i-2: c -move-> d + | + o 1 i-1: a -move-> c + | + o 0 i-0 initial commit: a b h + + +BROKEN: 'a' should be the the source of 'd' in the changeset centric algorithm too + + $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCB-revert-m-0")' + M b + A d + a (filelog !) + R a + $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC-revert-m-0")' + M b + A d + a (filelog !) + R a + $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCB-revert-m-0")' + M b + A d + $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mBC-revert-m-0")' + M b + A d + $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mCB-revert-m-0")' + $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBC-revert-m-0")' + diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-fastannotate-hg.t --- a/tests/test-fastannotate-hg.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-fastannotate-hg.t Fri Sep 18 10:48:43 2020 -0400 @@ -481,26 +481,25 @@ and its ancestor by overriding "repo._filecommit". 
$ cat > ../legacyrepo.py < from mercurial import error, node - > def reposetup(ui, repo): - > class legacyrepo(repo.__class__): - > def _filecommit(self, fctx, manifest1, manifest2, - > linkrev, tr, changelist, includecopymeta): - > fname = fctx.path() - > text = fctx.data() - > flog = self.file(fname) - > fparent1 = manifest1.get(fname, node.nullid) - > fparent2 = manifest2.get(fname, node.nullid) - > meta = {} - > copy = fctx.renamed() - > if copy and copy[0] != fname: - > raise error.Abort('copying is not supported') - > if fparent2 != node.nullid: - > changelist.append(fname) - > return flog.add(text, meta, tr, linkrev, - > fparent1, fparent2) - > raise error.Abort('only merging is supported') - > repo.__class__ = legacyrepo + > from __future__ import absolute_import + > from mercurial import commit, error, extensions, node + > def _filecommit(orig, repo, fctx, manifest1, manifest2, + > linkrev, tr, includecopymeta, ms): + > fname = fctx.path() + > text = fctx.data() + > flog = repo.file(fname) + > fparent1 = manifest1.get(fname, node.nullid) + > fparent2 = manifest2.get(fname, node.nullid) + > meta = {} + > copy = fctx.copysource() + > if copy and copy != fname: + > raise error.Abort('copying is not supported') + > if fparent2 != node.nullid: + > return flog.add(text, meta, tr, linkrev, + > fparent1, fparent2), 'modified' + > raise error.Abort('only merging is supported') + > def uisetup(ui): + > extensions.wrapfunction(commit, '_filecommit', _filecommit) > EOF $ cat > baz < $UPPERCASEPY < import sys + > from mercurial.utils.procutil import setbinary + > setbinary(sys.stdin) + > setbinary(sys.stdout) + > sys.stdout.write(sys.stdin.read().upper()) + > EOF + $ TESTLINES="foo\nbar\nbaz\n" + $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY + FOO + BAR + BAZ + +This file attempts to test our workarounds for pickle's lack of +support for short reads. 
+ + $ cat >> $HGRCPATH < [extensions] + > fix = + > [fix] + > uppercase-whole-file:command="$PYTHON" $UPPERCASEPY + > uppercase-whole-file:pattern=set:** + > EOF + + $ hg init repo + $ cd repo + +# Create a file that's large enough that it seems to not fit in +# pickle's buffer, making it use the code path that expects our +# _blockingreader's read() method to return bytes. + $ echo "some stuff" > file + $ for i in $($TESTDIR/seq.py 13); do + > cat file file > tmp + > mv -f tmp file + > done + $ hg commit -Am "add large file" + adding file + +Check that we don't get a crash + + $ hg fix -r . + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-fix.hg (glob) diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-fix.t --- a/tests/test-fix.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-fix.t Fri Sep 18 10:48:43 2020 -0400 @@ -84,15 +84,15 @@ lines of files, unless the --whole flag is used. Some tools may always affect the whole file regardless of --whole. - If revisions are specified with --rev, those revisions will be checked, - and they may be replaced with new revisions that have fixed file content. - It is desirable to specify all descendants of each specified revision, so - that the fixes propagate to the descendants. If all descendants are fixed - at the same time, no merging, rebasing, or evolution will be required. + If --working-dir is used, files with uncommitted changes in the working + copy will be fixed. Note that no backup are made. - If --working-dir is used, files with uncommitted changes in the working - copy will be fixed. If the checked-out revision is also fixed, the working - directory will update to the replacement revision. + If revisions are specified with --source, those revisions and their + descendants will be checked, and they may be replaced with new revisions + that have fixed file content. By automatically including the descendants, + no merging, rebasing, or evolution will be required. 
If an ancestor of the + working copy is included, then the working copy itself will also be fixed, + and the working copy will be updated to the fixed parent. When determining what lines of each file to fix at each revision, the whole set of revisions being fixed is considered, so that fixes to earlier @@ -878,7 +878,7 @@ $ hg --config extensions.rebase= fix -r . abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ cd .. diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-git-interop.t --- a/tests/test-git-interop.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-git-interop.t Fri Sep 18 10:48:43 2020 -0400 @@ -270,3 +270,8 @@ +++ b/beta Mon Jan 01 00:00:11 2007 +0000 @@ -0,0 +1,1 @@ +beta + + +Deleting files should also work (this was issue6398) + $ hg rm beta + $ hg ci -m 'remove beta' diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-graft-interrupted.t --- a/tests/test-graft-interrupted.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-graft-interrupted.t Fri Sep 18 10:48:43 2020 -0400 @@ -622,7 +622,7 @@ $ hg log -GT "{rev}:{node|short} {desc}\n" @ 4:2aa9ad1006ff B in file a | - | % 3:09e253b87e17 A in file a + | o 3:09e253b87e17 A in file a | | | o 2:d36c0562f908 c | | @@ -669,7 +669,7 @@ $ hg log -GT "{rev}:{node|short} {desc}\n" @ 4:2aa9ad1006ff B in file a | - | % 3:09e253b87e17 A in file a + | o 3:09e253b87e17 A in file a | | | o 2:d36c0562f908 c | | @@ -712,7 +712,7 @@ $ hg log -GT "{rev}:{node|short} {desc}\n" @ 4:2aa9ad1006ff B in file a | - | % 3:09e253b87e17 A in file a + | o 3:09e253b87e17 A in file a | | | o 2:d36c0562f908 c | | diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-graft.t --- a/tests/test-graft.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-graft.t Fri Sep 18 10:48:43 2020 -0400 @@ -247,9 +247,9 @@ resolving manifests branchmerge: True, force: True, partial: False ancestor: 4c60f11aa304, local: 1905859650ec+, remote: 9c233e8e184d - 
preserving e for resolve of e d: remote is newer -> g getting d + preserving e for resolve of e e: versions differ -> m (premerge) picked tool ':merge' for e (binary False symlink False changedelete False) merging e diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-grep.t --- a/tests/test-grep.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-grep.t Fri Sep 18 10:48:43 2020 -0400 @@ -321,14 +321,61 @@ } ] +diff of each revision for reference + + $ hg log -p -T'== rev: {rev} ==\n' + == rev: 4 == + diff -r 95040cfd017d -r 914fa752cdea port + --- a/port Thu Jan 01 00:00:03 1970 +0000 + +++ b/port Thu Jan 01 00:00:04 1970 +0000 + @@ -1,4 +1,3 @@ + export + vaportight + import/export + -import/export + + == rev: 3 == + diff -r 3b325e3481a1 -r 95040cfd017d port + --- a/port Thu Jan 01 00:00:02 1970 +0000 + +++ b/port Thu Jan 01 00:00:03 1970 +0000 + @@ -1,3 +1,4 @@ + export + vaportight + import/export + +import/export + + == rev: 2 == + diff -r 8b20f75c1585 -r 3b325e3481a1 port + --- a/port Thu Jan 01 00:00:01 1970 +0000 + +++ b/port Thu Jan 01 00:00:02 1970 +0000 + @@ -1,2 +1,3 @@ + -import + export + +vaportight + +import/export + + == rev: 1 == + diff -r f31323c92170 -r 8b20f75c1585 port + --- a/port Thu Jan 01 00:00:00 1970 +0000 + +++ b/port Thu Jan 01 00:00:01 1970 +0000 + @@ -1,1 +1,2 @@ + import + +export + + == rev: 0 == + diff -r 000000000000 -r f31323c92170 port + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 + +++ b/port Thu Jan 01 00:00:00 1970 +0000 + @@ -0,0 +1,1 @@ + +import + + all $ hg grep --traceback --all -nu port port port:4:4:-:spam:import/export port:3:4:+:eggs:import/export port:2:1:-:spam:import - port:2:2:-:spam:export - port:2:1:+:spam:export port:2:2:+:spam:vaportight port:2:3:+:spam:import/export port:1:2:+:eggs:export @@ -369,26 +416,6 @@ "user": "spam" }, { - "change": "-", - "date": [2, 0], - "lineno": 2, - "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47", - "path": "port", - "rev": 2, - "texts": [{"matched": false, "text": "ex"}, 
{"matched": true, "text": "port"}], - "user": "spam" - }, - { - "change": "+", - "date": [2, 0], - "lineno": 1, - "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47", - "path": "port", - "rev": 2, - "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}], - "user": "spam" - }, - { "change": "+", "date": [2, 0], "lineno": 2, @@ -460,8 +487,6 @@ port:4:4:-:spam:import/export port:3:4:+:eggs:import/export port:2:1:-:spam:import - port:2:2:-:spam:export - port:2:1:+:spam:export port:2:2:+:spam:vaportight port:2:3:+:spam:import/export port:1:2:+:eggs:export @@ -640,6 +665,49 @@ $ cd .. +Moved line may not be collected by "grep --diff" since it first filters +the contents to be diffed by the pattern. (i.e. +"diff <(grep pat a) <(grep pat b)", not "diff a b | grep pat".) +This is much faster than generating full diff per revision. + + $ hg init moved-line + $ cd moved-line + $ cat <<'EOF' > a + > foo + > bar + > baz + > EOF + $ hg ci -Am initial + adding a + $ cat <<'EOF' > a + > bar + > baz + > foo + > EOF + $ hg ci -m reorder + + $ hg diff -c 1 + diff -r a593cc55e81b -r 69789a3b6e80 a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a Thu Jan 01 00:00:00 1970 +0000 + @@ -1,3 +1,3 @@ + -foo + bar + baz + +foo + + can't find the move of "foo" at the revision 1: + + $ hg grep --diff foo -r1 + [1] + + "bar" isn't moved at the revisoin 1: + + $ hg grep --diff bar -r1 + [1] + + $ cd .. + Test for showing working of allfiles flag $ hg init sng @@ -745,3 +813,626 @@ um:1:unmod $ cd .. +--follow with/without --diff and/or paths +----------------------------------------- + +For each test case, we compare the history traversal of "hg log", +"hg grep --diff", and "hg grep" (--all-files). + +"hg grep --diff" should traverse the log in the same way as "hg log". +"hg grep" (--all-files) is slightly different in that it includes +unmodified changes. 
+ + $ hg init follow + $ cd follow + + $ cat <<'EOF' >> .hg/hgrc + > [ui] + > logtemplate = '{rev}: {join(files % "{status} {path}", ", ")}\n' + > EOF + + $ for f in add0 add0-mod1 add0-rm1 add0-mod2 add0-rm2 add0-mod3 add0-mod4 add0-rm4; do + > echo data0 >> $f + > done + $ hg ci -qAm0 + + $ hg cp add0 add0-cp1 + $ hg cp add0 add0-cp1-mod1 + $ hg cp add0 add0-cp1-mod1-rm3 + $ hg rm add0-rm1 + $ for f in *mod1*; do + > echo data1 >> $f + > done + $ hg ci -qAm1 + + $ hg update -q 0 + $ hg cp add0 add0-cp2 + $ hg cp add0 add0-cp2-mod2 + $ hg rm add0-rm2 + $ for f in *mod2*; do + > echo data2 >> $f + > done + $ hg ci -qAm2 + + $ hg update -q 1 + $ hg cp add0-cp1 add0-cp1-cp3 + $ hg cp add0-cp1-mod1 add0-cp1-mod1-cp3-mod3 + $ hg rm add0-cp1-mod1-rm3 + $ for f in *mod3*; do + > echo data3 >> $f + > done + $ hg ci -qAm3 + + $ hg cp add0 add0-cp4 + $ hg cp add0 add0-cp4-mod4 + $ hg rm add0-rm4 + $ for f in *mod4*; do + > echo data4 >> $f + > done + + $ hg log -Gr':wdir()' + o 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4 + | + @ 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + | + | o 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2 + | | + o | 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1 + |/ + o 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + +follow revision history from wdir parent: + + $ hg log -f + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -f data + add0-cp1-mod1-cp3-mod3:3:+:data3 + add0-mod3:3:+:data3 + add0-cp1-mod1:1:+:data1 + add0-cp1-mod1-rm3:1:+:data1 + add0-mod1:1:+:data1 + add0:0:+:data0 + add0-mod1:0:+:data0 + add0-mod2:0:+:data0 + add0-mod3:0:+:data0 + add0-mod4:0:+:data0 + 
add0-rm1:0:+:data0 + add0-rm2:0:+:data0 + add0-rm4:0:+:data0 + + $ hg grep -f data + add0:3:data0 + add0-cp1:3:data0 + add0-cp1-cp3:3:data0 + add0-cp1-mod1:3:data0 + add0-cp1-mod1:3:data1 + add0-cp1-mod1-cp3-mod3:3:data0 + add0-cp1-mod1-cp3-mod3:3:data1 + add0-cp1-mod1-cp3-mod3:3:data3 + add0-mod1:3:data0 + add0-mod1:3:data1 + add0-mod2:3:data0 + add0-mod3:3:data0 + add0-mod3:3:data3 + add0-mod4:3:data0 + add0-rm2:3:data0 + add0-rm4:3:data0 + add0:1:data0 + add0-cp1:1:data0 + add0-cp1-mod1:1:data0 + add0-cp1-mod1:1:data1 + add0-cp1-mod1-rm3:1:data0 + add0-cp1-mod1-rm3:1:data1 + add0-mod1:1:data0 + add0-mod1:1:data1 + add0-mod2:1:data0 + add0-mod3:1:data0 + add0-mod4:1:data0 + add0-rm2:1:data0 + add0-rm4:1:data0 + add0:0:data0 + add0-mod1:0:data0 + add0-mod2:0:data0 + add0-mod3:0:data0 + add0-mod4:0:data0 + add0-rm1:0:data0 + add0-rm2:0:data0 + add0-rm4:0:data0 + +follow revision history from specified revision: + + $ hg log -fr2 + 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -fr2 data + add0-cp2-mod2:2:+:data2 + add0-mod2:2:+:data2 + add0:0:+:data0 + add0-mod1:0:+:data0 + add0-mod2:0:+:data0 + add0-mod3:0:+:data0 + add0-mod4:0:+:data0 + add0-rm1:0:+:data0 + add0-rm2:0:+:data0 + add0-rm4:0:+:data0 + + $ hg grep -fr2 data + add0:2:data0 + add0-cp2:2:data0 + add0-cp2-mod2:2:data0 + add0-cp2-mod2:2:data2 + add0-mod1:2:data0 + add0-mod2:2:data0 + add0-mod2:2:data2 + add0-mod3:2:data0 + add0-mod4:2:data0 + add0-rm1:2:data0 + add0-rm4:2:data0 + add0:0:data0 + add0-mod1:0:data0 + add0-mod2:0:data0 + add0-mod3:0:data0 + add0-mod4:0:data0 + add0-rm1:0:data0 + add0-rm2:0:data0 + add0-rm4:0:data0 + +follow revision history from wdir: + + $ hg log -fr'wdir()' + 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4 + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 1: A add0-cp1, A add0-cp1-mod1, A 
add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should follow history + BROKEN: should not abort because of removed file + $ hg grep --diff -fr'wdir()' data + add0-cp4-mod4:2147483647:+:data4 + add0-mod4:2147483647:+:data4 + add0-rm4:2147483647:-:abort: add0-rm4@None: not found in manifest! + [255] + + $ hg grep -fr'wdir()' data + add0:2147483647:data0 + add0-cp1:2147483647:data0 + add0-cp1-cp3:2147483647:data0 + add0-cp1-mod1:2147483647:data0 + add0-cp1-mod1:2147483647:data1 + add0-cp1-mod1-cp3-mod3:2147483647:data0 + add0-cp1-mod1-cp3-mod3:2147483647:data1 + add0-cp1-mod1-cp3-mod3:2147483647:data3 + add0-cp4:2147483647:data0 + add0-cp4-mod4:2147483647:data0 + add0-cp4-mod4:2147483647:data4 + add0-mod1:2147483647:data0 + add0-mod1:2147483647:data1 + add0-mod2:2147483647:data0 + add0-mod3:2147483647:data0 + add0-mod3:2147483647:data3 + add0-mod4:2147483647:data0 + add0-mod4:2147483647:data4 + add0-rm2:2147483647:data0 + add0:3:data0 + add0-cp1:3:data0 + add0-cp1-cp3:3:data0 + add0-cp1-mod1:3:data0 + add0-cp1-mod1:3:data1 + add0-cp1-mod1-cp3-mod3:3:data0 + add0-cp1-mod1-cp3-mod3:3:data1 + add0-cp1-mod1-cp3-mod3:3:data3 + add0-mod1:3:data0 + add0-mod1:3:data1 + add0-mod2:3:data0 + add0-mod3:3:data0 + add0-mod3:3:data3 + add0-mod4:3:data0 + add0-rm2:3:data0 + add0-rm4:3:data0 + add0:1:data0 + add0-cp1:1:data0 + add0-cp1-mod1:1:data0 + add0-cp1-mod1:1:data1 + add0-cp1-mod1-rm3:1:data0 + add0-cp1-mod1-rm3:1:data1 + add0-mod1:1:data0 + add0-mod1:1:data1 + add0-mod2:1:data0 + add0-mod3:1:data0 + add0-mod4:1:data0 + add0-rm2:1:data0 + add0-rm4:1:data0 + add0:0:data0 + add0-mod1:0:data0 + add0-mod2:0:data0 + add0-mod3:0:data0 + add0-mod4:0:data0 + add0-rm1:0:data0 + add0-rm2:0:data0 + add0-rm4:0:data0 + +follow revision history from multiple revisions: + + $ hg log -fr'1+2' + 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2 + 1: A add0-cp1, A add0-cp1-mod1, A 
add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should include the revision 1 + $ hg grep --diff -fr'1+2' data + add0-cp2-mod2:2:+:data2 + add0-mod2:2:+:data2 + add0:0:+:data0 + add0-mod1:0:+:data0 + add0-mod2:0:+:data0 + add0-mod3:0:+:data0 + add0-mod4:0:+:data0 + add0-rm1:0:+:data0 + add0-rm2:0:+:data0 + add0-rm4:0:+:data0 + + BROKEN: should include the revision 1 + $ hg grep -fr'1+2' data + add0:2:data0 + add0-cp2:2:data0 + add0-cp2-mod2:2:data0 + add0-cp2-mod2:2:data2 + add0-mod1:2:data0 + add0-mod2:2:data0 + add0-mod2:2:data2 + add0-mod3:2:data0 + add0-mod4:2:data0 + add0-rm1:2:data0 + add0-rm4:2:data0 + add0:0:data0 + add0-mod1:0:data0 + add0-mod2:0:data0 + add0-mod3:0:data0 + add0-mod4:0:data0 + add0-rm1:0:data0 + add0-rm2:0:data0 + add0-rm4:0:data0 + +follow file history from wdir parent, unmodified in wdir: + + $ hg log -f add0-mod3 + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -f data add0-mod3 + add0-mod3:3:+:data3 + add0-mod3:0:+:data0 + + BROKEN: should not include the revision 2 + $ hg grep -f data add0-mod3 + add0-mod3:3:data0 + add0-mod3:3:data3 + add0-mod3:2:data0 + add0-mod3:1:data0 + add0-mod3:0:data0 + +follow file history from wdir parent, modified in wdir: + + $ hg log -f add0-mod4 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -f data add0-mod4 + add0-mod4:0:+:data0 + + BROKEN: should not include the revision 2 + $ hg grep -f data add0-mod4 + add0-mod4:3:data0 + add0-mod4:2:data0 + add0-mod4:1:data0 + add0-mod4:0:data0 + +follow file history from wdir parent, copied but unmodified: + + $ hg log -f add0-cp1-cp3 + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 1: A add0-cp1, A 
add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -f data add0-cp1-cp3 + add0:0:+:data0 + + BROKEN: should follow history across renames + $ hg grep -f data add0-cp1-cp3 + add0-cp1-cp3:3:data0 + +follow file history from wdir parent, copied and modified: + + $ hg log -f add0-cp1-mod1-cp3-mod3 + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -f data add0-cp1-mod1-cp3-mod3 + add0-cp1-mod1-cp3-mod3:3:+:data3 + add0-cp1-mod1:1:+:data1 + add0:0:+:data0 + + BROKEN: should follow history across renames + $ hg grep -f data add0-cp1-mod1-cp3-mod3 + add0-cp1-mod1-cp3-mod3:3:data0 + add0-cp1-mod1-cp3-mod3:3:data1 + add0-cp1-mod1-cp3-mod3:3:data3 + +follow file history from wdir parent, copied in wdir: + + $ hg log -f add0-cp4 + abort: cannot follow nonexistent file: "add0-cp4" + [255] + + $ hg grep --diff -f data add0-cp4 + abort: cannot follow file not in parent revision: "add0-cp4" + [255] + + BROKEN: maybe better to abort + $ hg grep -f data add0-cp4 + [1] + +follow file history from wdir parent, removed: + + $ hg log -f add0-cp1-mod1-rm3 + abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3" + [255] + + $ hg grep --diff -f data add0-cp1-mod1-rm3 + abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3" + [255] + + BROKEN: maybe better to abort + $ hg grep -f data add0-cp1-mod1-rm3 + add0-cp1-mod1-rm3:1:data0 + add0-cp1-mod1-rm3:1:data1 + +follow file history from wdir parent (explicit), removed: + + $ hg log -fr. add0-cp1-mod1-rm3 + abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3" + [255] + + $ hg grep --diff -fr. 
data add0-cp1-mod1-rm3 + abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3" + [255] + + BROKEN: should abort + $ hg grep -fr. data add0-cp1-mod1-rm3 + add0-cp1-mod1-rm3:1:data0 + add0-cp1-mod1-rm3:1:data1 + +follow file history from wdir parent, removed in wdir: + + $ hg log -f add0-rm4 + abort: cannot follow file not in parent revision: "add0-rm4" + [255] + + BROKEN: may be okay, but different behavior from "hg log" + $ hg grep --diff -f data add0-rm4 + add0-rm4:0:+:data0 + + BROKEN: should not include the revision 2, and maybe better to abort + $ hg grep -f data add0-rm4 + add0-rm4:3:data0 + add0-rm4:2:data0 + add0-rm4:1:data0 + add0-rm4:0:data0 + +follow file history from wdir parent (explicit), removed in wdir: + + $ hg log -fr. add0-rm4 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -fr. data add0-rm4 + add0-rm4:0:+:data0 + + $ hg grep -fr. data add0-rm4 + add0-rm4:3:data0 + add0-rm4:1:data0 + add0-rm4:0:data0 + +follow file history from wdir parent, multiple files: + + $ hg log -f add0-mod3 add0-cp1-mod1 + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -f data add0-mod3 add0-cp1-mod1 + add0-mod3:3:+:data3 + add0-cp1-mod1:1:+:data1 + add0:0:+:data0 + add0-mod3:0:+:data0 + + BROKEN: should not include the revision 2 + BROKEN: should follow history across renames + $ hg grep -f data add0-mod3 add0-cp1-mod1 + add0-cp1-mod1:3:data0 + add0-cp1-mod1:3:data1 + add0-mod3:3:data0 + add0-mod3:3:data3 + add0-mod3:2:data0 + add0-cp1-mod1:1:data0 + add0-cp1-mod1:1:data1 + add0-mod3:1:data0 + add0-mod3:0:data0 + +follow file history from specified revision, modified: + + $ hg log -fr2 add0-mod2 + 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2 + 0: 
A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should include the revision 2 + $ hg grep --diff -fr2 data add0-mod2 + add0-mod2:0:+:data0 + + $ hg grep -fr2 data add0-mod2 + add0-mod2:2:data0 + add0-mod2:2:data2 + add0-mod2:0:data0 + +follow file history from specified revision, copied but unmodified: + + $ hg log -fr2 add0-cp2 + 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should follow history from the specified revision + $ hg grep --diff -fr2 data add0-cp2 + abort: cannot follow file not in parent revision: "add0-cp2" + [255] + + BROKEN: should follow history across renames + $ hg grep -fr2 data add0-cp2 + add0-cp2:2:data0 + +follow file history from specified revision, copied and modified: + + $ hg log -fr2 add0-cp2-mod2 + 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should follow history from the specified revision + $ hg grep --diff -fr2 data add0-cp2-mod2 + abort: cannot follow file not in parent revision: "add0-cp2-mod2" + [255] + + BROKEN: should follow history across renames + $ hg grep -fr2 data add0-cp2-mod2 + add0-cp2-mod2:2:data0 + add0-cp2-mod2:2:data2 + +follow file history from specified revision, removed: + + $ hg log -fr2 add0-rm2 + abort: cannot follow file not in any of the specified revisions: "add0-rm2" + [255] + + BROKEN: should abort + $ hg grep --diff -fr2 data add0-rm2 + add0-rm2:0:+:data0 + + BROKEN: should abort + $ hg grep -fr2 data add0-rm2 + add0-rm2:0:data0 + +follow file history from specified revision, multiple files: + + $ hg log -fr2 add0-cp2 add0-mod2 + 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should 
follow history from the specified revision + $ hg grep --diff -fr2 data add0-cp2 add0-mod2 + abort: cannot follow file not in parent revision: "add0-cp2" + [255] + + BROKEN: should follow history across renames + $ hg grep -fr2 data add0-cp2 add0-mod2 + add0-cp2:2:data0 + add0-mod2:2:data0 + add0-mod2:2:data2 + add0-mod2:0:data0 + +follow file history from wdir, unmodified: + + $ hg log -fr'wdir()' add0-mod3 + 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4 + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + $ hg grep --diff -fr'wdir()' data add0-mod3 + add0-mod3:3:+:data3 + add0-mod3:0:+:data0 + + $ hg grep -fr'wdir()' data add0-mod3 + add0-mod3:2147483647:data0 + add0-mod3:2147483647:data3 + add0-mod3:3:data0 + add0-mod3:3:data3 + add0-mod3:1:data0 + add0-mod3:0:data0 + +follow file history from wdir, modified: + + $ hg log -fr'wdir()' add0-mod4 + 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should include the changes in wdir + $ hg grep --diff -fr'wdir()' data add0-mod4 + add0-mod4:0:+:data0 + + $ hg grep -fr'wdir()' data add0-mod4 + add0-mod4:2147483647:data0 + add0-mod4:2147483647:data4 + add0-mod4:3:data0 + add0-mod4:1:data0 + add0-mod4:0:data0 + +follow file history from wdir, copied but unmodified: + + $ hg log -fr'wdir()' add0-cp4 + 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should follow history + $ hg grep --diff -fr'wdir()' data add0-cp4 + abort: cannot follow file not in parent revision: "add0-cp4" + [255] + + BROKEN: should follow history across renames + $ hg grep -fr'wdir()' data add0-cp4 + add0-cp4:2147483647:data0 + +follow file history 
from wdir, copied and modified: + + $ hg log -fr'wdir()' add0-cp4-mod4 + 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should follow history + $ hg grep --diff -fr'wdir()' data add0-cp4-mod4 + abort: cannot follow file not in parent revision: "add0-cp4-mod4" + [255] + + BROKEN: should follow history across renames + $ hg grep -fr'wdir()' data add0-cp4-mod4 + add0-cp4-mod4:2147483647:data0 + add0-cp4-mod4:2147483647:data4 + +follow file history from wdir, multiple files: + + $ hg log -fr'wdir()' add0-cp4 add0-mod4 add0-mod3 + 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4 + 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3 + 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4 + + BROKEN: should follow history + $ hg grep --diff -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3 + abort: cannot follow file not in parent revision: "add0-cp4" + [255] + + BROKEN: should follow history across renames + $ hg grep -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3 + add0-cp4:2147483647:data0 + add0-mod3:2147483647:data0 + add0-mod3:2147483647:data3 + add0-mod4:2147483647:data0 + add0-mod4:2147483647:data4 + add0-mod3:3:data0 + add0-mod3:3:data3 + add0-mod4:3:data0 + add0-mod3:1:data0 + add0-mod4:1:data0 + add0-mod3:0:data0 + add0-mod4:0:data0 + + $ cd .. diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-install.t --- a/tests/test-install.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-install.t Fri Sep 18 10:48:43 2020 -0400 @@ -187,6 +187,14 @@ #if py3 ensurepip $ "$PYTHON" -m venv installenv >> pip.log +Hack: Debian does something a bit different in ensurepip.bootstrap. 
This makes +it so that pip thinks the 'wheel' wheel is installed so it can build wheels; +when it goes to try, however, it shells out to run `python3 -u `, +that *doesn't* get the 'wheel' wheel, and it fails with an invalid command +'bdist_wheel'. To fix this, we just delete the wheel from where Debian put it in +our virtual env. Then pip doesn't think it's installed and doesn't try to build. + $ rm installenv/share/python-wheels/wheel-*.whl >/dev/null 2>&1 || true + Note: we use this weird path to run pip and hg to avoid platform differences, since it's bin on most platforms but Scripts on Windows. $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log @@ -214,7 +222,7 @@ no problems detected #endif -#if no-py3 virtualenv +#if py2virtualenv Note: --no-site-packages is deprecated, but some places have an ancient virtualenv from their linux distro or similar and it's not yet diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-journal-share.t --- a/tests/test-journal-share.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-journal-share.t Fri Sep 18 10:48:43 2020 -0400 @@ -1,3 +1,10 @@ +#testcases safe normal + +#if safe + $ echo "[format]" >> $HGRCPATH + $ echo "exp-share-safe = True" >> $HGRCPATH +#endif + Journal extension test: tests the share extension support $ cat >> testmocks.py << EOF diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-largefiles.t --- a/tests/test-largefiles.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-largefiles.t Fri Sep 18 10:48:43 2020 -0400 @@ -1005,7 +1005,7 @@ getting changed largefiles 3 largefiles updated, 0 removed 5 files updated, 0 files merged, 0 files removed, 0 files unresolved - 8 additional largefiles cached + 7 additional largefiles cached $ rm "${USERCACHE}"/* $ hg clone --all-largefiles -u 0 a a-clone0 @@ -1013,7 +1013,7 @@ getting changed largefiles 2 largefiles updated, 0 removed 4 files updated, 0 files merged, 0 files removed, 0 files unresolved - 9 additional largefiles cached + 8 additional largefiles cached $ 
hg -R a-clone0 sum parent: 0:30d30fe6a5be add files @@ -1047,7 +1047,7 @@ $ rm "${USERCACHE}"/* $ hg clone --all-largefiles -U a a-clone-u - 11 additional largefiles cached + 10 additional largefiles cached $ hg -R a-clone-u sum parent: -1:000000000000 (no revision checked out) branch: default diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-lfconvert.t --- a/tests/test-lfconvert.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-lfconvert.t Fri Sep 18 10:48:43 2020 -0400 @@ -389,17 +389,17 @@ $ rm largefiles-repo/.hg/largefiles/* $ hg lfconvert --to-normal issue3519 normalized3519 initializing destination normalized3519 - anotherlarge: largefile 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3 not available from file:/*/$TESTTMP/largefiles-repo (glob) - stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) - stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) - sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) + large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob) large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob) - sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) + sub/maybelarge.dat: largefile 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c not available from file:/*/$TESTTMP/largefiles-repo (glob) large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob) stuff/maybelarge.dat: largefile 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c not available from file:/*/$TESTTMP/largefiles-repo (glob) large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from 
file:/*/$TESTTMP/largefiles-repo (glob) - sub/maybelarge.dat: largefile 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c not available from file:/*/$TESTTMP/largefiles-repo (glob) - large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob) + sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) + sub/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) + stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) + anotherlarge: largefile 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3 not available from file:/*/$TESTTMP/largefiles-repo (glob) + stuff/maybelarge.dat: largefile 76236b6a2c6102826c61af4297dd738fb3b1de38 not available from file:/*/$TESTTMP/largefiles-repo (glob) 0 additional largefiles cached 11 largefiles failed to download abort: all largefiles must be present locally diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-log.t --- a/tests/test-log.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-log.t Fri Sep 18 10:48:43 2020 -0400 @@ -504,14 +504,50 @@ 0 (false !) follow files from the specified revisions with missing patterns -(BROKEN: should follow copies from e@4) $ hg log -T '{rev}\n' -fr4 e x - 4 - 2 (false !) + abort: cannot follow file not in any of the specified revisions: "x" + [255] + +follow files from the specified revisions with directory patterns +(BROKEN: should follow copies from dir/b@2) + + $ hg log -T '{rev}\n' -fr2 dir/b dir + 2 1 (false !) 0 (false !) 
+follow files from multiple revisions, but the pattern is missing in +one of the specified revisions + + $ hg log -T '{rev}\n' -fr'2+4' dir/b e + e: no such file in rev f8954cd4dc1f + dir/b: no such file in rev 7e4639b4691b + 4 + 2 + 1 + 0 + +follow files from multiple revisions, and the pattern matches a file in +one revision but matches a directory in another: +(BROKEN: should follow copies from dir/b@2 and dir/b/g@5) +(BROKEN: the revision 4 should not be included since dir/b/g@5 is unchanged) + + $ mkdir -p dir/b + $ hg mv g dir/b + $ hg ci -m 'make dir/b a directory' + + $ hg log -T '{rev}\n' -fr'2+5' dir/b + 5 + 4 + 3 (false !) + 2 + 1 (false !) + 0 (false !) + + $ hg --config extensions.strip= strip -r. --no-backup + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + follow files from the specified revisions across copies with -p/--patch $ hg log -T '== rev: {rev},{file_copies % " {source}->{name}"} ==\n' -fpr 4 e g @@ -2295,18 +2331,46 @@ 1 files changed, 1 insertions(+), 0 deletions(-) - BROKEN: added file should exist in wdir $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat d1/f2 + == 2147483647 == + d1/f2 | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) + + + $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat f1-copy + == 2147483647 == + f1-copy | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) + + == 0 == + d1/f1 | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) + + + $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat notfound + abort: cannot follow file not in any of the specified revisions: "notfound" + [255] + +follow files from wdir and non-wdir revision: + + $ hg log -T '{rev}\n' -fr'wdir()+.' 
f1-copy + f1-copy: no such file in rev 65624cd9070a + 2147483647 + 0 + +follow added/removed files from wdir parent + + $ hg log -T '{rev}\n' -f d1/f2 abort: cannot follow nonexistent file: "d1/f2" [255] - BROKEN: copied file should exist in wdir - $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat f1-copy + $ hg log -T '{rev}\n' -f f1-copy abort: cannot follow nonexistent file: "f1-copy" [255] - $ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat notfound - notfound: $ENOENT$ + $ hg log -T '{rev}\n' -f .d6/f1 + abort: cannot follow file not in parent revision: ".d6/f1" + [255] $ hg revert -aqC diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-merge-criss-cross.t --- a/tests/test-merge-criss-cross.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-merge-criss-cross.t Fri Sep 18 10:48:43 2020 -0400 @@ -78,9 +78,9 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 0f6b37dbe527, local: 3b08d01b0ab5+, remote: adfe50279922 - preserving f2 for resolve of f2 f1: remote is newer -> g getting f1 + preserving f2 for resolve of f2 f2: versions differ -> m (premerge) picked tool ':dump' for f2 (binary False symlink False changedelete False) merging f2 @@ -149,8 +149,14 @@ f1: versions differ -> m f2: remote unchanged -> k - auction for merging merge bids + auction for merging merge bids (2 ancestors) + list of bids for f1: + remote is newer -> g + versions differ -> m f1: picking 'get' action + list of bids for f2: + remote unchanged -> k + versions differ -> m f2: picking 'keep' action end of auction @@ -192,8 +198,14 @@ f1: versions differ -> m f2: remote is newer -> g - auction for merging merge bids + auction for merging merge bids (2 ancestors) + list of bids for f1: + remote unchanged -> k + versions differ -> m f1: picking 'keep' action + list of bids for f2: + remote is newer -> g + versions differ -> m f2: picking 'get' action end of auction @@ -230,7 +242,7 @@ calculating bids for ancestor 40663881a6dd resolving manifests - auction for 
merging merge bids + auction for merging merge bids (2 ancestors) f1: picking 'get' action f2: picking 'keep' action end of auction @@ -257,8 +269,14 @@ f1: versions differ -> m f2: remote unchanged -> k - auction for merging merge bids + auction for merging merge bids (2 ancestors) + list of bids for f1: + remote is newer -> g + versions differ -> m f1: picking 'get' action + list of bids for f2: + remote unchanged -> k + versions differ -> m f2: picking 'keep' action end of auction @@ -343,7 +361,7 @@ calculating bids for ancestor b211bbc6eb3c resolving manifests - auction for merging merge bids + auction for merging merge bids (2 ancestors) x: multiple bids for merge action: versions differ -> m versions differ -> m @@ -413,6 +431,8 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 11b5b303e36c, local: c0ef19750a22+, remote: 6ca01f7342b9 + d1/a: ancestor missing, remote missing -> k + d1/b: ancestor missing, remote missing -> k d2/b: remote created -> g calculating bids for ancestor 154e6000f54e @@ -430,17 +450,244 @@ d1/b: other deleted -> r d2/b: remote created -> g - auction for merging merge bids - d1/a: consensus for r - d1/b: consensus for r + auction for merging merge bids (2 ancestors) + list of bids for d1/a: + ancestor missing, remote missing -> k + other deleted -> r + d1/a: picking 'keep' action + list of bids for d1/b: + ancestor missing, remote missing -> k + other deleted -> r + d1/b: picking 'keep' action + list of bids for d2/b: + remote created -> g + remote created -> g d2/b: consensus for g end of auction - d1/a: other deleted -> r - removing d1/a - d1/b: other deleted -> r - removing d1/b d2/b: remote created -> g getting d2/b - 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + d1/a: ancestor missing, remote missing -> k + d1/b: ancestor missing, remote missing -> k + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + + +Check 
that removal reversion does not go unotified +================================================== + +On a merge, a file can be removed and user can revert that removal. This means +user has made an explicit choice of keeping the file or reverting the removal +even though the merge algo wanted to remove it. +Based on this, when we do criss cross merges, merge algorithm should not again +choose to remove the file as in one of the merges, user made an explicit choice +to revert the removal. +Following test cases demonstrate how merge algo does not take in account +explicit choices made by users to revert the removal and on criss-cross merging +removes the file again. + +"Simple" case where the filenode changes +---------------------------------------- + + $ cd .. + $ hg init criss-cross-merge-reversal-with-update + $ cd criss-cross-merge-reversal-with-update + $ echo the-file > the-file + $ echo other-file > other-file + $ hg add the-file other-file + $ hg ci -m 'root-commit' + $ echo foo >> the-file + $ echo bar >> other-file + $ hg ci -m 'updating-both-file' + $ hg up 'desc("root-commit")' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg rm the-file + $ hg ci -m 'delete-the-file' + created new head + $ hg log -G -T '{node|short} {desc}\n' + @ 7801bc9b9899 delete-the-file + | + | o 9b610631ab29 updating-both-file + |/ + o 955800955977 root-commit + + +Do all the merge combination (from the deleted or the update side × keeping and deleting the file + + $ hg update 'desc("delete-the-file")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("updating-both-file")' -t :local + 1 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci -m "merge-deleting-the-file-from-deleted" + $ hg manifest + other-file + + $ hg update 'desc("updating-both-file")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 
'desc("delete-the-file")' -t :other + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci -m "merge-deleting-the-file-from-updated" + created new head + $ hg manifest + other-file + + $ hg update 'desc("delete-the-file")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("updating-both-file")' -t :other + 1 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci -m "merge-keeping-the-file-from-deleted" + created new head + $ hg manifest + other-file + the-file + + $ hg update 'desc("updating-both-file")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("delete-the-file")' -t :local + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci -m "merge-keeping-the-file-from-updated" + created new head + $ hg manifest + other-file + the-file + +There the resulting merge together (leading to criss cross situation). Check +the conflict is properly detected. 
+ +(merging two deletion together → no conflict) + + $ hg update --clean 'desc("merge-deleting-the-file-from-deleted")' + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg merge 'desc("merge-deleting-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ ls -1 + other-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-deleting-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-keeping-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) + $ ls -1 + other-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-deleting-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-keeping-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ ls -1 + other-file + +(merging two deletion together → no conflict) + + $ hg update --clean 'desc("merge-deleting-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-deleting-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ ls -1 + other-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-deleting-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-keeping-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, 
don't forget to commit) + $ ls -1 + other-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-deleting-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-keeping-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ ls -1 + other-file + +(merging two "keeping" together → no conflict) + + $ hg update --clean 'desc("merge-keeping-the-file-from-updated")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-keeping-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ ls -1 + other-file + the-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-keeping-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-deleted-the-file-from-deleted")' + abort: empty revision set + [255] + $ ls -1 + other-file + the-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-keeping-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-deleting-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ ls -1 + other-file + the-file + +(merging two "keeping" together → no conflict) + + $ hg update --clean 'desc("merge-keeping-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-keeping-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, 
don't forget to commit) + $ ls -1 + other-file + the-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-keeping-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-deleted-the-file-from-deleted")' + abort: empty revision set + [255] + $ ls -1 + other-file + the-file + +(merging a deletion with keeping → conflict) +BROKEN: this should result in conflict + + $ hg update --clean 'desc("merge-keeping-the-file-from-deleted")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge 'desc("merge-deleting-the-file-from-updated")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ ls -1 + other-file + the-file diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-narrow-share.t --- a/tests/test-narrow-share.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-narrow-share.t Fri Sep 18 10:48:43 2020 -0400 @@ -1,4 +1,10 @@ #testcases flat tree +#testcases safe normal + +#if safe + $ echo "[format]" >> $HGRCPATH + $ echo "exp-share-safe = True" >> $HGRCPATH +#endif $ . "$TESTDIR/narrow-library.sh" diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-phabricator.t --- a/tests/test-phabricator.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-phabricator.t Fri Sep 18 10:48:43 2020 -0400 @@ -24,6 +24,11 @@ > EOF $ VCR="$TESTDIR/phabricator" +debugcallconduit doesn't claim invalid arguments without --test-vcr: + $ echo '{}' | HGRCSKIPREPO= hg debugcallconduit 'conduit.ping' + abort: config phabricator.url is required + [255] + Error is handled reasonably. 
We override the phabtoken here so that when you're developing changes to phabricator.py you can edit the above config and have a real token in the test but not have to edit @@ -965,5 +970,37 @@ Differential Revision: https://phab.mercurial-scm.org/D8388 +Hashes in the messages are updated automatically as phabsend amends and restacks +them. This covers both commits that are posted and descendants that are +restacked. + $ cat >> .hg/hgrc << EOF + > [experimental] + > evolution = all + > EOF + + $ echo content > file.txt + $ hg ci -m 'base review (generate test for phabsend)' + $ echo 'more content' > file.txt + $ hg ci -m '133c1c6c6449 is my parent (generate test for phabsend)' + $ echo 'even more content' > file.txt + $ hg ci -m 'c2874a398f7e is my parent (generate test for phabsend)' + + $ hg phabsend -r 17::18 --test-vcr "$VCR/phabsend-hash-fixes.json" + D8945 - created - 133c1c6c6449: base review (generate test for phabsend) + D8946 - created - c2874a398f7e: 133c1c6c6449 is my parent (generate test for phabsend) + new commits: ['f444f060f4d6'] + new commits: ['9c9290f945b1'] + restabilizing 1528c12fa2e4 as b28b20212bd4 + + $ hg log -l 3 -Tcompact + 22[tip] b28b20212bd4 1970-01-01 00:00 +0000 test + 9c9290f945b1 is my parent (generate test for phabsend) + + 21 9c9290f945b1 1970-01-01 00:00 +0000 test + f444f060f4d6 is my parent (generate test for phabsend) + + 20:16 f444f060f4d6 1970-01-01 00:00 +0000 test + base review (generate test for phabsend) + $ cd .. 
diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-rebase-abort.t --- a/tests/test-rebase-abort.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-rebase-abort.t Fri Sep 18 10:48:43 2020 -0400 @@ -327,7 +327,7 @@ $ echo new > a $ hg up 1 # user gets an error saying to run hg rebase --abort abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ cat a @@ -397,20 +397,20 @@ $ hg rebase -s 3 -d tip abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ hg up . abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ hg up -C . abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ hg graft 3 abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ hg abort diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-rebase-collapse.t --- a/tests/test-rebase-collapse.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-rebase-collapse.t Fri Sep 18 10:48:43 2020 -0400 @@ -762,7 +762,7 @@ abort: edit failed: false exited with status 1 [255] $ hg tglog - % 3: 63668d570d21 'C' + o 3: 63668d570d21 'C' | | @ 2: 82b8abf9c185 'D' | | diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-rebase-inmemory.t --- a/tests/test-rebase-inmemory.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-rebase-inmemory.t Fri Sep 18 10:48:43 2020 -0400 @@ -901,7 +901,7 @@ [1] $ hg rebase -r 3 -d 1 -t:merge3 abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ hg resolve --list U foo diff 
-r bd5b2b29b82d -r e3df1f560d9a tests/test-rebase-obsolete.t --- a/tests/test-rebase-obsolete.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-rebase-obsolete.t Fri Sep 18 10:48:43 2020 -0400 @@ -2057,7 +2057,7 @@ $ hg rebase -s 3 -d 5 abort: rebase in progress - (use 'hg rebase --continue' or 'hg rebase --abort') + (use 'hg rebase --continue', 'hg rebase --abort', or 'hg rebase --stop') [255] $ hg rebase --stop --continue abort: cannot specify both --stop and --continue diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-remotefilelog-share.t --- a/tests/test-remotefilelog-share.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-remotefilelog-share.t Fri Sep 18 10:48:43 2020 -0400 @@ -1,5 +1,12 @@ #require no-windows +#testcases safe normal + +#if safe + $ echo "[format]" >> $HGRCPATH + $ echo "exp-share-safe = True" >> $HGRCPATH +#endif + $ . "$TESTDIR/remotefilelog-library.sh" $ cat >> $HGRCPATH < g + getting b2 preserving a for resolve of b removing a - b2: remote created -> g - getting b2 b: remote moved from a -> m (premerge) picked tool ':merge' for b (binary False symlink False changedelete False) merging a and b to b diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-rename-merge2.t --- a/tests/test-rename-merge2.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-rename-merge2.t Fri Sep 18 10:48:43 2020 -0400 @@ -89,6 +89,7 @@ preserving rev for resolve of rev starting 4 threads for background file closing (?) b: remote copied from a -> m (premerge) + starting 4 threads for background file closing (?) 
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging a and b to b my b@e300d1c794ec+ other b@4ce40f5aca24 ancestor a@924404dff337 @@ -124,10 +125,10 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 86a2aa42fc76+, remote: f4db7e329e71 + a: remote is newer -> g + getting a preserving b for resolve of b preserving rev for resolve of rev - a: remote is newer -> g - getting a b: local copied/moved from a -> m (premerge) picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b and a to b @@ -241,9 +242,9 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 94b33a1b7f2d+, remote: 4ce40f5aca24 - preserving rev for resolve of rev b: remote created -> g getting b + preserving rev for resolve of rev rev: versions differ -> m (premerge) picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev @@ -306,11 +307,11 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 94b33a1b7f2d+, remote: bdb19105162a - preserving rev for resolve of rev a: other deleted -> r removing a b: remote created -> g getting b + preserving rev for resolve of rev rev: versions differ -> m (premerge) picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev @@ -422,9 +423,9 @@ note: possible conflict - a was renamed multiple times to: b c - preserving rev for resolve of rev c: remote created -> g getting c + preserving rev for resolve of rev rev: versions differ -> m (premerge) picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob) merging rev @@ -493,10 +494,10 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a + a: other deleted -> r + removing a preserving b for resolve of b 
preserving rev for resolve of rev - a: other deleted -> r - removing a starting 4 threads for background file closing (?) b: both created -> m (premerge) picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) @@ -534,10 +535,10 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a + a: remote is newer -> g + getting a preserving b for resolve of b preserving rev for resolve of rev - a: remote is newer -> g - getting a b: both renamed from a -> m (premerge) picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b @@ -571,10 +572,10 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a + a: other deleted -> r + removing a preserving b for resolve of b preserving rev for resolve of rev - a: other deleted -> r - removing a starting 4 threads for background file closing (?) b: both created -> m (premerge) picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) @@ -612,10 +613,10 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a + a: remote is newer -> g + getting a preserving b for resolve of b preserving rev for resolve of rev - a: remote is newer -> g - getting a b: both renamed from a -> m (premerge) picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b @@ -653,6 +654,7 @@ preserving rev for resolve of rev starting 4 threads for background file closing (?) b: both renamed from a -> m (premerge) + starting 4 threads for background file closing (?) 
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor a@924404dff337 @@ -848,10 +850,10 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 02963e448370+, remote: 2b958612230f + c: remote created -> g + getting c preserving b for resolve of b preserving rev for resolve of rev - c: remote created -> g - getting c b: local copied/moved from a -> m (premerge) picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob) merging b and a to b diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-rename-rev.t --- a/tests/test-rename-rev.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-rename-rev.t Fri Sep 18 10:48:43 2020 -0400 @@ -43,7 +43,7 @@ A d1/d d1/b -Test moved file (not copied) +Test moved file (not copied) using 'hg cp' command $ hg co 0 0 files updated, 0 files merged, 2 files removed, 0 files unresolved @@ -59,10 +59,40 @@ d1/b R d1/b +Test moved file (not copied) using 'hg mv' command + + $ hg co 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ mv d1/b d1/d + $ hg rm -A d1/b + $ hg add d1/d + $ hg ci -m 'move d1/b to d1/d' + created new head + $ hg mv -A --at-rev . d1/b d1/d + saved backup bundle to $TESTTMP/.hg/strip-backup/519850c3ea27-153c8fbb-copy.hg + $ hg st -C --change . + A d1/d + d1/b + R d1/b + +Test moved file (not copied) for which source still exists + + $ hg co 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ cp d1/b d1/d + $ hg add d1/d + $ hg ci -m 'copy d1/b to d1/d' + created new head + $ hg mv -A --at-rev . d1/b d1/d + saved backup bundle to $TESTTMP/.hg/strip-backup/c8d0f6bcf7ca-1c9bb53e-copy.hg + $ hg st -C --change . 
+ A d1/d + d1/b + Test using directory as destination $ hg co 0 - 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ cp -R d1 d3 $ hg add d3 adding d3/a diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-resolve.t --- a/tests/test-resolve.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-resolve.t Fri Sep 18 10:48:43 2020 -0400 @@ -328,6 +328,7 @@ [ { "commits": [{"label": "working copy", "name": "local", "node": "57653b9f834a4493f7240b0681efcb9ae7cab745"}, {"label": "merge rev", "name": "other", "node": "dc77451844e37f03f5c559e3b8529b2b48d381d1"}], + "extras": [], "files": [{"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file1", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "60b27f004e454aca81b0480209cce5081ec52390", "local_path": "file1", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file1", "path": "file1", "state": "r"}, {"ancestor_node": "2ed2a3912a0b24502043eae84ee4b279c18b90dd", "ancestor_path": "file2", "extras": [{"key": "ancestorlinknode", "value": "99726c03216e233810a2564cbc0adfe395007eac"}], "local_flags": "", "local_key": "cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523", "local_path": "file2", "other_node": "6f4310b00b9a147241b071a60c28a650827fb03d", "other_path": "file2", "path": "file2", "state": "u"}] } ] diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-revlog-v2.t --- a/tests/test-revlog-v2.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-revlog-v2.t Fri Sep 18 10:48:43 2020 -0400 @@ -32,10 +32,10 @@ Unknown flags to revlog are rejected >>> with open('.hg/store/00changelog.i', 'wb') as fh: - ... fh.write(b'\x00\x04\xde\xad') and None + ... fh.write(b'\xff\x00\xde\xad') and None $ hg log - abort: unknown flags (0x04) in version 57005 revlog 00changelog.i! + abort: unknown flags (0xff00) in version 57005 revlog 00changelog.i! 
[255] $ cd .. diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-rhg.t --- a/tests/test-rhg.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-rhg.t Fri Sep 18 10:48:43 2020 -0400 @@ -1,5 +1,6 @@ #require rust +Define an rhg function that will only run if rhg exists $ rhg() { > if [ -f "$RUNTESTDIR/../rust/target/debug/rhg" ]; then > "$RUNTESTDIR/../rust/target/debug/rhg" "$@" @@ -8,19 +9,62 @@ > exit 80 > fi > } + +Unimplemented command $ rhg unimplemented-command + error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context + + USAGE: + rhg + + For more information try --help [252] + +Finding root $ rhg root abort: no repository found in '$TESTTMP' (.hg not found)! [255] + $ hg init repository $ cd repository $ rhg root $TESTTMP/repository + +Unwritable file descriptor $ rhg root > /dev/full abort: No space left on device (os error 28) [255] + +Deleted repository $ rm -rf `pwd` $ rhg root abort: error getting current working directory: $ENOENT$ [255] + +Listing tracked files + $ cd $TESTTMP + $ hg init repository + $ cd repository + $ for i in 1 2 3; do + > echo $i >> file$i + > hg add file$i + > done + > hg commit -m "commit $i" -q + +Listing tracked files from root + $ rhg files + file1 + file2 + file3 + +Listing tracked files from subdirectory + $ mkdir -p path/to/directory + $ cd path/to/directory + $ rhg files + ../../../file1 + ../../../file2 + ../../../file3 + +Listing tracked files through broken pipe + $ rhg files | head -n 1 + ../../../file1 diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-run-tests.t --- a/tests/test-run-tests.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-run-tests.t Fri Sep 18 10:48:43 2020 -0400 @@ -497,7 +497,7 @@ ==================== $ rt --retest - running 2 tests using 1 parallel processes + running 1 tests using 1 parallel processes --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err @@ -512,7 +512,7 @@ ERROR: test-failure.t output changed ! 
Failed test-failure.t: output changed - # Ran 2 tests, 1 skipped, 1 failed. + # Ran 1 tests, 0 skipped, 1 failed. python hash seed: * (glob) [1] @@ -521,7 +521,7 @@ $ mkdir output $ mv test-failure.t.err output $ rt --retest --outputdir output - running 2 tests using 1 parallel processes + running 1 tests using 1 parallel processes --- $TESTTMP/test-failure.t +++ $TESTTMP/output/test-failure.t.err @@ -536,7 +536,7 @@ ERROR: test-failure.t output changed ! Failed test-failure.t: output changed - # Ran 2 tests, 1 skipped, 1 failed. + # Ran 1 tests, 0 skipped, 1 failed. python hash seed: * (glob) [1] @@ -844,6 +844,8 @@ $ echo 'saved backup bundle to $TESTTMP/foo.hg' saved backup bundle to $TESTTMP/*.hg (glob)< + $ rm test-failure.t + Race condition - test file was modified when test is running $ TESTRACEDIR=`pwd` @@ -972,6 +974,25 @@ python hash seed: * (glob) [1] + $ rt --retest + running 1 tests using 1 parallel processes + + --- $TESTTMP/test-cases.t + +++ $TESTTMP/test-cases.t#b#c.err + @@ -6,5 +6,5 @@ + #endif + #if b c + $ echo yes + - no + + yes + #endif + + ERROR: test-cases.t#b#c output changed + ! + Failed test-cases.t#b#c: output changed + # Ran 1 tests, 0 skipped, 1 failed. + python hash seed: * (glob) + [1] $ rm test-cases.t#b#c.err $ rm test-cases.t diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-share-bookmarks.t --- a/tests/test-share-bookmarks.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-share-bookmarks.t Fri Sep 18 10:48:43 2020 -0400 @@ -1,4 +1,10 @@ #testcases vfs svfs +#testcases safe normal + +#if safe + $ echo "[format]" >> $HGRCPATH + $ echo "exp-share-safe = True" >> $HGRCPATH +#endif $ echo "[extensions]" >> $HGRCPATH $ echo "share = " >> $HGRCPATH @@ -279,3 +285,9 @@ bm3 4:62f4ded848e4 bm4 5:92793bfc8cad $ cd .. 
+ +Test that if store is disabled, we drop the bookmarksinstore requirement + + $ hg init brokenrepo --config format.bookmarks-in-store=True --config format.usestore=false + ignoring enabled 'format.bookmarks-in-store' config beacuse it is incompatible with disabled 'format.usestore' config + ignoring enabled 'format.exp-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !) diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-share-safe.t --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-share-safe.t Fri Sep 18 10:48:43 2020 -0400 @@ -0,0 +1,221 @@ +setup + + $ cat >> $HGRCPATH < [extensions] + > share = + > [format] + > exp-share-safe = True + > EOF + +prepare source repo + + $ hg init source + $ cd source + $ cat .hg/requires + exp-sharesafe + $ cat .hg/store/requires + dotencode + fncache + generaldelta + revlogv1 + sparserevlog + store + $ hg debugrequirements + dotencode + exp-sharesafe + fncache + generaldelta + revlogv1 + sparserevlog + store + + $ echo a > a + $ hg ci -Aqm "added a" + $ echo b > b + $ hg ci -Aqm "added b" + + $ HGEDITOR=cat hg config --shared + abort: repository is not shared; can't use --shared + [255] + $ cd .. 
+ +Create a shared repo and check the requirements are shared and read correctly + $ hg share source shared1 + updating working directory + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd shared1 + $ cat .hg/requires + exp-sharesafe + shared + + $ hg debugrequirements -R ../source + dotencode + exp-sharesafe + fncache + generaldelta + revlogv1 + sparserevlog + store + + $ hg debugrequirements + dotencode + exp-sharesafe + fncache + generaldelta + revlogv1 + shared + sparserevlog + store + + $ echo c > c + $ hg ci -Aqm "added c" + +Check that config of the source repository is also loaded + + $ hg showconfig ui.curses + [1] + + $ echo "[ui]" >> ../source/.hg/hgrc + $ echo "curses=true" >> ../source/.hg/hgrc + + $ hg showconfig ui.curses + true + +However, local .hg/hgrc should override the config set by share source + + $ echo "[ui]" >> .hg/hgrc + $ echo "curses=false" >> .hg/hgrc + + $ hg showconfig ui.curses + false + + $ HGEDITOR=cat hg config --shared + [ui] + curses=true + + $ HGEDITOR=cat hg config --local + [ui] + curses=false + +Testing that hooks set in source repository also runs in shared repo + + $ cd ../source + $ cat <> .hg/hgrc + > [extensions] + > hooklib= + > [hooks] + > pretxnchangegroup.reject_merge_commits = \ + > python:hgext.hooklib.reject_merge_commits.hook + > EOF + + $ cd .. 
+ $ hg clone source cloned + updating to branch default + 3 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd cloned + $ hg up 0 + 0 files updated, 0 files merged, 2 files removed, 0 files unresolved + $ echo bar > bar + $ hg ci -Aqm "added bar" + $ hg merge + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci -m "merge commit" + + $ hg push ../source + pushing to ../source + searching for changes + adding changesets + adding manifests + adding file changes + error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase. + transaction abort! + rollback completed + abort: bcde3522682d rejected as merge on the same branch. Please consider rebase. + [255] + + $ hg push ../shared1 + pushing to ../shared1 + searching for changes + adding changesets + adding manifests + adding file changes + error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase. + transaction abort! + rollback completed + abort: bcde3522682d rejected as merge on the same branch. Please consider rebase. 
+ [255] + +Test that if share source config is untrusted, we dont read it + + $ cd ../shared1 + + $ cat << EOF > $TESTTMP/untrusted.py + > from mercurial import scmutil, util + > def uisetup(ui): + > class untrustedui(ui.__class__): + > def _trusted(self, fp, f): + > if util.normpath(fp.name).endswith(b'source/.hg/hgrc'): + > return False + > return super(untrustedui, self)._trusted(fp, f) + > ui.__class__ = untrustedui + > EOF + + $ hg showconfig hooks + hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook + + $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py + [1] + +Update the source repository format and check that shared repo works + + $ cd ../source + $ echo "[format]" >> .hg/hgrc + $ echo "revlog-compression=zstd" >> .hg/hgrc + + $ hg debugupgraderepo --run -q -R ../shared1 + abort: cannot upgrade repository; unsupported source requirement: shared + [255] + + $ hg debugupgraderepo --run -q + upgrade will perform the following actions: + + requirements + preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store + added: revlog-compression-zstd + + $ hg log -r . + changeset: 1:5f6d8a4bf34a + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: added b + +Shared one should work + $ cd ../shared1 + $ hg log -r . + changeset: 2:155349b645be + tag: tip + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: added c + +Unsharing works + + $ hg unshare + +Test that source config is added to the shared one after unshare, and the config +of current repo is still respected over the config which came from source config + $ cd ../cloned + $ hg push ../shared1 + pushing to ../shared1 + searching for changes + adding changesets + adding manifests + adding file changes + error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase. + transaction abort! 
+ rollback completed + abort: bcde3522682d rejected as merge on the same branch. Please consider rebase. + [255] + $ hg showconfig ui.curses -R ../shared1 + false diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-share.t --- a/tests/test-share.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-share.t Fri Sep 18 10:48:43 2020 -0400 @@ -1,3 +1,10 @@ +#testcases safe normal + +#if safe + $ echo "[format]" >> $HGRCPATH + $ echo "exp-share-safe = True" >> $HGRCPATH +#endif + $ echo "[extensions]" >> $HGRCPATH $ echo "share = " >> $HGRCPATH @@ -252,3 +259,10 @@ $ killdaemons.py +Test sharing a repository which was created with store requirement disable + + $ hg init nostore --config format.usestore=false + ignoring enabled 'format.exp-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !) + $ hg share nostore sharednostore + abort: cannot create shared repository as source was created with 'format.usestore' config disabled + [255] diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-simple-update.t --- a/tests/test-simple-update.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-simple-update.t Fri Sep 18 10:48:43 2020 -0400 @@ -19,7 +19,7 @@ $ hg co 0 files updated, 0 files merged, 0 files removed, 0 files unresolved $ echo bar>>foo - $ hg commit -m "2" + $ hg commit -m "2" -d '1 0' $ cd ../test @@ -30,7 +30,7 @@ adding manifests adding file changes added 1 changesets with 1 changes to 1 files - new changesets 30aff43faee1 + new changesets 84b9316f7b31 1 local changesets published (run 'hg update' to get a working copy) @@ -57,25 +57,47 @@ abort: you can't specify a revision and a date [255] +update by date + + $ hg update -d '<1970-01-01 00:00:02 +0000' + found revision 1 from Thu Jan 01 00:00:01 1970 +0000 + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg update -d '<1970-01-01 00:00:01 +0000' + found revision 1 from Thu Jan 01 00:00:01 1970 +0000 + 0 files updated, 0 files merged, 0 files removed, 0 files 
unresolved + $ hg update -d '<1970-01-01 00:00:00 +0000' + found revision 0 from Thu Jan 01 00:00:00 1970 +0000 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ hg update -d '>1970-01-01 00:00:02 +0000' + abort: revision matching date not found + [255] + $ hg update -d '>1970-01-01 00:00:01 +0000' + found revision 1 from Thu Jan 01 00:00:01 1970 +0000 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg update -d '>1970-01-01 00:00:00 +0000' + found revision 1 from Thu Jan 01 00:00:01 1970 +0000 + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + update to default destination (with empty revspec) $ hg update -q null $ hg update 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg id - 30aff43faee1 tip + 84b9316f7b31 tip $ hg update -q null $ hg update -r '' 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg id - 30aff43faee1 tip + 84b9316f7b31 tip $ hg update -q null $ hg update '' 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg id - 30aff43faee1 tip + 84b9316f7b31 tip $ cd .. 
diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-tags.t --- a/tests/test-tags.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-tags.t Fri Sep 18 10:48:43 2020 -0400 @@ -156,7 +156,7 @@ Failure to acquire lock results in no write $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1 - $ echo 'foo:1' > .hg/wlock + $ echo 'foo:1' > .hg/store/lock $ hg identify b9154636be93 tip $ hg blackbox -l 6 @@ -170,7 +170,7 @@ $ fnodescacheexists no fnodes cache - $ rm .hg/wlock + $ rm .hg/store/lock $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1 $ hg identify diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-template-map.t --- a/tests/test-template-map.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-template-map.t Fri Sep 18 10:48:43 2020 -0400 @@ -125,6 +125,54 @@ date: Wed Jan 01 10:01:00 2020 +0000 summary: third +Test map inheritance with non-existent base + + $ echo "__base__ = non-existent" > map-base-nonexistent + $ hg log -l1 -T./map-base-nonexistent + abort: style '$TESTTMP/a/non-existent' not found + (available styles: bisect, changelog, compact, default, phases, show, status, xml) + [255] + +Test map inheritance with directory as base + + $ mkdir somedir + $ echo "__base__ = somedir" > map-base-dir + $ hg log -l1 -T./map-base-dir + abort: Is a directory: '$TESTTMP/a/somedir' + [255] + +Test including a built-in template map + + $ cat <<'EOF' > map-include-builtin + > %include map-cmdline.default + > [templates] + > changeset = "{changeset_quiet}\n" + > EOF + $ hg log -l1 -T./map-include-builtin + 8:95c24699272e + + +Test including a nonexistent template map +BROKEN: This should probably be an error just like the bad __base__ above + + $ cat <<'EOF' > map-include-nonexistent + > %include nonexistent + > [templates] + > changeset = "test\n" + > EOF + $ hg log -l1 -T./map-include-nonexistent + test + +Test including a directory as template map +BROKEN: This should probably be an error just like the bad __base__ above + + $ cat <<'EOF' > map-include-dir + > 
%include somedir + > [templates] + > changeset = "test\n" + > EOF + $ hg log -l1 -T./map-include-dir + test Test docheader, docfooter and separator in template map @@ -1227,6 +1275,19 @@ abort: specify a template [255] +Error if style is a directory: + + $ hg log --style somedir + abort: Is a directory: 'somedir' + [255] + +Error if style is a directory whose name is a built-in style: + + $ hg log --style coal + abort: style 'coal' not found + (available styles: bisect, changelog, compact, default, phases, show, status, xml) + [255] + Error if style missing key: $ echo 'q = q' > t diff -r bd5b2b29b82d -r e3df1f560d9a tests/test-up-local-change.t --- a/tests/test-up-local-change.t Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/test-up-local-change.t Fri Sep 18 10:48:43 2020 -0400 @@ -43,9 +43,9 @@ resolving manifests branchmerge: False, force: False, partial: False ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb - preserving a for resolve of a b: remote created -> g getting b + preserving a for resolve of a a: versions differ -> m (premerge) picked tool 'true' for a (binary False symlink False changedelete False) merging a @@ -68,9 +68,9 @@ resolving manifests branchmerge: False, force: False, partial: False ancestor: 1e71731e6fbb, local: 1e71731e6fbb+, remote: c19d34741b0a - preserving a for resolve of a b: other deleted -> r removing b + preserving a for resolve of a starting 4 threads for background file closing (?) 
a: versions differ -> m (premerge) picked tool 'true' for a (binary False symlink False changedelete False) @@ -92,9 +92,9 @@ resolving manifests branchmerge: False, force: False, partial: False ancestor: c19d34741b0a, local: c19d34741b0a+, remote: 1e71731e6fbb - preserving a for resolve of a b: remote created -> g getting b + preserving a for resolve of a a: versions differ -> m (premerge) picked tool 'true' for a (binary False symlink False changedelete False) merging a diff -r bd5b2b29b82d -r e3df1f560d9a tests/testlib/ext-sidedata.py --- a/tests/testlib/ext-sidedata.py Sun Sep 13 15:59:23 2020 +0900 +++ b/tests/testlib/ext-sidedata.py Fri Sep 18 10:48:43 2020 -0400 @@ -12,8 +12,8 @@ from mercurial import ( extensions, - localrepo, node, + requirements, revlog, upgrade, ) @@ -54,7 +54,7 @@ def wrapgetsidedatacompanion(orig, srcrepo, dstrepo): sidedatacompanion = orig(srcrepo, dstrepo) addedreqs = dstrepo.requirements - srcrepo.requirements - if localrepo.SIDEDATA_REQUIREMENT in addedreqs: + if requirements.SIDEDATA_REQUIREMENT in addedreqs: assert sidedatacompanion is None # deal with composition later def sidedatacompanion(revlog, rev):