--- a/contrib/Makefile.python Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/Makefile.python Tue Dec 09 13:32:19 2014 -0600
@@ -1,4 +1,4 @@
-PYTHONVER=2.7.6
+PYTHONVER=2.7.8
PYTHONNAME=python-
PREFIX=$(HOME)/bin/prefix-$(PYTHONNAME)$(PYTHONVER)
SYMLINKDIR=$(HOME)/bin
@@ -27,7 +27,7 @@
# debian: apt-get install zlib1g-dev libbz2-dev libssl-dev
@echo
@echo 'To build a nice collection of interesting Python versions:'
- @echo ' $$ for v in 2.{4{,.2,.3},5{,.6},6{,.1,.2,.9},7{,.6}}; do'
+ @echo ' $$ for v in 2.{4{,.2,.3},5{,.6},6{,.1,.2,.9},7{,.8}}; do'
@echo ' make -f Makefile.python symlink PYTHONVER=$$v || break; done'
@echo 'To run a Mercurial test on all these Python versions:'
@echo ' $$ for py in `cd ~/bin && ls $(PYTHONNAME)2.*`; do'
@@ -60,7 +60,7 @@
printf 'import sys, zlib, bz2\nif sys.version_info >= (2,6):\n import ssl' | $(PREFIX)/bin/python
rm -rf $(PYTHON_SRCDIR)
-DOCUTILSVER=0.11
+DOCUTILSVER=0.12
DOCUTILS_SRCDIR=docutils-$(DOCUTILSVER)
DOCUTILS_SRCFILE=$(DOCUTILS_SRCDIR).tar.gz
--- a/contrib/buildrpm Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/buildrpm Tue Dec 09 13:32:19 2014 -0600
@@ -18,6 +18,7 @@
--withpython | --with-python)
shift
PYTHONVER=2.7.8
+ PYTHONMD5=d4bca0159acb0b44a781292b5231936f
;;
--rpmbuilddir )
shift
@@ -76,11 +77,18 @@
cd build
PYTHON_SRCFILE=Python-$PYTHONVER.tgz
[ -f $PYTHON_SRCFILE ] || curl -Lo $PYTHON_SRCFILE http://www.python.org/ftp/python/$PYTHONVER/$PYTHON_SRCFILE
+ if [ "$PYTHONMD5" ]; then
+ echo "$PYTHONMD5 $PYTHON_SRCFILE" | md5sum -w -c
+ fi
ln -f $PYTHON_SRCFILE $RPMBUILDDIR/SOURCES/$PYTHON_SRCFILE
DOCUTILSVER=`sed -ne "s/^%global docutilsname docutils-//p" $specfile`
DOCUTILS_SRCFILE=docutils-$DOCUTILSVER.tar.gz
[ -f $DOCUTILS_SRCFILE ] || curl -Lo $DOCUTILS_SRCFILE http://downloads.sourceforge.net/project/docutils/docutils/$DOCUTILSVER/$DOCUTILS_SRCFILE
+ DOCUTILSMD5=`sed -ne "s/^%global docutilsmd5 //p" $specfile`
+ if [ "$DOCUTILSMD5" ]; then
+ echo "$DOCUTILSMD5 $DOCUTILS_SRCFILE" | md5sum -w -c
+ fi
ln -f $DOCUTILS_SRCFILE $RPMBUILDDIR/SOURCES/$DOCUTILS_SRCFILE
)
fi
--- a/contrib/check-code.py Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/check-code.py Tue Dec 09 13:32:19 2014 -0600
@@ -94,7 +94,7 @@
(r'sed.*-i', "don't use 'sed -i', use a temporary file"),
(r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
(r'echo -n', "don't use 'echo -n', use printf"),
- (r'(^| )\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
+ (r'(^|\|\s*)\bwc\b[^|]*$\n(?!.*\(re\))', "filter wc output"),
(r'head -c', "don't use 'head -c', use 'dd'"),
(r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
(r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
@@ -291,7 +291,7 @@
"always assign an opened file to a variable, and close it afterwards"),
(r'[\s\(](open|file)\([^)]*\)\.',
"always assign an opened file to a variable, and close it afterwards"),
- (r'(?i)descendent', "the proper spelling is descendAnt"),
+ (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
(r'\.debug\(\_', "don't mark debug messages for translation"),
(r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
(r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/docker/apache-server/Dockerfile Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,23 @@
+FROM debian:wheezy
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV WSGI_PROCESSES 4
+ENV WSGI_THREADS 1
+ENV WSGI_MAX_REQUESTS 100000
+
+EXPOSE 80
+VOLUME ["/var/hg/htdocs", "/var/hg/repos"]
+
+RUN apt-get update && apt-get -y install libapache2-mod-wsgi python-dev vim
+
+# Install our own Apache site.
+RUN a2dissite 000-default
+ADD vhost.conf /etc/apache2/sites-available/hg
+RUN a2ensite hg
+
+ADD hgwebconfig /defaulthgwebconfig
+
+ADD entrypoint.sh /entrypoint.sh
+ENTRYPOINT ["/entrypoint.sh"]
+
+CMD ["/usr/sbin/apache2", "-DFOREGROUND"]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/docker/apache-server/README.rst Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,144 @@
+====================
+Apache Docker Server
+====================
+
+This directory contains code for running a Mercurial hgweb server via
+mod_wsgi with the Apache HTTP Server inside a Docker container.
+
+.. important::
+
+ This container is intended for testing purposes only: it is
+ **not** meant to be suitable for production use.
+
+Building Image
+==============
+
+The first step is to build a Docker image containing Apache and mod_wsgi::
+
+ $ docker build -t hg-apache .
+
+.. important::
+
+ You should rebuild the image whenever the content of this directory
+ changes. Rebuilding after pulling or when you haven't run the container
+ in a while is typically a good idea.
+
+Running the Server
+==================
+
+To run the container, you'll execute something like::
+
+ $ docker run --rm -it -v `pwd`/../../..:/var/hg/source -p 8000:80 hg-apache
+
+If you aren't a Docker expert:
+
+* ``--rm`` will remove the container when it stops (so it doesn't clutter
+ your system)
+* ``-i`` will launch the container in interactive mode so stdin is attached
+* ``-t`` will allocate a pseudo TTY
+* ``-v src:dst`` will mount the host filesystem at ``src`` into ``dst``
+ in the container. In our example, we assume you are running from this
+ directory and use the source code a few directories up.
+* ``-p 8000:80`` will publish port ``80`` on the container to port ``8000``
+ on the host, allowing you to access the HTTP server on the host interface.
+* ``hg-apache`` is the container image to run. This should correspond to what
+ we build with ``docker build``.
+
+.. important::
+
+ The container **requires** that ``/var/hg/source`` contain the Mercurial
+ source code.
+
+ Upon start, the container will attempt an install of the source in that
+ directory. If the architecture of the host machine doesn't match that of
+ the Docker host (e.g. when running Boot2Docker under OS X), Mercurial's
+ Python C extensions will fail to run. Be sure to ``make clean`` your
+ host's source tree before mounting it in the container to avoid this.
+
+When starting the container, you should see some start-up actions (including
+a Mercurial install) and some output saying Apache has started.
+
+Now if you load ``http://localhost:8000/`` (or whatever interface Docker
+is using), you should see hgweb running!
+
+For your convenience, we've created an empty repository available at
+``/repo``. Feel free to populate it with ``hg push``.
+
+Customizing the Server
+======================
+
+By default, the Docker container installs a basic hgweb config and an
+empty dummy repository. It also uses some reasonable defaults for
+mod_wsgi.
+
+Customizing the WSGI Dispatcher And Mercurial Config
+----------------------------------------------------
+
+By default, the Docker environment installs a custom ``hgweb.wsgi``
+file (based on the example in ``contrib/hgweb.wsgi``). The file
+is installed into ``/var/hg/htdocs/hgweb.wsgi``.
+
+A default hgweb configuration file is also installed. The ``hgwebconfig``
+file from this directory is installed into ``/var/hg/htdocs/config``.
+
+You have a few options for customizing these files.
+
+The simplest is to hack up ``hgwebconfig`` and ``entrypoint.sh`` in
+this directory and to rebuild the Docker image. This has the downside
+that the Mercurial working copy is modified and you may accidentally
+commit unwanted changes.
+
+The next simplest is to copy this directory somewhere, make your changes,
+then rebuild the image. No working copy changes involved.
+
+The preferred solution is to mount a host file into the container and
+overwrite the built-in defaults.
+
+For example, say we create a custom hgweb config file in ``~/hgweb``. We
+can start the container like so to install our custom config file::
+
+ $ docker run -v ~/hgweb:/var/hg/htdocs/config ...
+
+You can do something similar to install a custom WSGI dispatcher::
+
+ $ docker run -v ~/hgweb.wsgi:/var/hg/htdocs/hgweb.wsgi ...
+
+Managing Repositories
+---------------------
+
+Repositories are served from ``/var/hg/repos`` by default. This directory
+is configured as a Docker volume. This means you can mount an existing
+data volume container in the container so repository data is persisted
+across container invocations. See
+https://docs.docker.com/userguide/dockervolumes/ for more.
+
+Alternatively, if you just want to perform lightweight repository
+manipulation, open a shell in the container::
+
+ $ docker exec -it <container> /bin/bash
+
+Then run ``hg init``, etc to manipulate the repositories in ``/var/hg/repos``.
+
+mod_wsgi Configuration Settings
+-------------------------------
+
+mod_wsgi settings can be controlled with the following environment
+variables.
+
+WSGI_PROCESSES
+ Number of WSGI processes to run.
+WSGI_THREADS
+ Number of threads to run in each WSGI process
+WSGI_MAX_REQUESTS
+ Maximum number of requests each WSGI process may serve before it is
+ reaped.
+
+See https://code.google.com/p/modwsgi/wiki/ConfigurationDirectives#WSGIDaemonProcess
+for more on these settings.
+
+.. note::
+
+ The default is to use 1 thread per process. The reason is that Mercurial
+ doesn't perform well in multi-threaded mode due to the GIL. Most people
+ run a single thread per process in production for this reason, so that's
+ what we default to.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/docker/apache-server/entrypoint.sh Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# This script gets executed on container start. Its job is to set up
+# the Mercurial environment and invoke the server.
+
+# Mercurial can be started in two modes.
+# If the MERCURIAL_SOURCE environment variable is set and it points to a
+# Mercurial source directory, we will install Mercurial from that directory.
+# Otherwise, we download the Mercurial source and install it manually.
+
+set -e
+
+SOURCE_DIR=/var/hg/source
+INSTALL_DIR=/var/hg/install
+REPOS_DIR=/var/hg/repos
+HTDOCS_DIR=/var/hg/htdocs
+
+if [ ! -d ${SOURCE_DIR} ]; then
+ echo "Mercurial source not available at ${SOURCE_DIR}"
+ echo "You need to mount a volume containing the Mercurial source code"
+ echo "when running the container. For example:"
+ echo ""
+ echo " $ docker run -v ~/src/hg:${SOURCE_DIR} hg-apache"
+ echo ""
+ echo "This container will now stop running."
+ exit 1
+fi
+
+echo "Installing Mercurial from ${SOURCE_DIR} into ${INSTALL_DIR}"
+pushd ${SOURCE_DIR}
+/usr/bin/python2.7 setup.py install --root=/ --prefix=${INSTALL_DIR} --force
+popd
+
+mkdir -p ${HTDOCS_DIR}
+
+# Provide a default config if the user hasn't supplied one.
+if [ ! -f ${HTDOCS_DIR}/config ]; then
+ cp /defaulthgwebconfig ${HTDOCS_DIR}/config
+fi
+
+if [ ! -f ${HTDOCS_DIR}/hgweb.wsgi ]; then
+ cat >> ${HTDOCS_DIR}/hgweb.wsgi << EOF
+config = '${HTDOCS_DIR}/config'
+
+import sys
+sys.path.insert(0, '${INSTALL_DIR}/lib/python2.7/site-packages')
+
+from mercurial import demandimport
+demandimport.enable()
+
+from mercurial.hgweb import hgweb
+application = hgweb(config)
+EOF
+fi
+
+mkdir -p ${REPOS_DIR}
+
+if [ ! -d ${REPOS_DIR}/repo ]; then
+ ${INSTALL_DIR}/bin/hg init ${REPOS_DIR}/repo
+ chown -R www-data:www-data ${REPOS_DIR}/repo
+fi
+
+# This is necessary to make debuginstall happy.
+if [ ! -f ~/.hgrc ]; then
+ cat >> ~/.hgrc << EOF
+[ui]
+username = Dummy User <nobody@example.com>
+EOF
+fi
+
+echo "Verifying Mercurial installation looks happy"
+${INSTALL_DIR}/bin/hg debuginstall
+
+. /etc/apache2/envvars
+
+echo "Starting Apache HTTP Server on port 80"
+echo "We hope you remembered to publish this port when running the container!"
+echo "If this is an interactive container, simply CTRL^C to stop."
+
+exec "$@"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/docker/apache-server/hgwebconfig Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,6 @@
+[paths]
+/ = /var/hg/repos/*
+
+[web]
+allow_push = *
+push_ssl = False
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/docker/apache-server/vhost.conf Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,24 @@
+# Apache won't be able to resolve its own hostname, so we sneak this
+# into the global context to silence a confusing-to-user warning on
+# server start.
+ServerName hg
+
+<VirtualHost *:80>
+ DocumentRoot /var/hg/htdocs
+ <Directory />
+ Options FollowSymLinks
+ AllowOverride None
+ </Directory>
+
+ SetEnv HGENCODING UTF-8
+ SetEnv LC_CTYPE UTF-8
+
+ WSGIDaemonProcess hg processes=${WSGI_PROCESSES} threads=${WSGI_THREADS} maximum-requests=${WSGI_MAX_REQUESTS} user=www-data group=www-data display-name=hg-wsgi
+ WSGIProcessGroup hg
+
+ WSGIScriptAliasMatch ^(.*) /var/hg/htdocs/hgweb.wsgi$1
+
+ ErrorLog ${APACHE_LOG_DIR}/error.log
+ LogLevel warn
+ CustomLog ${APACHE_LOG_DIR}/access.log combined
+</VirtualHost>
--- a/contrib/mercurial.spec Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/mercurial.spec Tue Dec 09 13:32:19 2014 -0600
@@ -6,7 +6,8 @@
%global pythonver %{withpython}
%global pythonname Python-%{withpython}
-%global docutilsname docutils-0.11
+%global docutilsname docutils-0.12
+%global docutilsmd5 4622263b62c5c771c03502afa3157768
%global pythonhg python-hg
%global hgpyprefix /usr/%{pythonhg}
# byte compilation will fail on some some Python /test/ files
@@ -126,7 +127,6 @@
install -m 644 contrib/mq.el $RPM_BUILD_ROOT%{emacs_lispdir}/
mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/mercurial/hgrc.d
-install -m 644 contrib/mergetools.hgrc $RPM_BUILD_ROOT%{_sysconfdir}/mercurial/hgrc.d/mergetools.rc
%clean
rm -rf $RPM_BUILD_ROOT
@@ -149,7 +149,6 @@
%config(noreplace) %{_sysconfdir}/bash_completion.d/mercurial.sh
%dir %{_sysconfdir}/mercurial
%dir %{_sysconfdir}/mercurial/hgrc.d
-%config(noreplace) %{_sysconfdir}/mercurial/hgrc.d/mergetools.rc
%if "%{?withpython}"
%{_bindir}/%{pythonhg}
%{hgpyprefix}
--- a/contrib/mergetools.hgrc Mon Dec 08 15:41:54 2014 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,135 +0,0 @@
-# Some default global settings for common merge tools
-
-[merge-tools]
-kdiff3.args=--auto --L1 base --L2 local --L3 other $base $local $other -o $output
-kdiff3.regkey=Software\KDiff3
-kdiff3.regkeyalt=Software\Wow6432Node\KDiff3
-kdiff3.regappend=\kdiff3.exe
-kdiff3.fixeol=True
-kdiff3.gui=True
-kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
-
-gvimdiff.args=--nofork -d -g -O $local $other $base
-gvimdiff.regkey=Software\Vim\GVim
-gvimdiff.regkeyalt=Software\Wow6432Node\Vim\GVim
-gvimdiff.regname=path
-gvimdiff.priority=-9
-gvimdiff.diffargs=--nofork -d -g -O $parent $child
-
-vimdiff.args=$local $other $base -c 'redraw | echomsg "hg merge conflict, type \":cq\" to abort vimdiff"'
-vimdiff.check=changed
-vimdiff.priority=-10
-
-merge.check=conflicts
-merge.priority=-100
-
-gpyfm.gui=True
-
-meld.gui=True
-meld.args=--label='local' $local --label='merged' $base --label='other' $other -o $output
-meld.check=changed
-meld.diffargs=-a --label='$plabel1' $parent --label='$clabel' $child
-
-tkdiff.args=$local $other -a $base -o $output
-tkdiff.gui=True
-tkdiff.priority=-8
-tkdiff.diffargs=-L '$plabel1' $parent -L '$clabel' $child
-
-xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 local --title2 base --title3 other --merged-filename $output --merge $local $base $other
-xxdiff.gui=True
-xxdiff.priority=-8
-xxdiff.diffargs=--title1 '$plabel1' $parent --title2 '$clabel' $child
-
-diffmerge.regkey=Software\SourceGear\SourceGear DiffMerge\
-diffmerge.regkeyalt=Software\Wow6432Node\SourceGear\SourceGear DiffMerge\
-diffmerge.regname=Location
-diffmerge.priority=-7
-diffmerge.args=-nosplash -merge -title1=local -title2=merged -title3=other $local $base $other -result=$output
-diffmerge.check=changed
-diffmerge.gui=True
-diffmerge.diffargs=--nosplash --title1='$plabel1' --title2='$clabel' $parent $child
-
-p4merge.args=$base $local $other $output
-p4merge.regkey=Software\Perforce\Environment
-p4merge.regkeyalt=Software\Wow6432Node\Perforce\Environment
-p4merge.regname=P4INSTROOT
-p4merge.regappend=\p4merge.exe
-p4merge.gui=True
-p4merge.priority=-8
-p4merge.diffargs=$parent $child
-
-p4mergeosx.executable = /Applications/p4merge.app/Contents/MacOS/p4merge
-p4mergeosx.args = $base $local $other $output
-p4mergeosx.gui = True
-p4mergeosx.priority=-8
-p4mergeosx.diffargs=$parent $child
-
-tortoisemerge.args=/base:$base /mine:$local /theirs:$other /merged:$output
-tortoisemerge.regkey=Software\TortoiseSVN
-tortoisemerge.regkeyalt=Software\Wow6432Node\TortoiseSVN
-tortoisemerge.check=changed
-tortoisemerge.gui=True
-tortoisemerge.priority=-8
-tortoisemerge.diffargs=/base:$parent /mine:$child /basename:'$plabel1' /minename:'$clabel'
-
-ecmerge.args=$base $local $other --mode=merge3 --title0=base --title1=local --title2=other --to=$output
-ecmerge.regkey=Software\Elli\xc3\xa9 Computing\Merge
-ecmerge.regkeyalt=Software\Wow6432Node\Elli\xc3\xa9 Computing\Merge
-ecmerge.gui=True
-ecmerge.diffargs=$parent $child --mode=diff2 --title1='$plabel1' --title2='$clabel'
-
-# editmerge is a small script shipped in contrib.
-# It needs this config otherwise it behaves the same as internal:local
-editmerge.args=$output
-editmerge.check=changed
-editmerge.premerge=keep
-
-filemerge.executable=/Developer/Applications/Utilities/FileMerge.app/Contents/MacOS/FileMerge
-filemerge.args=-left $other -right $local -ancestor $base -merge $output
-filemerge.gui=True
-
-; Windows version of Beyond Compare
-beyondcompare3.args=$local $other $base $output /ro /lefttitle=local /centertitle=base /righttitle=other /automerge /reviewconflicts /solo
-beyondcompare3.regkey=Software\Scooter Software\Beyond Compare 3
-beyondcompare3.regname=ExePath
-beyondcompare3.gui=True
-beyondcompare3.priority=-2
-beyondcompare3.diffargs=/lro /lefttitle='$plabel1' /righttitle='$clabel' /solo /expandall $parent $child
-
-; Linux version of Beyond Compare
-bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=parent1 -centertitle=base -righttitle=parent2 -outputtitle=merged -automerge -reviewconflicts -solo
-bcompare.gui=True
-bcompare.priority=-1
-bcompare.diffargs=-lro -lefttitle='$plabel1' -righttitle='$clabel' -solo -expandall $parent $child
-
-winmerge.args=/e /x /wl /ub /dl other /dr local $other $local $output
-winmerge.regkey=Software\Thingamahoochie\WinMerge
-winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\
-winmerge.regname=Executable
-winmerge.check=changed
-winmerge.gui=True
-winmerge.priority=-10
-winmerge.diffargs=/r /e /x /ub /wl /dl '$plabel1' /dr '$clabel' $parent $child
-
-araxis.regkey=SOFTWARE\Classes\TypeLib\{46799e0a-7bd1-4330-911c-9660bb964ea2}\7.0\HELPDIR
-araxis.regappend=\ConsoleCompare.exe
-araxis.priority=-2
-araxis.args=/3 /a2 /wait /merge /title1:"Other" /title2:"Base" /title3:"Local :"$local $other $base $local $output
-araxis.checkconflict=True
-araxis.binary=True
-araxis.gui=True
-araxis.diffargs=/2 /wait /title1:"$plabel1" /title2:"$clabel" $parent $child
-
-diffuse.priority=-3
-diffuse.args=$local $base $other
-diffuse.gui=True
-diffuse.diffargs=$parent $child
-
-UltraCompare.regkey=Software\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
-UltraCompare.regkeyalt=Software\Wow6432Node\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
-UltraCompare.args = $base $local $other -title1 base -title3 other
-UltraCompare.priority = -2
-UltraCompare.gui = True
-UltraCompare.binary = True
-UltraCompare.check = conflicts,changed
-UltraCompare.diffargs=$child $parent -title1 $clabel -title2 $plabel1
--- a/contrib/perf.py Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/perf.py Tue Dec 09 13:32:19 2014 -0600
@@ -4,11 +4,26 @@
from mercurial import cmdutil, scmutil, util, commands, obsolete
from mercurial import repoview, branchmap, merge, copies
import time, os, sys
+import functools
cmdtable = {}
command = cmdutil.command(cmdtable)
-def timer(func, title=None):
+def gettimer(ui, opts=None):
+ """return a timer function and formatter: (timer, formatter)
+
+ This functions exist to gather the creation of formatter in a single
+ place instead of duplicating it in all performance command."""
+ if opts is None:
+ opts = {}
+ # redirect all to stderr
+ ui = ui.copy()
+ ui.fout = ui.ferr
+ # get a formatter
+ fm = ui.formatter('perf', opts)
+ return functools.partial(_timer, fm), fm
+
+def _timer(fm, func, title=None):
results = []
begin = time.time()
count = 0
@@ -25,16 +40,25 @@
break
if cstop - begin > 10 and count >= 3:
break
+
+ fm.startitem()
+
if title:
- sys.stderr.write("! %s\n" % title)
+ fm.write('title', '! %s\n', title)
if r:
- sys.stderr.write("! result: %s\n" % r)
+ fm.write('result', '! result: %s\n', r)
m = min(results)
- sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
- % (m[0], m[1] + m[2], m[1], m[2], count))
+ fm.plain('!')
+ fm.write('wall', ' wall %f', m[0])
+ fm.write('comb', ' comb %f', m[1] + m[2])
+ fm.write('user', ' user %f', m[1])
+ fm.write('sys', ' sys %f', m[2])
+ fm.write('count', ' (best of %d)', count)
+ fm.plain('\n')
@command('perfwalk')
def perfwalk(ui, repo, *pats):
+ timer, fm = gettimer(ui)
try:
m = scmutil.match(repo[None], pats, {})
timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
@@ -44,11 +68,14 @@
timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
except Exception:
timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
+ fm.end()
@command('perfannotate')
def perfannotate(ui, repo, f):
+ timer, fm = gettimer(ui)
fc = repo['.'][f]
timer(lambda: len(fc.annotate(True)))
+ fm.end()
@command('perfstatus',
[('u', 'unknown', False,
@@ -57,16 +84,20 @@
#m = match.always(repo.root, repo.getcwd())
#timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
# False))))
+ timer, fm = gettimer(ui)
timer(lambda: sum(map(len, repo.status(**opts))))
+ fm.end()
@command('perfaddremove')
def perfaddremove(ui, repo):
+ timer, fm = gettimer(ui)
try:
oldquiet = repo.ui.quiet
repo.ui.quiet = True
timer(lambda: scmutil.addremove(repo, dry_run=True))
finally:
repo.ui.quiet = oldquiet
+ fm.end()
def clearcaches(cl):
# behave somewhat consistently across internal API changes
@@ -79,33 +110,40 @@
@command('perfheads')
def perfheads(ui, repo):
+ timer, fm = gettimer(ui)
cl = repo.changelog
def d():
len(cl.headrevs())
clearcaches(cl)
timer(d)
+ fm.end()
@command('perftags')
def perftags(ui, repo):
import mercurial.changelog
import mercurial.manifest
+ timer, fm = gettimer(ui)
def t():
repo.changelog = mercurial.changelog.changelog(repo.sopener)
repo.manifest = mercurial.manifest.manifest(repo.sopener)
repo._tags = None
return len(repo.tags())
timer(t)
+ fm.end()
@command('perfancestors')
def perfancestors(ui, repo):
+ timer, fm = gettimer(ui)
heads = repo.changelog.headrevs()
def d():
for a in repo.changelog.ancestors(heads):
pass
timer(d)
+ fm.end()
@command('perfancestorset')
def perfancestorset(ui, repo, revset):
+ timer, fm = gettimer(ui)
revs = repo.revs(revset)
heads = repo.changelog.headrevs()
def d():
@@ -113,34 +151,42 @@
for rev in revs:
rev in s
timer(d)
+ fm.end()
@command('perfdirs')
def perfdirs(ui, repo):
+ timer, fm = gettimer(ui)
dirstate = repo.dirstate
'a' in dirstate
def d():
dirstate.dirs()
del dirstate._dirs
timer(d)
+ fm.end()
@command('perfdirstate')
def perfdirstate(ui, repo):
+ timer, fm = gettimer(ui)
"a" in repo.dirstate
def d():
repo.dirstate.invalidate()
"a" in repo.dirstate
timer(d)
+ fm.end()
@command('perfdirstatedirs')
def perfdirstatedirs(ui, repo):
+ timer, fm = gettimer(ui)
"a" in repo.dirstate
def d():
"a" in repo.dirstate._dirs
del repo.dirstate._dirs
timer(d)
+ fm.end()
@command('perfdirstatefoldmap')
def perffoldmap(ui, repo):
+ timer, fm = gettimer(ui)
dirstate = repo.dirstate
'a' in dirstate
def d():
@@ -148,19 +194,23 @@
del dirstate._foldmap
del dirstate._dirs
timer(d)
+ fm.end()
@command('perfdirstatewrite')
def perfdirstatewrite(ui, repo):
+ timer, fm = gettimer(ui)
ds = repo.dirstate
"a" in ds
def d():
ds._dirty = True
ds.write()
timer(d)
+ fm.end()
@command('perfmergecalculate',
[('r', 'rev', '.', 'rev to merge against')])
def perfmergecalculate(ui, repo, rev):
+ timer, fm = gettimer(ui)
wctx = repo[None]
rctx = scmutil.revsingle(repo, rev, rev)
ancestor = wctx.ancestor(rctx)
@@ -173,17 +223,21 @@
merge.calculateupdates(repo, wctx, rctx, ancestor, False, False, False,
acceptremote=True)
timer(d)
+ fm.end()
@command('perfpathcopies', [], "REV REV")
def perfpathcopies(ui, repo, rev1, rev2):
+ timer, fm = gettimer(ui)
ctx1 = scmutil.revsingle(repo, rev1, rev1)
ctx2 = scmutil.revsingle(repo, rev2, rev2)
def d():
copies.pathcopies(ctx1, ctx2)
timer(d)
+ fm.end()
@command('perfmanifest', [], 'REV')
def perfmanifest(ui, repo, rev):
+ timer, fm = gettimer(ui)
ctx = scmutil.revsingle(repo, rev, rev)
t = ctx.manifestnode()
def d():
@@ -191,51 +245,65 @@
repo.manifest._cache = None
repo.manifest.read(t)
timer(d)
+ fm.end()
@command('perfchangeset')
def perfchangeset(ui, repo, rev):
+ timer, fm = gettimer(ui)
n = repo[rev].node()
def d():
repo.changelog.read(n)
#repo.changelog._cache = None
timer(d)
+ fm.end()
@command('perfindex')
def perfindex(ui, repo):
import mercurial.revlog
+ timer, fm = gettimer(ui)
mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
n = repo["tip"].node()
def d():
cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
cl.rev(n)
timer(d)
+ fm.end()
@command('perfstartup')
def perfstartup(ui, repo):
+ timer, fm = gettimer(ui)
cmd = sys.argv[0]
def d():
os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
timer(d)
+ fm.end()
@command('perfparents')
def perfparents(ui, repo):
+ timer, fm = gettimer(ui)
nl = [repo.changelog.node(i) for i in xrange(1000)]
def d():
for n in nl:
repo.changelog.parents(n)
timer(d)
+ fm.end()
@command('perflookup')
def perflookup(ui, repo, rev):
+ timer, fm = gettimer(ui)
timer(lambda: len(repo.lookup(rev)))
+ fm.end()
@command('perfrevrange')
def perfrevrange(ui, repo, *specs):
+ timer, fm = gettimer(ui)
revrange = scmutil.revrange
timer(lambda: len(revrange(repo, specs)))
+ fm.end()
@command('perfnodelookup')
def perfnodelookup(ui, repo, rev):
+ timer, fm = gettimer(ui)
import mercurial.revlog
mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
n = repo[rev].node()
@@ -244,14 +312,17 @@
cl.rev(n)
clearcaches(cl)
timer(d)
+ fm.end()
@command('perflog',
[('', 'rename', False, 'ask log to follow renames')])
def perflog(ui, repo, **opts):
+ timer, fm = gettimer(ui)
ui.pushbuffer()
timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
copies=opts.get('rename')))
ui.popbuffer()
+ fm.end()
@command('perfmoonwalk')
def perfmoonwalk(ui, repo):
@@ -259,52 +330,65 @@
This also loads the changelog data for each revision in the changelog.
"""
+ timer, fm = gettimer(ui)
def moonwalk():
for i in xrange(len(repo), -1, -1):
ctx = repo[i]
ctx.branch() # read changelog data (in addition to the index)
timer(moonwalk)
+ fm.end()
@command('perftemplating')
def perftemplating(ui, repo):
+ timer, fm = gettimer(ui)
ui.pushbuffer()
timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
template='{date|shortdate} [{rev}:{node|short}]'
' {author|person}: {desc|firstline}\n'))
ui.popbuffer()
+ fm.end()
@command('perfcca')
def perfcca(ui, repo):
+ timer, fm = gettimer(ui)
timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
+ fm.end()
@command('perffncacheload')
def perffncacheload(ui, repo):
+ timer, fm = gettimer(ui)
s = repo.store
def d():
s.fncache._load()
timer(d)
+ fm.end()
@command('perffncachewrite')
def perffncachewrite(ui, repo):
+ timer, fm = gettimer(ui)
s = repo.store
s.fncache._load()
def d():
s.fncache._dirty = True
s.fncache.write()
timer(d)
+ fm.end()
@command('perffncacheencode')
def perffncacheencode(ui, repo):
+ timer, fm = gettimer(ui)
s = repo.store
s.fncache._load()
def d():
for p in s.fncache.entries:
s.encode(p)
timer(d)
+ fm.end()
@command('perfdiffwd')
def perfdiffwd(ui, repo):
"""Profile diff of working directory changes"""
+ timer, fm = gettimer(ui)
options = {
'w': 'ignore_all_space',
'b': 'ignore_space_change',
@@ -319,11 +403,13 @@
ui.popbuffer()
title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
timer(d, title)
+ fm.end()
@command('perfrevlog',
[('d', 'dist', 100, 'distance between the revisions')],
"[INDEXFILE]")
def perfrevlog(ui, repo, file_, **opts):
+ timer, fm = gettimer(ui)
from mercurial import revlog
dist = opts['dist']
def d():
@@ -332,6 +418,7 @@
r.revision(r.node(x))
timer(d)
+ fm.end()
@command('perfrevset',
[('C', 'clear', False, 'clear volatile cache between each call.')],
@@ -342,17 +429,20 @@
Use the --clean option if need to evaluate the impact of build volatile
revisions set cache on the revset execution. Volatile cache hold filtered
and obsolete related cache."""
+ timer, fm = gettimer(ui)
def d():
if clear:
repo.invalidatevolatilesets()
for r in repo.revs(expr): pass
timer(d)
+ fm.end()
@command('perfvolatilesets')
def perfvolatilesets(ui, repo, *names):
"""benchmark the computation of various volatile set
Volatile set computes element related to filtering and obsolescence."""
+ timer, fm = gettimer(ui)
repo = repo.unfiltered()
def getobs(name):
@@ -380,6 +470,7 @@
for name in allfilter:
timer(getfiltered(name), title=name)
+ fm.end()
@command('perfbranchmap',
[('f', 'full', False,
@@ -390,6 +481,7 @@
This benchmarks the full repo.branchmap() call with read and write disabled
"""
+ timer, fm = gettimer(ui)
def getbranchmap(filtername):
"""generate a benchmark function for the filtername"""
if filtername is None:
@@ -432,3 +524,13 @@
finally:
branchmap.read = oldread
branchmap.branchcache.write = oldwrite
+ fm.end()
+
+@command('perfloadmarkers')
+def perfloadmarkers(ui, repo):
+ """benchmark the time to parse the on-disk markers for a repo
+
+ Result is the number of markers in the repo."""
+ timer, fm = gettimer(ui)
+ timer(lambda: len(obsolete.obsstore(repo.sopener)))
+ fm.end()
--- a/contrib/revsetbenchmarks.py Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/revsetbenchmarks.py Tue Dec 09 13:32:19 2014 -0600
@@ -74,7 +74,7 @@
parser = OptionParser(usage="usage: %prog [options] <revs>")
parser.add_option("-f", "--file",
- help="read revset from FILE (stdin if omited)",
+ help="read revset from FILE (stdin if omitted)",
metavar="FILE")
parser.add_option("-R", "--repo",
help="run benchmark on REPO", metavar="REPO")
--- a/contrib/synthrepo.py Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/synthrepo.py Tue Dec 09 13:32:19 2014 -0600
@@ -410,16 +410,18 @@
break
if filesadded:
dirs = list(pctx.dirs())
- dirs.append('')
+ dirs.insert(0, '')
for __ in xrange(pick(filesadded)):
- path = [random.choice(dirs)]
- if pick(dirsadded):
+ pathstr = ''
+ while pathstr in dirs:
+ path = [random.choice(dirs)]
+ if pick(dirsadded):
+ path.append(random.choice(words))
path.append(random.choice(words))
- path.append(random.choice(words))
- path = '/'.join(filter(None, path))
+ pathstr = '/'.join(filter(None, path))
data = '\n'.join(makeline()
for __ in xrange(pick(linesinfilesadded))) + '\n'
- changes[path] = context.memfilectx(repo, path, data)
+ changes[pathstr] = context.memfilectx(repo, pathstr, data)
def filectxfn(repo, memctx, path):
return changes[path]
if not changes:
@@ -428,6 +430,8 @@
date = repo['tip'].date()[0] + pick(interarrival)
else:
date = time.time() - (86400 * count)
+ # dates in mercurial must be positive, fit in 32-bit signed integers.
+ date = min(0x7fffffff, max(0, date))
user = random.choice(words) + '@' + random.choice(words)
mc = context.memctx(repo, pl, makeline(minimum=2),
sorted(changes.iterkeys()),
--- a/contrib/undumprevlog Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/undumprevlog Tue Dec 09 13:32:19 2014 -0600
@@ -10,7 +10,8 @@
util.setbinary(fp)
opener = scmutil.opener('.', False)
-tr = transaction.transaction(sys.stderr.write, opener, "undump.journal")
+tr = transaction.transaction(sys.stderr.write, opener, {'store': opener},
+ "undump.journal")
while True:
l = sys.stdin.readline()
if not l:
--- a/contrib/win32/mercurial.iss Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/win32/mercurial.iss Tue Dec 09 13:32:19 2014 -0600
@@ -67,8 +67,6 @@
Source: contrib\hgweb.fcgi; DestDir: {app}/Contrib
Source: contrib\hgweb.wsgi; DestDir: {app}/Contrib
Source: contrib\win32\ReadMe.html; DestDir: {app}; Flags: isreadme
-Source: contrib\mergetools.hgrc; DestDir: {tmp};
-Source: contrib\win32\mercurial.ini; DestDir: {app}; DestName: Mercurial.ini; Check: CheckFile; AfterInstall: ConcatenateFiles;
Source: contrib\win32\postinstall.txt; DestDir: {app}; DestName: ReleaseNotes.txt
Source: dist\hg.exe; DestDir: {app}; AfterInstall: Touch('{app}\hg.exe.local')
#if ARCH == "x64"
@@ -86,6 +84,7 @@
Source: doc\*.html; DestDir: {app}\Docs
Source: doc\style.css; DestDir: {app}\Docs
Source: mercurial\help\*.txt; DestDir: {app}\help
+Source: mercurial\default.d\*.rc; DestDir: {app}\default.d
Source: mercurial\locale\*.*; DestDir: {app}\locale; Flags: recursesubdirs createallsubdirs skipifsourcedoesntexist
Source: mercurial\templates\*.*; DestDir: {app}\Templates; Flags: recursesubdirs createallsubdirs
Source: CONTRIBUTORS; DestDir: {app}; DestName: Contributors.txt
@@ -93,10 +92,13 @@
[INI]
Filename: {app}\Mercurial.url; Section: InternetShortcut; Key: URL; String: http://mercurial.selenic.com/
-Filename: {app}\Mercurial.ini; Section: web; Key: cacerts; String: {app}\cacert.pem
+Filename: {app}\default.d\editor.rc; Section: ui; Key: editor; String: notepad
+Filename: {app}\default.d\cacerts.rc; Section: web; Key: cacerts; String: {app}\cacert.pem
[UninstallDelete]
Type: files; Name: {app}\Mercurial.url
+Type: filesandordirs; Name: {app}\default.d
+Type: files; Name: "{app}\hg.exe.local"
[Icons]
Name: {group}\Uninstall Mercurial; Filename: {uninstallexe}
@@ -111,35 +113,7 @@
[UninstallRun]
Filename: "{app}\add_path.exe"; Parameters: "/del {app}"
-[UninstallDelete]
-Type: files; Name: "{app}\hg.exe.local"
-
[Code]
-var
- WriteFile: Boolean;
- CheckDone: Boolean;
-
-function CheckFile(): Boolean;
-begin
- if not CheckDone then begin
- WriteFile := True;
- if FileExists(ExpandConstant(CurrentFileName)) then begin
- WriteFile := MsgBox('' + ExpandConstant(CurrentFileName) + '' #13#13 'The file already exists.' #13#13 'Would you like Setup to overwrite it?', mbConfirmation, MB_YESNO) = idYes;
- end;
- CheckDone := True;
- end;
- Result := WriteFile;
-end;
-
-procedure ConcatenateFiles();
-var
- MergeConfigs: TArrayOfString;
-begin
- if LoadStringsFromFile(ExpandConstant('{tmp}\mergetools.hgrc'),MergeConfigs) then begin
- SaveStringsToFile(ExpandConstant(CurrentFileName),MergeConfigs,True);
- end;
-end;
-
procedure Touch(fn: String);
begin
SaveStringToFile(ExpandConstant(fn), '', False);
--- a/contrib/wix/mercurial.wxs Mon Dec 08 15:41:54 2014 -0800
+++ b/contrib/wix/mercurial.wxs Tue Dec 09 13:32:19 2014 -0600
@@ -79,7 +79,7 @@
ReadOnly='yes' KeyPath='yes'/>
</Component>
<Component Id='mergetools.rc' Guid='$(var.mergetools.rc.guid)' Win64='$(var.IsX64)'>
- <File Id='mergetools.rc' Name='MergeTools.rc' Source='contrib\mergetools.hgrc'
+ <File Id='mergetools.rc' Name='MergeTools.rc' Source='mercurial\default.d\mergetools.rc'
ReadOnly='yes' KeyPath='yes'/>
</Component>
<Component Id='paths.rc' Guid='$(var.paths.rc.guid)' Win64='$(var.IsX64)'>
--- a/hgext/color.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/color.py Tue Dec 09 13:32:19 2014 -0600
@@ -301,6 +301,11 @@
'histedit.remaining': 'red bold',
'ui.prompt': 'yellow',
'log.changeset': 'yellow',
+ 'patchbomb.finalsummary': '',
+ 'patchbomb.from': 'magenta',
+ 'patchbomb.to': 'cyan',
+ 'patchbomb.subject': 'green',
+ 'patchbomb.diffstats': '',
'rebase.rebased': 'blue',
'rebase.remaining': 'red bold',
'resolve.resolved': 'green bold',
--- a/hgext/extdiff.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/extdiff.py Tue Dec 09 13:32:19 2014 -0600
@@ -23,10 +23,9 @@
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
- # add new command called vdiff, runs kdiff3
- vdiff = kdiff3
-
- # add new command called meld, runs meld (no need to name twice)
+ # add new command called meld, runs meld (no need to name twice). If
+ # the meld executable is not available, the meld tool in [merge-tools]
+ # will be used, if available
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
@@ -63,7 +62,7 @@
from mercurial.i18n import _
from mercurial.node import short, nullid
-from mercurial import cmdutil, scmutil, util, commands, encoding
+from mercurial import cmdutil, scmutil, util, commands, encoding, filemerge
import os, shlex, shutil, tempfile, re
cmdtable = {}
@@ -90,7 +89,7 @@
wopener = scmutil.opener(base)
fns_and_mtime = []
ctx = repo[node]
- for fn in files:
+ for fn in sorted(files):
wfn = util.pconvert(fn)
if wfn not in ctx:
# File doesn't exist; could be a bogus modify
@@ -227,7 +226,7 @@
cmdline = util.shellquote(diffcmd) + ' ' + args
ui.debug('running %r in %s\n' % (cmdline, tmproot))
- util.system(cmdline, cwd=tmproot, out=ui.fout)
+ ui.system(cmdline, cwd=tmproot)
for copy_fn, working_fn, mtime in fns_and_mtime:
if os.lstat(copy_fn).st_mtime != mtime:
@@ -279,7 +278,9 @@
if cmd.startswith('cmd.'):
cmd = cmd[4:]
if not path:
- path = cmd
+ path = util.findexe(cmd)
+ if path is None:
+ path = filemerge.findexternaltool(ui, cmd) or cmd
diffopts = shlex.split(ui.config('extdiff', 'opts.' + cmd, ''))
elif cmd.startswith('opts.'):
continue
@@ -289,7 +290,9 @@
diffopts = shlex.split(path)
path = diffopts.pop(0)
else:
- path, diffopts = cmd, []
+ path, diffopts = util.findexe(cmd), []
+ if path is None:
+ path = filemerge.findexternaltool(ui, cmd) or cmd
# look for diff arguments in [diff-tools] then [merge-tools]
if diffopts == []:
args = ui.config('diff-tools', cmd+'.diffargs') or \
--- a/hgext/factotum.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/factotum.py Tue Dec 09 13:32:19 2014 -0600
@@ -72,9 +72,14 @@
l = os.read(fd, ERRMAX).split()
if l[0] == 'ok':
os.write(fd, 'read')
- l = os.read(fd, ERRMAX).split()
- if l[0] == 'ok':
- return l[1:]
+ status, user, passwd = os.read(fd, ERRMAX).split(None, 2)
+ if status == 'ok':
+ if passwd.startswith("'"):
+ if passwd.endswith("'"):
+ passwd = passwd[1:-1].replace("''", "'")
+ else:
+ raise util.Abort(_('malformed password string'))
+ return (user, passwd)
except (OSError, IOError):
raise util.Abort(_('factotum not responding'))
finally:
--- a/hgext/hgk.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/hgk.py Tue Dec 09 13:32:19 2014 -0600
@@ -35,7 +35,7 @@
'''
import os
-from mercurial import cmdutil, commands, util, patch, revlog, scmutil
+from mercurial import cmdutil, commands, patch, revlog, scmutil
from mercurial.node import nullid, nullrev, short
from mercurial.i18n import _
@@ -95,8 +95,10 @@
if opts['pretty']:
catcommit(ui, repo, node2, "")
m = scmutil.match(repo[node1], files)
+ diffopts = patch.difffeatureopts(ui)
+ diffopts.git = True
chunks = patch.diff(repo, node1, node2, match=m,
- opts=patch.diffopts(ui, {'git': True}))
+ opts=diffopts)
for chunk in chunks:
ui.write(chunk)
else:
@@ -349,4 +351,4 @@
optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
ui.debug("running %s\n" % cmd)
- util.system(cmd)
+ ui.system(cmd)
--- a/hgext/largefiles/lfcommands.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/largefiles/lfcommands.py Tue Dec 09 13:32:19 2014 -0600
@@ -268,6 +268,7 @@
mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
getfilectx, ctx.user(), ctx.date(), ctx.extra())
ret = rdst.commitctx(mctx)
+ lfutil.copyalltostore(rdst, ret)
rdst.setparents(ret)
revmap[ctx.node()] = rdst.changelog.tip()
@@ -435,8 +436,14 @@
ui.status(_("%d largefiles failed to download\n") % totalmissing)
return totalsuccess, totalmissing
-def updatelfiles(ui, repo, filelist=None, printmessage=True,
+def updatelfiles(ui, repo, filelist=None, printmessage=None,
normallookup=False):
+ '''Update largefiles according to standins in the working directory
+
+ If ``printmessage`` is other than ``None``, it means "print (or
+ ignore, for false) message forcibly".
+ '''
+ statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
wlock = repo.wlock()
try:
lfdirstate = lfutil.openlfdirstate(ui, repo)
@@ -462,10 +469,10 @@
expecthash != lfutil.hashfile(abslfile))):
if lfile not in repo[None]: # not switched to normal file
util.unlinkpath(abslfile, ignoremissing=True)
- # use normallookup() to allocate entry in largefiles
+ # use normallookup() to allocate an entry in largefiles
# dirstate, because lack of it misleads
# lfilesrepo.status() into recognition that such cache
- # missing files are REMOVED.
+ # missing files are removed.
lfdirstate.normallookup(lfile)
update[lfile] = expecthash
else:
@@ -482,8 +489,7 @@
lfdirstate.write()
if lfiles:
- if printmessage:
- ui.status(_('getting changed largefiles\n'))
+ statuswriter(_('getting changed largefiles\n'))
cachelfiles(ui, repo, None, lfiles)
for lfile in lfiles:
@@ -527,8 +533,8 @@
lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
lfdirstate.write()
- if printmessage and lfiles:
- ui.status(_('%d largefiles updated, %d removed\n') % (updated,
+ if lfiles:
+ statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
removed))
finally:
wlock.release()
--- a/hgext/largefiles/lfutil.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/largefiles/lfutil.py Tue Dec 09 13:32:19 2014 -0600
@@ -12,6 +12,7 @@
import platform
import shutil
import stat
+import copy
from mercurial import dirstate, httpconnection, match as match_, util, scmutil
from mercurial.i18n import _
@@ -203,7 +204,7 @@
def copytostoreabsolute(repo, file, hash):
if inusercache(repo.ui, hash):
link(usercachepath(repo.ui, hash), storepath(repo, hash))
- elif not getattr(repo, "_isconverting", False):
+ else:
util.makedirs(os.path.dirname(storepath(repo, hash)))
dst = util.atomictempfile(storepath(repo, hash),
createmode=repo.store.createmode)
@@ -386,6 +387,30 @@
elif state == '?':
lfdirstate.drop(lfile)
+def markcommitted(orig, ctx, node):
+ repo = ctx._repo
+
+ orig(node)
+
+ # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
+ # because files coming from the 2nd parent are omitted in the latter.
+ #
+ # The former should be used to get targets of "synclfdirstate",
+ # because such files:
+ # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
+ # - have to be marked as "n" after commit, but
+ # - aren't listed in "repo[node].files()"
+
+ lfdirstate = openlfdirstate(repo.ui, repo)
+ for f in ctx.files():
+ if isstandin(f):
+ lfile = splitstandin(f)
+ synclfdirstate(repo, lfdirstate, lfile, False)
+ lfdirstate.write()
+
+ # As part of committing, copy all of the largefiles into the cache.
+ copyalltostore(repo, node)
+
def getlfilestoupdate(oldstandins, newstandins):
changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
filelist = []
@@ -415,3 +440,137 @@
for fn in files:
if isstandin(fn) and fn in ctx:
addfunc(fn, ctx[fn].data().strip())
+
+def updatestandinsbymatch(repo, match):
+ '''Update standins in the working directory according to specified match
+
+ This returns (possibly modified) ``match`` object to be used for
+ subsequent commit process.
+ '''
+
+ ui = repo.ui
+
+ # Case 1: user calls commit with no specific files or
+ # include/exclude patterns: refresh and commit all files that
+ # are "dirty".
+ if match is None or match.always():
+ # Spend a bit of time here to get a list of files we know
+ # are modified so we can compare only against those.
+ # It can cost a lot of time (several seconds)
+ # otherwise to update all standins if the largefiles are
+ # large.
+ lfdirstate = openlfdirstate(ui, repo)
+ dirtymatch = match_.always(repo.root, repo.getcwd())
+ unsure, s = lfdirstate.status(dirtymatch, [], False, False,
+ False)
+ modifiedfiles = unsure + s.modified + s.added + s.removed
+ lfiles = listlfiles(repo)
+ # this only loops through largefiles that exist (not
+ # removed/renamed)
+ for lfile in lfiles:
+ if lfile in modifiedfiles:
+ if os.path.exists(
+ repo.wjoin(standin(lfile))):
+ # this handles the case where a rebase is being
+ # performed and the working copy is not updated
+ # yet.
+ if os.path.exists(repo.wjoin(lfile)):
+ updatestandin(repo,
+ standin(lfile))
+
+ return match
+
+ lfiles = listlfiles(repo)
+ match._files = repo._subdirlfs(match.files(), lfiles)
+
+ # Case 2: user calls commit with specified patterns: refresh
+ # any matching big files.
+ smatcher = composestandinmatcher(repo, match)
+ standins = repo.dirstate.walk(smatcher, [], False, False)
+
+ # No matching big files: get out of the way and pass control to
+ # the usual commit() method.
+ if not standins:
+ return match
+
+ # Refresh all matching big files. It's possible that the
+ # commit will end up failing, in which case the big files will
+ # stay refreshed. No harm done: the user modified them and
+ # asked to commit them, so sooner or later we're going to
+ # refresh the standins. Might as well leave them refreshed.
+ lfdirstate = openlfdirstate(ui, repo)
+ for fstandin in standins:
+ lfile = splitstandin(fstandin)
+ if lfdirstate[lfile] != 'r':
+ updatestandin(repo, fstandin)
+
+ # Cook up a new matcher that only matches regular files or
+ # standins corresponding to the big files requested by the
+ # user. Have to modify _files to prevent commit() from
+ # complaining "not tracked" for big files.
+ match = copy.copy(match)
+ origmatchfn = match.matchfn
+
+ # Check both the list of largefiles and the list of
+ # standins because if a largefile was removed, it
+ # won't be in the list of largefiles at this point
+ match._files += sorted(standins)
+
+ actualfiles = []
+ for f in match._files:
+ fstandin = standin(f)
+
+ # ignore known largefiles and standins
+ if f in lfiles or fstandin in standins:
+ continue
+
+ actualfiles.append(f)
+ match._files = actualfiles
+
+ def matchfn(f):
+ if origmatchfn(f):
+ return f not in lfiles
+ else:
+ return f in standins
+
+ match.matchfn = matchfn
+
+ return match
+
+class automatedcommithook(object):
+ '''Statefull hook to update standins at the 1st commit of resuming
+
+ For efficiency, updating standins in the working directory should
+ be avoided while automated committing (like rebase, transplant and
+ so on), because they should be updated before committing.
+
+ But the 1st commit of resuming automated committing (e.g. ``rebase
+ --continue``) should update them, because largefiles may be
+ modified manually.
+ '''
+ def __init__(self, resuming):
+ self.resuming = resuming
+
+ def __call__(self, repo, match):
+ if self.resuming:
+ self.resuming = False # avoids updating at subsequent commits
+ return updatestandinsbymatch(repo, match)
+ else:
+ return match
+
+def getstatuswriter(ui, repo, forcibly=None):
+ '''Return the function to write largefiles specific status out
+
+ If ``forcibly`` is ``None``, this returns the last element of
+ ``repo._lfupdatereporters`` as "default" writer function.
+
+ Otherwise, this returns the function to always write out (or
+ ignore if ``not forcibly``) status.
+ '''
+ if forcibly is None:
+ return repo._lfstatuswriters[-1]
+ else:
+ if forcibly:
+ return ui.status # forcibly WRITE OUT
+ else:
+ return lambda *msg, **opts: None # forcibly IGNORE
--- a/hgext/largefiles/overrides.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/largefiles/overrides.py Tue Dec 09 13:32:19 2014 -0600
@@ -11,11 +11,10 @@
import os
import copy
-from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
+from mercurial import hg, util, cmdutil, scmutil, match as match_, \
archival, pathutil, revset
from mercurial.i18n import _
from mercurial.node import hex
-from hgext import rebase
import lfutil
import lfcommands
@@ -23,20 +22,23 @@
# -- Utility functions: commonly/repeatedly needed functionality ---------------
+def composenormalfilematcher(match, manifest):
+ m = copy.copy(match)
+ notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
+ manifest)
+ m._files = filter(notlfile, m._files)
+ m._fmap = set(m._files)
+ m._always = False
+ origmatchfn = m.matchfn
+ m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
+ return m
+
def installnormalfilesmatchfn(manifest):
'''installmatchfn with a matchfn that ignores all largefiles'''
def overridematch(ctx, pats=[], opts={}, globbed=False,
default='relpath'):
match = oldmatch(ctx, pats, opts, globbed, default)
- m = copy.copy(match)
- notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
- manifest)
- m._files = filter(notlfile, m._files)
- m._fmap = set(m._files)
- m._always = False
- origmatchfn = m.matchfn
- m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
- return m
+ return composenormalfilematcher(match, manifest)
oldmatch = installmatchfn(overridematch)
def installmatchfn(f):
@@ -63,10 +65,10 @@
def restorematchandpatsfn():
'''restores scmutil.matchandpats to what it was before
- installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
+ installmatchandpatsfn was called. No-op if scmutil.matchandpats
is its original function.
- Note that n calls to installnormalfilesmatchandpatsfn will require n calls
+ Note that n calls to installmatchandpatsfn will require n calls
to restore matchfn to reverse'''
scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
scmutil.matchandpats)
@@ -373,7 +375,7 @@
wlock.release()
# Before starting the manifest merge, merge.updates will call
-# _checkunknown to check if there are any files in the merged-in
+# checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
@@ -381,7 +383,7 @@
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
-# case further in the overridden manifestmerge function below.
+# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
return False
@@ -390,7 +392,7 @@
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
-# The strategy is to run the original manifestmerge and then process
+# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
@@ -435,51 +437,40 @@
# the second parent
lfile = splitstandin
standin = f
- msg = _('remote turned local normal file %s into a largefile\n'
- 'use (l)argefile or keep (n)ormal file?'
- '$$ &Largefile $$ &Normal file') % lfile
- if (# local has unchanged normal file, pick remote largefile
- pas and lfile in pas[0] and
- not pas[0][lfile].cmp(p1[lfile]) or
- # if remote has unchanged largefile, pick local normal file
- not (pas and standin in pas[0] and
- not pas[0][standin].cmp(p2[standin])) and
- # else, prompt
- repo.ui.promptchoice(msg, 0) == 0
- ): # pick remote largefile
- actions['r'].append((lfile, None, msg))
+ usermsg = _('remote turned local normal file %s into a largefile\n'
+ 'use (l)argefile or keep (n)ormal file?'
+ '$$ &Largefile $$ &Normal file') % lfile
+ if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
+ actions['r'].append((lfile, None, 'replaced by standin'))
newglist.append((standin, (p2.flags(standin),), msg))
else: # keep local normal file
- actions['r'].append((standin, None, msg))
+ if branchmerge:
+ actions['k'].append((standin, None,
+ 'replaced by non-standin'))
+ else:
+ actions['r'].append((standin, None,
+ 'replaced by non-standin'))
elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
# Case 2: largefile in the working copy, normal file in
# the second parent
standin = lfutil.standin(f)
lfile = f
- msg = _('remote turned local largefile %s into a normal file\n'
+ usermsg = _('remote turned local largefile %s into a normal file\n'
'keep (l)argefile or use (n)ormal file?'
'$$ &Largefile $$ &Normal file') % lfile
- if (# if remote has unchanged normal file, pick local largefile
- pas and f in pas[0] and
- not pas[0][f].cmp(p2[f]) or
- # if local has unchanged largefile, pick remote normal file
- not (pas and standin in pas[0] and
- not pas[0][standin].cmp(p1[standin])) and
- # else, prompt
- repo.ui.promptchoice(msg, 0) == 0
- ): # keep local largefile
+ if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
if branchmerge:
# largefile can be restored from standin safely
- actions['r'].append((lfile, None, msg))
+ actions['k'].append((lfile, None, 'replaced by standin'))
else:
# "lfile" should be marked as "removed" without
# removal of itself
- lfmr.append((lfile, None, msg))
+ lfmr.append((lfile, None, 'forget non-standin largefile'))
# linear-merge should treat this largefile as 're-added'
- actions['a'].append((standin, None, msg))
+ actions['a'].append((standin, None, 'keep standin'))
else: # pick remote normal file
- actions['r'].append((standin, None, msg))
+ actions['r'].append((standin, None, 'replaced by non-standin'))
newglist.append((lfile, (p2.flags(lfile),), msg))
else:
newglist.append(action)
@@ -592,7 +583,6 @@
lfile = lambda f: lfutil.standin(f) in manifest
m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
m._fmap = set(m._files)
- m._always = False
origmatchfn = m.matchfn
m.matchfn = lambda f: (lfutil.isstandin(f) and
(f in manifest) and
@@ -700,7 +690,6 @@
m._files = [tostandin(f) for f in m._files]
m._files = [f for f in m._files if f is not None]
m._fmap = set(m._files)
- m._always = False
origmatchfn = m.matchfn
def matchfn(f):
if lfutil.isstandin(f):
@@ -728,37 +717,14 @@
finally:
wlock.release()
-# When we rebase a repository with remotely changed largefiles, we need to
-# take some extra care so that the largefiles are correctly updated in the
-# working copy
+# after pulling changesets, we need to take some extra care to get
+# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
revsprepull = len(repo)
if not source:
source = 'default'
repo.lfpullsource = source
- if opts.get('rebase', False):
- repo._isrebasing = True
- try:
- if opts.get('update'):
- del opts['update']
- ui.debug('--update and --rebase are not compatible, ignoring '
- 'the update flag\n')
- del opts['rebase']
- origpostincoming = commands.postincoming
- def _dummy(*args, **kwargs):
- pass
- commands.postincoming = _dummy
- try:
- result = commands.pull(ui, repo, source, **opts)
- finally:
- commands.postincoming = origpostincoming
- revspostpull = len(repo)
- if revspostpull > revsprepull:
- result = result or rebase.rebase(ui, repo)
- finally:
- repo._isrebasing = False
- else:
- result = orig(ui, repo, source, **opts)
+ result = orig(ui, repo, source, **opts)
revspostpull = len(repo)
lfrevs = opts.get('lfrev', [])
if opts.get('all_largefiles'):
@@ -832,11 +798,14 @@
return result
def overriderebase(orig, ui, repo, **opts):
- repo._isrebasing = True
+ resuming = opts.get('continue')
+ repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
+ repo._lfstatuswriters.append(lambda *msg, **opts: None)
try:
return orig(ui, repo, **opts)
finally:
- repo._isrebasing = False
+ repo._lfstatuswriters.pop()
+ repo._lfcommithooks.pop()
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
prefix=None, mtime=None, subrepos=None):
@@ -966,7 +935,7 @@
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
-# largefiles were changed. This is used by bisect and backout.
+# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo):
orig(repo)
repo.lfstatus = True
@@ -975,15 +944,6 @@
if s.modified or s.added or s.removed or s.deleted:
raise util.Abort(_('uncommitted changes'))
-# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
-def overridefetch(orig, ui, repo, *pats, **opts):
- repo.lfstatus = True
- s = repo.status()
- repo.lfstatus = False
- if s.modified or s.added or s.removed or s.deleted:
- raise util.Abort(_('uncommitted changes'))
- return orig(ui, repo, *pats, **opts)
-
def overrideforget(orig, ui, repo, *pats, **opts):
installnormalfilesmatchfn(repo[None].manifest())
result = orig(ui, repo, *pats, **opts)
@@ -1150,9 +1110,6 @@
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
- # XXX large file status is buggy when used on repo proxy.
- # XXX this needs to be investigate.
- repo = repo.unfiltered()
oldstatus = repo.status
def overridestatus(node1='.', node2=None, match=None, ignored=False,
clean=False, unknown=False, listsubrepos=False):
@@ -1207,16 +1164,14 @@
return result
def overridetransplant(orig, ui, repo, *revs, **opts):
+ resuming = opts.get('continue')
+ repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
+ repo._lfstatuswriters.append(lambda *msg, **opts: None)
try:
- oldstandins = lfutil.getstandinsstate(repo)
- repo._istransplanting = True
result = orig(ui, repo, *revs, **opts)
- newstandins = lfutil.getstandinsstate(repo)
- filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
- lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
- printmessage=True)
finally:
- repo._istransplanting = False
+ repo._lfstatuswriters.pop()
+ repo._lfcommithooks.pop()
return result
def overridecat(orig, ui, repo, file1, *pats, **opts):
@@ -1267,14 +1222,6 @@
err = 0
return err
-def mercurialsinkbefore(orig, sink):
- sink.repo._isconverting = True
- orig(sink)
-
-def mercurialsinkafter(orig, sink):
- sink.repo._isconverting = False
- orig(sink)
-
def mergeupdate(orig, repo, node, branchmerge, force, partial,
*args, **kwargs):
wlock = repo.wlock()
@@ -1318,11 +1265,7 @@
newstandins = lfutil.getstandinsstate(repo)
filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
- # suppress status message while automated committing
- printmessage = not (getattr(repo, "_isrebasing", False) or
- getattr(repo, "_istransplanting", False))
lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
- printmessage=printmessage,
normallookup=partial)
return result
--- a/hgext/largefiles/reposetup.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/largefiles/reposetup.py Tue Dec 09 13:32:19 2014 -0600
@@ -102,12 +102,12 @@
except error.LockError:
pass
- # First check if there were files specified on the
- # command line. If there were, and none of them were
+ # First check if paths or patterns were specified on the
+ # command line. If there were, and they don't match any
# largefiles, we should just bail here and let super
# handle it -- thus gaining a big performance boost.
lfdirstate = lfutil.openlfdirstate(ui, self)
- if match.files() and not match.anypats():
+ if not match.always():
for f in lfdirstate:
if match(f):
break
@@ -243,11 +243,13 @@
self.lfstatus = True
return scmutil.status(*result)
- # As part of committing, copy all of the largefiles into the
- # cache.
- def commitctx(self, *args, **kwargs):
- node = super(lfilesrepo, self).commitctx(*args, **kwargs)
- lfutil.copyalltostore(self, node)
+ def commitctx(self, ctx, *args, **kwargs):
+ node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
+ class lfilesctx(ctx.__class__):
+ def markcommitted(self, node):
+ orig = super(lfilesctx, self).markcommitted
+ return lfutil.markcommitted(orig, self, node)
+ ctx.__class__ = lfilesctx
return node
# Before commit, largefile standins have not had their
@@ -259,139 +261,10 @@
wlock = self.wlock()
try:
- # Case 0: Automated committing
- #
- # While automated committing (like rebase, transplant
- # and so on), this code path is used to avoid:
- # (1) updating standins, because standins should
- # be already updated at this point
- # (2) aborting when stadnins are matched by "match",
- # because automated committing may specify them directly
- #
- if getattr(self, "_isrebasing", False) or \
- getattr(self, "_istransplanting", False):
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
-
- if result:
- lfdirstate = lfutil.openlfdirstate(ui, self)
- for f in self[result].files():
- if lfutil.isstandin(f):
- lfile = lfutil.splitstandin(f)
- lfutil.synclfdirstate(self, lfdirstate, lfile,
- False)
- lfdirstate.write()
-
- return result
- # Case 1: user calls commit with no specific files or
- # include/exclude patterns: refresh and commit all files that
- # are "dirty".
- if ((match is None) or
- (not match.anypats() and not match.files())):
- # Spend a bit of time here to get a list of files we know
- # are modified so we can compare only against those.
- # It can cost a lot of time (several seconds)
- # otherwise to update all standins if the largefiles are
- # large.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- dirtymatch = match_.always(self.root, self.getcwd())
- unsure, s = lfdirstate.status(dirtymatch, [], False, False,
- False)
- modifiedfiles = unsure + s.modified + s.added + s.removed
- lfiles = lfutil.listlfiles(self)
- # this only loops through largefiles that exist (not
- # removed/renamed)
- for lfile in lfiles:
- if lfile in modifiedfiles:
- if os.path.exists(
- self.wjoin(lfutil.standin(lfile))):
- # this handles the case where a rebase is being
- # performed and the working copy is not updated
- # yet.
- if os.path.exists(self.wjoin(lfile)):
- lfutil.updatestandin(self,
- lfutil.standin(lfile))
- lfdirstate.normal(lfile)
-
- result = orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
-
- if result is not None:
- for lfile in lfdirstate:
- if lfile in modifiedfiles:
- if (not os.path.exists(self.wjoin(
- lfutil.standin(lfile)))) or \
- (not os.path.exists(self.wjoin(lfile))):
- lfdirstate.drop(lfile)
-
- # This needs to be after commit; otherwise precommit hooks
- # get the wrong status
- lfdirstate.write()
- return result
-
- lfiles = lfutil.listlfiles(self)
- match._files = self._subdirlfs(match.files(), lfiles)
-
- # Case 2: user calls commit with specified patterns: refresh
- # any matching big files.
- smatcher = lfutil.composestandinmatcher(self, match)
- standins = self.dirstate.walk(smatcher, [], False, False)
-
- # No matching big files: get out of the way and pass control to
- # the usual commit() method.
- if not standins:
- return orig(text=text, user=user, date=date, match=match,
- force=force, editor=editor, extra=extra)
-
- # Refresh all matching big files. It's possible that the
- # commit will end up failing, in which case the big files will
- # stay refreshed. No harm done: the user modified them and
- # asked to commit them, so sooner or later we're going to
- # refresh the standins. Might as well leave them refreshed.
- lfdirstate = lfutil.openlfdirstate(ui, self)
- for standin in standins:
- lfile = lfutil.splitstandin(standin)
- if lfdirstate[lfile] != 'r':
- lfutil.updatestandin(self, standin)
- lfdirstate.normal(lfile)
- else:
- lfdirstate.drop(lfile)
-
- # Cook up a new matcher that only matches regular files or
- # standins corresponding to the big files requested by the
- # user. Have to modify _files to prevent commit() from
- # complaining "not tracked" for big files.
- match = copy.copy(match)
- origmatchfn = match.matchfn
-
- # Check both the list of largefiles and the list of
- # standins because if a largefile was removed, it
- # won't be in the list of largefiles at this point
- match._files += sorted(standins)
-
- actualfiles = []
- for f in match._files:
- fstandin = lfutil.standin(f)
-
- # ignore known largefiles and standins
- if f in lfiles or fstandin in standins:
- continue
-
- actualfiles.append(f)
- match._files = actualfiles
-
- def matchfn(f):
- if origmatchfn(f):
- return f not in lfiles
- else:
- return f in standins
-
- match.matchfn = matchfn
+ lfcommithook = self._lfcommithooks[-1]
+ match = lfcommithook(self, match)
result = orig(text=text, user=user, date=date, match=match,
force=force, editor=editor, extra=extra)
- # This needs to be after commit; otherwise precommit hooks
- # get the wrong status
- lfdirstate.write()
return result
finally:
wlock.release()
@@ -407,6 +280,8 @@
return super(lfilesrepo, self).push(remote, force=force, revs=revs,
newbranch=newbranch)
+ # TODO: _subdirlfs should be moved into "lfutil.py", because
+ # it is referred only from "lfutil.updatestandinsbymatch"
def _subdirlfs(self, files, lfiles):
'''
Adjust matched file list
@@ -463,6 +338,15 @@
repo.__class__ = lfilesrepo
+ # stack of hooks being executed before committing.
+ # only last element ("_lfcommithooks[-1]") is used for each committing.
+ repo._lfcommithooks = [lfutil.updatestandinsbymatch]
+
+ # Stack of status writer functions taking "*msg, **opts" arguments
+ # like "ui.status()". Only last element ("_lfupdatereporters[-1]")
+ # is used to write status out.
+ repo._lfstatuswriters = [ui.status]
+
def prepushoutgoinghook(local, remote, outgoing):
if outgoing.missing:
toupload = set()
--- a/hgext/largefiles/uisetup.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/largefiles/uisetup.py Tue Dec 09 13:32:19 2014 -0600
@@ -160,22 +160,14 @@
# override some extensions' stuff as well
for name, module in extensions.extensions():
- if name == 'fetch':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
- overrides.overridefetch)
if name == 'purge':
extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
overrides.overridepurge)
if name == 'rebase':
extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
overrides.overriderebase)
+ extensions.wrapfunction(module, 'rebase',
+ overrides.overriderebase)
if name == 'transplant':
extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
overrides.overridetransplant)
- if name == 'convert':
- convcmd = getattr(module, 'convcmd')
- hgsink = getattr(convcmd, 'mercurial_sink')
- extensions.wrapfunction(hgsink, 'before',
- overrides.mercurialsinkbefore)
- extensions.wrapfunction(hgsink, 'after',
- overrides.mercurialsinkafter)
--- a/hgext/mq.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/mq.py Tue Dec 09 13:32:19 2014 -0600
@@ -114,6 +114,12 @@
'# Node ID ',
'# Parent ', # can occur twice for merges - but that is not relevant for mq
]
+# The order of headers in plain 'mail style' patches:
+PLAINHEADERS = {
+ 'from': 0,
+ 'date': 1,
+ 'subject': 2,
+ }
def inserthgheader(lines, header, value):
"""Assuming lines contains a HG patch header, add a header line with value.
@@ -156,9 +162,40 @@
return lines
def insertplainheader(lines, header, value):
- if lines and lines[0] and ':' not in lines[0]:
- lines.insert(0, '')
- lines.insert(0, '%s: %s' % (header, value))
+ """For lines containing a plain patch header, add a header line with value.
+ >>> insertplainheader([], 'Date', 'z')
+ ['Date: z']
+ >>> insertplainheader([''], 'Date', 'z')
+ ['Date: z', '']
+ >>> insertplainheader(['x'], 'Date', 'z')
+ ['Date: z', '', 'x']
+ >>> insertplainheader(['From: y', 'x'], 'Date', 'z')
+ ['From: y', 'Date: z', '', 'x']
+ >>> insertplainheader([' date : x', ' from : y', ''], 'From', 'z')
+ [' date : x', 'From: z', '']
+ >>> insertplainheader(['', 'Date: y'], 'Date', 'z')
+ ['Date: z', '', 'Date: y']
+ >>> insertplainheader(['foo: bar', 'DATE: z', 'x'], 'From', 'y')
+ ['From: y', 'foo: bar', 'DATE: z', '', 'x']
+ """
+ newprio = PLAINHEADERS[header.lower()]
+ bestpos = len(lines)
+ for i, line in enumerate(lines):
+ if ':' in line:
+ lheader = line.split(':', 1)[0].strip().lower()
+ lprio = PLAINHEADERS.get(lheader, newprio + 1)
+ if lprio == newprio:
+ lines[i] = '%s: %s' % (header, value)
+ return lines
+ if lprio > newprio and i < bestpos:
+ bestpos = i
+ else:
+ if line:
+ lines.insert(i, '')
+ if i < bestpos:
+ bestpos = i
+ break
+ lines.insert(bestpos, '%s: %s' % (header, value))
return lines
class patchheader(object):
@@ -266,38 +303,34 @@
for c in self.comments))
def setuser(self, user):
- if not self.updateheader(['From: ', '# User '], user):
- try:
- inserthgheader(self.comments, '# User ', user)
- except ValueError:
- if self.plainmode:
- insertplainheader(self.comments, 'From', user)
- else:
- tmp = ['# HG changeset patch', '# User ' + user]
- self.comments = tmp + self.comments
+ try:
+ inserthgheader(self.comments, '# User ', user)
+ except ValueError:
+ if self.plainmode:
+ insertplainheader(self.comments, 'From', user)
+ else:
+ tmp = ['# HG changeset patch', '# User ' + user]
+ self.comments = tmp + self.comments
self.user = user
def setdate(self, date):
- if not self.updateheader(['Date: ', '# Date '], date):
- try:
- inserthgheader(self.comments, '# Date ', date)
- except ValueError:
- if self.plainmode:
- insertplainheader(self.comments, 'Date', date)
- else:
- tmp = ['# HG changeset patch', '# Date ' + date]
- self.comments = tmp + self.comments
+ try:
+ inserthgheader(self.comments, '# Date ', date)
+ except ValueError:
+ if self.plainmode:
+ insertplainheader(self.comments, 'Date', date)
+ else:
+ tmp = ['# HG changeset patch', '# Date ' + date]
+ self.comments = tmp + self.comments
self.date = date
def setparent(self, parent):
- if not (self.updateheader(['# Parent '], parent) or
- self.updateheader(['# Parent '], parent)):
- try:
- inserthgheader(self.comments, '# Parent ', parent)
- except ValueError:
- if not self.plainmode:
- tmp = ['# HG changeset patch', '# Parent ' + parent]
- self.comments = tmp + self.comments
+ try:
+ inserthgheader(self.comments, '# Parent ', parent)
+ except ValueError:
+ if not self.plainmode:
+ tmp = ['# HG changeset patch', '# Parent ' + parent]
+ self.comments = tmp + self.comments
self.parent = parent
def setmessage(self, message):
@@ -309,18 +342,6 @@
self.comments.append('')
self.comments.append(message)
- def updateheader(self, prefixes, new):
- '''Update all references to a field in the patch header.
- Return whether the field is present.'''
- res = False
- for prefix in prefixes:
- for i in xrange(len(self.comments)):
- if self.comments[i].startswith(prefix):
- self.comments[i] = prefix + new
- res = True
- break
- return res
-
def __str__(self):
s = '\n'.join(self.comments).rstrip()
if not s:
--- a/hgext/notify.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/notify.py Tue Dec 09 13:32:19 2014 -0600
@@ -341,7 +341,8 @@
maxdiff = int(self.ui.config('notify', 'maxdiff', 300))
prev = ctx.p1().node()
ref = ref and ref.node() or ctx.node()
- chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui))
+ chunks = patch.diff(self.repo, prev, ref,
+ opts=patch.diffallopts(self.ui))
difflines = ''.join(chunks).splitlines()
if self.ui.configbool('notify', 'diffstat', True):
--- a/hgext/patchbomb.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/patchbomb.py Tue Dec 09 13:32:19 2014 -0600
@@ -43,6 +43,18 @@
that the patchbomb extension can automatically send patchbombs
directly from the commandline. See the [email] and [smtp] sections in
hgrc(5) for details.
+
+You can control the default inclusion of an introduction message with the
+``patchbomb.intro`` configuration option. The configuration is always
+overridden by command line flags like --intro and --desc::
+
+ [patchbomb]
+ intro=auto # include introduction message if more than 1 patch (default)
+ intro=never # never include an introduction message
+ intro=always # always include an introduction message
+
+You can set patchbomb to always ask for confirmation by setting
+``patchbomb.confirm`` to true.
'''
import os, errno, socket, tempfile, cStringIO
@@ -66,9 +78,23 @@
prompt += ' [%s]' % default
return ui.prompt(prompt + rest, default)
-def introwanted(opts, number):
+def introwanted(ui, opts, number):
'''is an introductory message apparently wanted?'''
- return number > 1 or opts.get('intro') or opts.get('desc')
+ introconfig = ui.config('patchbomb', 'intro', 'auto')
+ if opts.get('intro') or opts.get('desc'):
+ intro = True
+ elif introconfig == 'always':
+ intro = True
+ elif introconfig == 'never':
+ intro = False
+ elif introconfig == 'auto':
+ intro = 1 < number
+ else:
+ ui.write_err(_('warning: invalid patchbomb.intro value "%s"\n')
+ % introconfig)
+ ui.write_err(_('(should be one of always, never, auto)\n'))
+ intro = 1 < number
+ return intro
def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
patchname=None):
@@ -153,6 +179,175 @@
msg['X-Mercurial-Series-Total'] = '%i' % total
return msg, subj, ds
+def _getpatches(repo, revs, **opts):
+ """return a list of patches for a list of revisions
+
+ Each patch in the list is itself a list of lines.
+ """
+ ui = repo.ui
+ prev = repo['.'].rev()
+ for r in scmutil.revrange(repo, revs):
+ if r == prev and (repo[None].files() or repo[None].deleted()):
+ ui.warn(_('warning: working directory has '
+ 'uncommitted changes\n'))
+ output = cStringIO.StringIO()
+ cmdutil.export(repo, [r], fp=output,
+ opts=patch.difffeatureopts(ui, opts, git=True))
+ yield output.getvalue().split('\n')
+def _getbundle(repo, dest, **opts):
+ """return a bundle containing changesets missing in "dest"
+
+ The `opts` keyword-arguments are the same as the one accepted by the
+ `bundle` command.
+
+ The bundle is a returned as a single in-memory binary blob.
+ """
+ ui = repo.ui
+ tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
+ tmpfn = os.path.join(tmpdir, 'bundle')
+ try:
+ commands.bundle(ui, repo, tmpfn, dest, **opts)
+ fp = open(tmpfn, 'rb')
+ data = fp.read()
+ fp.close()
+ return data
+ finally:
+ try:
+ os.unlink(tmpfn)
+ except OSError:
+ pass
+ os.rmdir(tmpdir)
+
+def _getdescription(repo, defaultbody, sender, **opts):
+ """obtain the body of the introduction message and return it
+
+ This is also used for the body of email with an attached bundle.
+
+ The body can be obtained either from the command line option or entered by
+ the user through the editor.
+ """
+ ui = repo.ui
+ if opts.get('desc'):
+ body = open(opts.get('desc')).read()
+ else:
+ ui.write(_('\nWrite the introductory message for the '
+ 'patch series.\n\n'))
+ body = ui.edit(defaultbody, sender)
+ # Save series description in case sendmail fails
+ msgfile = repo.opener('last-email.txt', 'wb')
+ msgfile.write(body)
+ msgfile.close()
+ return body
+
+def _getbundlemsgs(repo, sender, bundle, **opts):
+ """Get the full email for sending a given bundle
+
+ This function returns a list of "email" tuples (subject, content, None).
+ The list is always one message long in that case.
+ """
+ ui = repo.ui
+ _charsets = mail._charsets(ui)
+ subj = (opts.get('subject')
+ or prompt(ui, 'Subject:', 'A bundle for your repository'))
+
+ body = _getdescription(repo, '', sender, **opts)
+ msg = email.MIMEMultipart.MIMEMultipart()
+ if body:
+ msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
+ datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
+ datapart.set_payload(bundle)
+ bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
+ datapart.add_header('Content-Disposition', 'attachment',
+ filename=bundlename)
+ email.Encoders.encode_base64(datapart)
+ msg.attach(datapart)
+ msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
+ return [(msg, subj, None)]
+
+def _makeintro(repo, sender, patches, **opts):
+ """make an introduction email, asking the user for content if needed
+
+ email is returned as (subject, body, cumulative-diffstat)"""
+ ui = repo.ui
+ _charsets = mail._charsets(ui)
+ tlen = len(str(len(patches)))
+
+ flag = opts.get('flag') or ''
+ if flag:
+ flag = ' ' + ' '.join(flag)
+ prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag)
+
+ subj = (opts.get('subject') or
+ prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
+ if not subj:
+ return None # skip intro if the user doesn't bother
+
+ subj = prefix + ' ' + subj
+
+ body = ''
+ if opts.get('diffstat'):
+ # generate a cumulative diffstat of the whole patch series
+ diffstat = patch.diffstat(sum(patches, []))
+ body = '\n' + diffstat
+ else:
+ diffstat = None
+
+ body = _getdescription(repo, body, sender, **opts)
+ msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
+ msg['Subject'] = mail.headencode(ui, subj, _charsets,
+ opts.get('test'))
+ return (msg, subj, diffstat)
+
+def _getpatchmsgs(repo, sender, patches, patchnames=None, **opts):
+ """return a list of emails from a list of patches
+
+ This involves introduction message creation if necessary.
+
+ This function returns a list of "email" tuples (subject, content, None).
+ """
+ ui = repo.ui
+ _charsets = mail._charsets(ui)
+ msgs = []
+
+ ui.write(_('this patch series consists of %d patches.\n\n')
+ % len(patches))
+
+ # build the intro message, or skip it if the user declines
+ if introwanted(ui, opts, len(patches)):
+ msg = _makeintro(repo, sender, patches, **opts)
+ if msg:
+ msgs.append(msg)
+
+ # are we going to send more than one message?
+ numbered = len(msgs) + len(patches) > 1
+
+ # now generate the actual patch messages
+ name = None
+ for i, p in enumerate(patches):
+ if patchnames:
+ name = patchnames[i]
+ msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
+ len(patches), numbered, name)
+ msgs.append(msg)
+
+ return msgs
+
+def _getoutgoing(repo, dest, revs):
+ '''Return the revisions present locally but not in dest'''
+ ui = repo.ui
+ url = ui.expandpath(dest or 'default-push', dest or 'default')
+ url = hg.parseurl(url)[0]
+ ui.status(_('comparing with %s\n') % util.hidepassword(url))
+
+ revs = [r for r in scmutil.revrange(repo, revs) if r >= 0]
+ if not revs:
+ revs = [len(repo) - 1]
+ revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
+ if not revs:
+ ui.status(_("no changes found\n"))
+ return []
+ return [str(r) for r in revs]
+
emailopts = [
('', 'body', None, _('send patches as inline message text (default)')),
('a', 'attach', None, _('send patches as attachments')),
@@ -240,6 +435,9 @@
In case email sending fails, you will find a backup of your series
introductory message in ``.hg/last-email.txt``.
+ The default behavior of this command can be customized through
+ configuration. (See :hg:`help patchbomb` for details)
+
Examples::
hg email -r 3000 # send patch 3000 only
@@ -277,48 +475,6 @@
# internal option used by pbranches
patches = opts.get('patches')
- def getoutgoing(dest, revs):
- '''Return the revisions present locally but not in dest'''
- url = ui.expandpath(dest or 'default-push', dest or 'default')
- url = hg.parseurl(url)[0]
- ui.status(_('comparing with %s\n') % util.hidepassword(url))
-
- revs = [r for r in scmutil.revrange(repo, revs) if r >= 0]
- if not revs:
- revs = [len(repo) - 1]
- revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
- if not revs:
- ui.status(_("no changes found\n"))
- return []
- return [str(r) for r in revs]
-
- def getpatches(revs):
- prev = repo['.'].rev()
- for r in scmutil.revrange(repo, revs):
- if r == prev and (repo[None].files() or repo[None].deleted()):
- ui.warn(_('warning: working directory has '
- 'uncommitted changes\n'))
- output = cStringIO.StringIO()
- cmdutil.export(repo, [r], fp=output,
- opts=patch.diffopts(ui, opts))
- yield output.getvalue().split('\n')
-
- def getbundle(dest):
- tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
- tmpfn = os.path.join(tmpdir, 'bundle')
- try:
- commands.bundle(ui, repo, tmpfn, dest, **opts)
- fp = open(tmpfn, 'rb')
- data = fp.read()
- fp.close()
- return data
- finally:
- try:
- os.unlink(tmpfn)
- except OSError:
- pass
- os.rmdir(tmpdir)
-
if not (opts.get('test') or mbox):
# really sending
mail.validateconfig(ui)
@@ -342,7 +498,7 @@
revs = rev
if outgoing:
- revs = getoutgoing(dest, rev)
+ revs = _getoutgoing(repo, dest, rev)
if bundle:
opts['revs'] = revs
@@ -355,102 +511,21 @@
def genmsgid(id):
return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
- def getdescription(body, sender):
- if opts.get('desc'):
- body = open(opts.get('desc')).read()
- else:
- ui.write(_('\nWrite the introductory message for the '
- 'patch series.\n\n'))
- body = ui.edit(body, sender)
- # Save series description in case sendmail fails
- msgfile = repo.opener('last-email.txt', 'wb')
- msgfile.write(body)
- msgfile.close()
- return body
-
- def getpatchmsgs(patches, patchnames=None):
- msgs = []
-
- ui.write(_('this patch series consists of %d patches.\n\n')
- % len(patches))
-
- # build the intro message, or skip it if the user declines
- if introwanted(opts, len(patches)):
- msg = makeintro(patches)
- if msg:
- msgs.append(msg)
-
- # are we going to send more than one message?
- numbered = len(msgs) + len(patches) > 1
-
- # now generate the actual patch messages
- name = None
- for i, p in enumerate(patches):
- if patchnames:
- name = patchnames[i]
- msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
- len(patches), numbered, name)
- msgs.append(msg)
-
- return msgs
-
- def makeintro(patches):
- tlen = len(str(len(patches)))
-
- flag = opts.get('flag') or ''
- if flag:
- flag = ' ' + ' '.join(flag)
- prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag)
-
- subj = (opts.get('subject') or
- prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
- if not subj:
- return None # skip intro if the user doesn't bother
-
- subj = prefix + ' ' + subj
-
- body = ''
- if opts.get('diffstat'):
- # generate a cumulative diffstat of the whole patch series
- diffstat = patch.diffstat(sum(patches, []))
- body = '\n' + diffstat
- else:
- diffstat = None
-
- body = getdescription(body, sender)
- msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
- msg['Subject'] = mail.headencode(ui, subj, _charsets,
- opts.get('test'))
- return (msg, subj, diffstat)
-
- def getbundlemsgs(bundle):
- subj = (opts.get('subject')
- or prompt(ui, 'Subject:', 'A bundle for your repository'))
-
- body = getdescription('', sender)
- msg = email.MIMEMultipart.MIMEMultipart()
- if body:
- msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
- datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
- datapart.set_payload(bundle)
- bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
- datapart.add_header('Content-Disposition', 'attachment',
- filename=bundlename)
- email.Encoders.encode_base64(datapart)
- msg.attach(datapart)
- msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
- return [(msg, subj, None)]
-
sender = (opts.get('from') or ui.config('email', 'from') or
ui.config('patchbomb', 'from') or
prompt(ui, 'From', ui.username()))
if patches:
- msgs = getpatchmsgs(patches, opts.get('patchnames'))
+ msgs = _getpatchmsgs(repo, sender, patches, opts.get('patchnames'),
+ **opts)
elif bundle:
- msgs = getbundlemsgs(getbundle(dest))
+ bundledata = _getbundle(repo, dest, **opts)
+ bundleopts = opts.copy()
+ bundleopts.pop('bundle', None) # already processed
+ msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
else:
- msgs = getpatchmsgs(list(getpatches(revs)))
+ _patches = list(_getpatches(repo, revs, **opts))
+ msgs = _getpatchmsgs(repo, sender, _patches, **opts)
showaddrs = []
@@ -482,15 +557,18 @@
bcc = getaddrs('Bcc') or []
replyto = getaddrs('Reply-To')
- if opts.get('diffstat') or opts.get('confirm'):
- ui.write(_('\nFinal summary:\n\n'))
- ui.write(('From: %s\n' % sender))
+ confirm = ui.configbool('patchbomb', 'confirm')
+ confirm |= bool(opts.get('diffstat') or opts.get('confirm'))
+
+ if confirm:
+ ui.write(_('\nFinal summary:\n\n'), label='patchbomb.finalsummary')
+ ui.write(('From: %s\n' % sender), label='patchbomb.from')
for addr in showaddrs:
- ui.write('%s\n' % addr)
+ ui.write('%s\n' % addr, label='patchbomb.to')
for m, subj, ds in msgs:
- ui.write(('Subject: %s\n' % subj))
+ ui.write(('Subject: %s\n' % subj), label='patchbomb.subject')
if ds:
- ui.write(ds)
+ ui.write(ds, label='patchbomb.diffstats')
ui.write('\n')
if ui.promptchoice(_('are you sure you want to send (yn)?'
'$$ &Yes $$ &No')):
--- a/hgext/rebase.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/rebase.py Tue Dec 09 13:32:19 2014 -0600
@@ -18,11 +18,12 @@
from mercurial import extensions, patch, scmutil, phases, obsolete, error
from mercurial import copies
from mercurial.commands import templateopts
-from mercurial.node import nullrev
+from mercurial.node import nullrev, nullid, hex
from mercurial.lock import release
from mercurial.i18n import _
import os, errno
+revtodo = -1
nullmerge = -2
revignored = -3
@@ -282,7 +283,7 @@
if not rebaseset:
# transform to list because smartsets are not comparable to
- # lists. This should be improved to honor lazyness of
+ # lists. This should be improved to honor laziness of
# smartset.
if list(base) == [dest.rev()]:
if basef:
@@ -367,11 +368,11 @@
pos = 0
for rev in sortedstate:
pos += 1
- if state[rev] == -1:
+ if state[rev] == revtodo:
ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
_('changesets'), total)
- p1, p2 = defineparents(repo, rev, target, state,
- targetancestors)
+ p1, p2, base = defineparents(repo, rev, target, state,
+ targetancestors)
storestatus(repo, originalwd, target, state, collapsef, keepf,
keepbranchesf, external, activebookmark)
if len(repo.parents()) == 2:
@@ -380,8 +381,8 @@
try:
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
'rebase')
- stats = rebasenode(repo, rev, p1, state, collapsef,
- target)
+ stats = rebasenode(repo, rev, p1, base, state,
+ collapsef, target)
if stats and stats[3] > 0:
raise error.InterventionRequired(
_('unresolved conflicts (see hg '
@@ -389,20 +390,20 @@
finally:
ui.setconfig('ui', 'forcemerge', '', 'rebase')
if not collapsef:
- merging = repo[p2].rev() != nullrev
+ merging = p2 != nullrev
editform = cmdutil.mergeeditform(merging, 'rebase')
editor = cmdutil.getcommiteditor(editform=editform, **opts)
- newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
- editor=editor)
+ newnode = concludenode(repo, rev, p1, p2, extrafn=extrafn,
+ editor=editor)
else:
# Skip commit if we are collapsing
repo.dirstate.beginparentchange()
repo.setparents(repo[p1].node())
repo.dirstate.endparentchange()
- newrev = None
+ newnode = None
# Update the state
- if newrev is not None:
- state[rev] = repo[newrev].rev()
+ if newnode is not None:
+ state[rev] = repo[newnode].rev()
else:
if not collapsef:
ui.note(_('no changes, revision %d skipped\n') % rev)
@@ -414,8 +415,8 @@
ui.note(_('rebase merging completed\n'))
if collapsef and not keepopen:
- p1, p2 = defineparents(repo, min(state), target,
- state, targetancestors)
+ p1, p2, _base = defineparents(repo, min(state), target,
+ state, targetancestors)
editopt = opts.get('edit')
editform = 'rebase.collapse'
if collapsemsg:
@@ -427,8 +428,12 @@
commitmsg += '\n* %s' % repo[rebased].description()
editopt = True
editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
- newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
- extrafn=extrafn, editor=editor)
+ newnode = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
+ extrafn=extrafn, editor=editor)
+ if newnode is None:
+ newrev = target
+ else:
+ newrev = repo[newnode].rev()
for oldrev in state.iterkeys():
if state[oldrev] > nullmerge:
state[oldrev] = newrev
@@ -459,7 +464,7 @@
if not keepf:
collapsedas = None
if collapsef:
- collapsedas = newrev
+ collapsedas = newnode
clearrebased(ui, repo, state, skipped, collapsedas)
if currentbookmarks:
@@ -505,7 +510,9 @@
', '.join(str(p) for p in sorted(parents))))
def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
- 'Commit the changes and store useful information in extra'
+ '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
+ but also store useful information in extra.
+ Return node of committed revision.'''
try:
repo.dirstate.beginparentchange()
repo.setparents(repo[p1].node(), repo[p2].node())
@@ -522,73 +529,31 @@
targetphase = max(ctx.phase(), phases.draft)
repo.ui.setconfig('phases', 'new-commit', targetphase, 'rebase')
# Commit might fail if unresolved files exist
- newrev = repo.commit(text=commitmsg, user=ctx.user(),
- date=ctx.date(), extra=extra, editor=editor)
+ newnode = repo.commit(text=commitmsg, user=ctx.user(),
+ date=ctx.date(), extra=extra, editor=editor)
finally:
repo.ui.restoreconfig(backup)
- repo.dirstate.setbranch(repo[newrev].branch())
- return newrev
+ repo.dirstate.setbranch(repo[newnode].branch())
+ return newnode
except util.Abort:
# Invalidate the previous setparents
repo.dirstate.invalidate()
raise
-def rebasenode(repo, rev, p1, state, collapse, target):
- 'Rebase a single revision'
+def rebasenode(repo, rev, p1, base, state, collapse, target):
+ 'Rebase a single revision rev on top of p1 using base as merge ancestor'
# Merge phase
# Update to target and merge it with local
- if repo['.'].rev() != repo[p1].rev():
- repo.ui.debug(" update to %d:%s\n" % (repo[p1].rev(), repo[p1]))
+ if repo['.'].rev() != p1:
+ repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
merge.update(repo, p1, False, True, False)
else:
repo.ui.debug(" already in target\n")
repo.dirstate.write()
- repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev]))
- if repo[rev].rev() == repo[min(state)].rev():
- # Case (1) initial changeset of a non-detaching rebase.
- # Let the merge mechanism find the base itself.
- base = None
- elif not repo[rev].p2():
- # Case (2) detaching the node with a single parent, use this parent
- base = repo[rev].p1().node()
- else:
- # In case of merge, we need to pick the right parent as merge base.
- #
- # Imagine we have:
- # - M: currently rebase revision in this step
- # - A: one parent of M
- # - B: second parent of M
- # - D: destination of this merge step (p1 var)
- #
- # If we are rebasing on D, D is the successors of A or B. The right
- # merge base is the one D succeed to. We pretend it is B for the rest
- # of this comment
- #
- # If we pick B as the base, the merge involves:
- # - changes from B to M (actual changeset payload)
- # - changes from B to D (induced by rebase) as D is a rebased
- # version of B)
- # Which exactly represent the rebase operation.
- #
- # If we pick the A as the base, the merge involves
- # - changes from A to M (actual changeset payload)
- # - changes from A to D (with include changes between unrelated A and B
- # plus changes induced by rebase)
- # Which does not represent anything sensible and creates a lot of
- # conflicts.
- for p in repo[rev].parents():
- if state.get(p.rev()) == repo[p1].rev():
- base = p.node()
- break
- else: # fallback when base not found
- base = None
-
- # Raise because this function is called wrong (see issue 4106)
- raise AssertionError('no base found to rebase on '
- '(rebasenode called wrong)')
+ repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
if base is not None:
- repo.ui.debug(" detach base %d:%s\n" % (repo[base].rev(), repo[base]))
+ repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
# When collapsing in-place, the parent is the common ancestor, we
# have to allow merging with it.
stats = merge.update(repo, rev, True, True, False, base, collapse,
@@ -655,7 +620,50 @@
p2 = p2n
repo.ui.debug(" future parents are %d and %d\n" %
(repo[p1].rev(), repo[p2].rev()))
- return p1, p2
+
+ if rev == min(state):
+ # Case (1) initial changeset of a non-detaching rebase.
+ # Let the merge mechanism find the base itself.
+ base = None
+ elif not repo[rev].p2():
+ # Case (2) detaching the node with a single parent, use this parent
+ base = repo[rev].p1().rev()
+ else:
+ # In case of merge, we need to pick the right parent as merge base.
+ #
+ # Imagine we have:
+ # - M: currently rebase revision in this step
+ # - A: one parent of M
+ # - B: second parent of M
+ # - D: destination of this merge step (p1 var)
+ #
+    # If we are rebasing on D, D is the successor of A or B. The right
+    # merge base is the one D succeeds to. We pretend it is B for the rest
+ # of this comment
+ #
+ # If we pick B as the base, the merge involves:
+ # - changes from B to M (actual changeset payload)
+ # - changes from B to D (induced by rebase) as D is a rebased
+    #   version of B
+    # Which exactly represents the rebase operation.
+ #
+    # If we pick A as the base, the merge involves
+ # - changes from A to M (actual changeset payload)
+    # - changes from A to D (which includes changes between unrelated A and B
+ # plus changes induced by rebase)
+ # Which does not represent anything sensible and creates a lot of
+ # conflicts.
+ for p in repo[rev].parents():
+ if state.get(p.rev()) == p1:
+ base = p.rev()
+ break
+ else: # fallback when base not found
+ base = None
+
+ # Raise because this function is called wrong (see issue 4106)
+ raise AssertionError('no base found to rebase on '
+ '(defineparents called wrong)')
+ return p1, p2, base
def isagitpatch(repo, patchname):
'Return true if the given patch is in git format'
@@ -729,8 +737,12 @@
f.write('%s\n' % (activebookmark or ''))
for d, v in state.iteritems():
oldrev = repo[d].hex()
- if v > nullmerge:
+ if v >= 0:
newrev = repo[v].hex()
+ elif v == revtodo:
+ # To maintain format compatibility, we have to use nullid.
+ # Please do remove this special case when upgrading the format.
+ newrev = hex(nullid)
else:
newrev = v
f.write("%s:%s\n" % (oldrev, newrev))
@@ -772,6 +784,9 @@
oldrev, newrev = l.split(':')
if newrev in (str(nullmerge), str(revignored)):
state[repo[oldrev].rev()] = int(newrev)
+ elif newrev == nullid:
+ state[repo[oldrev].rev()] = revtodo
+ # Legacy compat special case
else:
state[repo[oldrev].rev()] = repo[newrev].rev()
@@ -783,7 +798,7 @@
if not collapse:
seen = set([target])
for old, new in sorted(state.items()):
- if new != nullrev and new in seen:
+ if new != revtodo and new in seen:
skipped.add(old)
seen.add(new)
repo.ui.debug('computed skipped revs: %s\n' %
@@ -810,7 +825,7 @@
def abort(repo, originalwd, target, state):
'Restore the repository to its original state'
- dstates = [s for s in state.values() if s > nullrev]
+ dstates = [s for s in state.values() if s >= 0]
immutable = [d for d in dstates if not repo[d].mutable()]
cleanup = True
if immutable:
@@ -830,10 +845,10 @@
if cleanup:
# Update away from the rebase if necessary
if inrebase(repo, originalwd, state):
- merge.update(repo, repo[originalwd].rev(), False, True, False)
+ merge.update(repo, originalwd, False, True, False)
# Strip from the first rebased revision
- rebased = filter(lambda x: x > -1 and x != target, state.values())
+ rebased = filter(lambda x: x >= 0 and x != target, state.values())
if rebased:
strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)]
# no backup of rebased cset versions needed
@@ -875,7 +890,7 @@
return None
repo.ui.debug('rebase onto %d starting from %s\n' % (dest, root))
- state.update(dict.fromkeys(rebaseset, nullrev))
+ state.update(dict.fromkeys(rebaseset, revtodo))
# Rebase tries to turn <dest> into a parent of <root> while
# preserving the number of parents of rebased changesets:
#
@@ -1012,7 +1027,7 @@
msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
ui.write(msg)
return
- numrebased = len([i for i in state.itervalues() if i != -1])
+ numrebased = len([i for i in state.itervalues() if i >= 0])
# i18n: column positioning for "hg summary"
ui.write(_('rebase: %s, %s (rebase --continue)\n') %
(ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
--- a/hgext/record.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/record.py Tue Dec 09 13:32:19 2014 -0600
@@ -328,10 +328,9 @@
f.close()
# Start the editor and wait for it to complete
editor = ui.geteditor()
- util.system("%s \"%s\"" % (editor, patchfn),
- environ={'HGUSER': ui.username()},
- onerr=util.Abort, errprefix=_("edit failed"),
- out=ui.fout)
+ ui.system("%s \"%s\"" % (editor, patchfn),
+ environ={'HGUSER': ui.username()},
+ onerr=util.Abort, errprefix=_("edit failed"))
# Remove comment lines
patchfp = open(patchfn)
ncpatchfp = cStringIO.StringIO()
@@ -520,10 +519,9 @@
'(use "hg commit" instead)'))
status = repo.status(match=match)
- diffopts = opts.copy()
- diffopts['nodates'] = True
- diffopts['git'] = True
- diffopts = patch.diffopts(ui, opts=diffopts)
+ diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
+ diffopts.nodates = True
+ diffopts.git = True
chunks = patch.diff(repo, changes=status, opts=diffopts)
fp = cStringIO.StringIO()
fp.write(''.join(chunks))
--- a/hgext/strip.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/strip.py Tue Dec 09 13:32:19 2014 -0600
@@ -1,4 +1,4 @@
-"""strip changesets and their descendents from history
+"""strip changesets and their descendants from history
This extension allows you to strip changesets and all their descendants from the
repository. See the command help for details.
--- a/hgext/transplant.py Mon Dec 08 15:41:54 2014 -0800
+++ b/hgext/transplant.py Tue Dec 09 13:32:19 2014 -0600
@@ -118,7 +118,7 @@
revs = sorted(revmap)
p1, p2 = repo.dirstate.parents()
pulls = []
- diffopts = patch.diffopts(self.ui, opts)
+ diffopts = patch.difffeatureopts(self.ui, opts)
diffopts.git = True
lock = wlock = tr = None
@@ -233,13 +233,12 @@
fp.close()
try:
- util.system('%s %s %s' % (filter, util.shellquote(headerfile),
- util.shellquote(patchfile)),
- environ={'HGUSER': changelog[1],
- 'HGREVISION': revlog.hex(node),
- },
- onerr=util.Abort, errprefix=_('filter failed'),
- out=self.ui.fout)
+ self.ui.system('%s %s %s' % (filter, util.shellquote(headerfile),
+ util.shellquote(patchfile)),
+ environ={'HGUSER': changelog[1],
+ 'HGREVISION': revlog.hex(node),
+ },
+ onerr=util.Abort, errprefix=_('filter failed'))
user, date, msg = self.parselog(file(headerfile))[1:4]
finally:
os.unlink(headerfile)
--- a/i18n/polib.py Mon Dec 08 15:41:54 2014 -0800
+++ b/i18n/polib.py Tue Dec 09 13:32:19 2014 -0600
@@ -396,7 +396,7 @@
def ordered_metadata(self):
"""
Convenience method that returns an ordered version of the metadata
- dictionnary. The return value is list of tuples (metadata name,
+ dictionary. The return value is list of tuples (metadata name,
metadata_value).
"""
# copy the dict first
--- a/mercurial/ancestor.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/ancestor.py Tue Dec 09 13:32:19 2014 -0600
@@ -134,89 +134,128 @@
return gca
return deepest(gca)
-def missingancestors(revs, bases, pfunc):
- """Return all the ancestors of revs that are not ancestors of bases.
-
- This may include elements from revs.
+class incrementalmissingancestors(object):
+ '''persistent state used to calculate missing ancestors incrementally
- Equivalent to the revset (::revs - ::bases). Revs are returned in
- revision number order, which is a topological order.
+ Although similar in spirit to lazyancestors below, this is a separate class
+ because trying to support contains and missingancestors operations with the
+ same internal data structures adds needless complexity.'''
+ def __init__(self, pfunc, bases):
+ self.bases = set(bases)
+ if not self.bases:
+ self.bases.add(nullrev)
+ self.pfunc = pfunc
- revs and bases should both be iterables. pfunc must return a list of
- parent revs for a given revs.
- """
+ def hasbases(self):
+ '''whether the common set has any non-trivial bases'''
+ return self.bases and self.bases != set([nullrev])
+
+ def addbases(self, newbases):
+ '''grow the ancestor set by adding new bases'''
+ self.bases.update(newbases)
- revsvisit = set(revs)
- basesvisit = set(bases)
- if not revsvisit:
- return []
- if not basesvisit:
- basesvisit.add(nullrev)
- start = max(max(revsvisit), max(basesvisit))
- bothvisit = revsvisit.intersection(basesvisit)
- revsvisit.difference_update(bothvisit)
- basesvisit.difference_update(bothvisit)
- # At this point, we hold the invariants that:
- # - revsvisit is the set of nodes we know are an ancestor of at least one
- # of the nodes in revs
- # - basesvisit is the same for bases
- # - bothvisit is the set of nodes we know are ancestors of at least one of
- # the nodes in revs and one of the nodes in bases
- # - a node may be in none or one, but not more, of revsvisit, basesvisit
- # and bothvisit at any given time
- # Now we walk down in reverse topo order, adding parents of nodes already
- # visited to the sets while maintaining the invariants. When a node is
- # found in both revsvisit and basesvisit, it is removed from them and
- # added to bothvisit instead. When revsvisit becomes empty, there are no
- # more ancestors of revs that aren't also ancestors of bases, so exit.
+ def removeancestorsfrom(self, revs):
+ '''remove all ancestors of bases from the set revs (in place)'''
+ bases = self.bases
+ pfunc = self.pfunc
+ revs.difference_update(bases)
+ # nullrev is always an ancestor
+ revs.discard(nullrev)
+ if not revs:
+ return
+ # anything in revs > start is definitely not an ancestor of bases
+ # revs <= start needs to be investigated
+ start = max(bases)
+ keepcount = sum(1 for r in revs if r > start)
+ if len(revs) == keepcount:
+ # no revs to consider
+ return
- missing = []
- for curr in xrange(start, nullrev, -1):
+ for curr in xrange(start, min(revs) - 1, -1):
+ if curr not in bases:
+ continue
+ revs.discard(curr)
+ bases.update(pfunc(curr))
+ if len(revs) == keepcount:
+ # no more potential revs to discard
+ break
+
+ def missingancestors(self, revs):
+ '''return all the ancestors of revs that are not ancestors of self.bases
+
+ This may include elements from revs.
+
+ Equivalent to the revset (::revs - ::self.bases). Revs are returned in
+ revision number order, which is a topological order.'''
+ revsvisit = set(revs)
+ basesvisit = self.bases
+ pfunc = self.pfunc
+ bothvisit = revsvisit.intersection(basesvisit)
+ revsvisit.difference_update(bothvisit)
if not revsvisit:
- break
+ return []
- if curr in bothvisit:
- bothvisit.remove(curr)
- # curr's parents might have made it into revsvisit or basesvisit
- # through another path
- for p in pfunc(curr):
- revsvisit.discard(p)
- basesvisit.discard(p)
- bothvisit.add(p)
- continue
+ start = max(max(revsvisit), max(basesvisit))
+ # At this point, we hold the invariants that:
+ # - revsvisit is the set of nodes we know are an ancestor of at least
+ # one of the nodes in revs
+ # - basesvisit is the same for bases
+ # - bothvisit is the set of nodes we know are ancestors of at least one
+ # of the nodes in revs and one of the nodes in bases. bothvisit and
+ # revsvisit are mutually exclusive, but bothvisit is a subset of
+ # basesvisit.
+ # Now we walk down in reverse topo order, adding parents of nodes
+ # already visited to the sets while maintaining the invariants. When a
+ # node is found in both revsvisit and basesvisit, it is removed from
+ # revsvisit and added to bothvisit. When revsvisit becomes empty, there
+ # are no more ancestors of revs that aren't also ancestors of bases, so
+ # exit.
+
+ missing = []
+ for curr in xrange(start, nullrev, -1):
+ if not revsvisit:
+ break
- # curr will never be in both revsvisit and basesvisit, since if it
- # were it'd have been pushed to bothvisit
- if curr in revsvisit:
- missing.append(curr)
- thisvisit = revsvisit
- othervisit = basesvisit
- elif curr in basesvisit:
- thisvisit = basesvisit
- othervisit = revsvisit
- else:
- # not an ancestor of revs or bases: ignore
- continue
+ if curr in bothvisit:
+ bothvisit.remove(curr)
+ # curr's parents might have made it into revsvisit through
+ # another path
+ for p in pfunc(curr):
+ revsvisit.discard(p)
+ basesvisit.add(p)
+ bothvisit.add(p)
+ continue
- thisvisit.remove(curr)
- for p in pfunc(curr):
- if p == nullrev:
- pass
- elif p in othervisit or p in bothvisit:
- # p is implicitly in thisvisit. This means p is or should be
- # in bothvisit
- revsvisit.discard(p)
- basesvisit.discard(p)
- bothvisit.add(p)
+ if curr in revsvisit:
+ missing.append(curr)
+ revsvisit.remove(curr)
+ thisvisit = revsvisit
+ othervisit = basesvisit
+ elif curr in basesvisit:
+ thisvisit = basesvisit
+ othervisit = revsvisit
else:
- # visit later
- thisvisit.add(p)
+ # not an ancestor of revs or bases: ignore
+ continue
- missing.reverse()
- return missing
+ for p in pfunc(curr):
+ if p == nullrev:
+ pass
+ elif p in othervisit or p in bothvisit:
+ # p is implicitly in thisvisit. This means p is or should be
+ # in bothvisit
+ revsvisit.discard(p)
+ basesvisit.add(p)
+ bothvisit.add(p)
+ else:
+ # visit later
+ thisvisit.add(p)
+
+ missing.reverse()
+ return missing
class lazyancestors(object):
- def __init__(self, cl, revs, stoprev=0, inclusive=False):
+ def __init__(self, pfunc, revs, stoprev=0, inclusive=False):
"""Create a new object generating ancestors for the given revs. Does
not generate revs lower than stoprev.
@@ -228,7 +267,7 @@
than stoprev will not be generated.
Result does not include the null revision."""
- self._parentrevs = cl.parentrevs
+ self._parentrevs = pfunc
self._initrevs = revs
self._stoprev = stoprev
self._inclusive = inclusive
--- a/mercurial/bookmarks.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/bookmarks.py Tue Dec 09 13:32:19 2014 -0600
@@ -5,6 +5,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+import os
from mercurial.i18n import _
from mercurial.node import hex, bin
from mercurial import encoding, error, util, obsolete, lock as lockmod
@@ -29,7 +30,8 @@
dict.__init__(self)
self._repo = repo
try:
- for line in repo.vfs('bookmarks'):
+ bkfile = self.getbkfile(repo)
+ for line in bkfile:
line = line.strip()
if not line:
continue
@@ -47,12 +49,24 @@
if inst.errno != errno.ENOENT:
raise
+ def getbkfile(self, repo):
+ bkfile = None
+ if 'HG_PENDING' in os.environ:
+ try:
+ bkfile = repo.vfs('bookmarks.pending')
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ if bkfile is None:
+ bkfile = repo.vfs('bookmarks')
+ return bkfile
+
def recordchange(self, tr):
"""record that bookmarks have been changed in a transaction
The transaction is then responsible for updating the file content."""
tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
- vfs=self._repo.vfs)
+ location='plain')
tr.hookargs['bookmark_moved'] = '1'
def write(self):
@@ -65,6 +79,10 @@
can be copied back on rollback.
'''
repo = self._repo
+ self._writerepo(repo)
+
+ def _writerepo(self, repo):
+ """Factored out for extensibility"""
if repo._bookmarkcurrent not in self:
unsetcurrent(repo)
--- a/mercurial/bundle2.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/bundle2.py Tue Dec 09 13:32:19 2014 -0600
@@ -229,7 +229,7 @@
self.getreplies(inreplyto).add(category, entry)
def getreplies(self, partid):
- """get the subrecords that replies to a specific part"""
+ """get the records that are replies to a specific part"""
return self._replies.setdefault(partid, unbundlerecords())
def __getitem__(self, cat):
@@ -277,7 +277,7 @@
to be created"""
raise TransactionUnavailable()
-def processbundle(repo, unbundler, transactiongetter=_notransaction):
+def processbundle(repo, unbundler, transactiongetter=None):
"""This function process a bundle, apply effect to/from a repo
It iterates over each part then searches for and uses the proper handling
@@ -288,6 +288,8 @@
Unknown Mandatory part will abort the process.
"""
+ if transactiongetter is None:
+ transactiongetter = _notransaction
op = bundleoperation(repo, transactiongetter)
# todo:
# - replace this is a init function soon.
@@ -303,7 +305,7 @@
# consume the bundle content
part.read()
# Small hack to let caller code distinguish exceptions from bundle2
- # processing fron the ones from bundle1 processing. This is mostly
+ # processing from processing the old format. This is mostly
# needed to handle different return codes to unbundle according to the
# type of bundle. We should probably clean up or drop this return code
# craziness in a future version.
@@ -359,7 +361,7 @@
def decodecaps(blob):
- """decode a bundle2 caps bytes blob into a dictionnary
+ """decode a bundle2 caps bytes blob into a dictionary
The blob is a list of capabilities (one per line)
Capabilities may have values using a line of the form::
@@ -741,7 +743,7 @@
self.ui.debug('bundle2 stream interruption, looking for a part.\n')
headerblock = self._readpartheader()
if headerblock is None:
- self.ui.debug('no part found during iterruption.\n')
+ self.ui.debug('no part found during interruption.\n')
return
part = unbundlepart(self.ui, headerblock, self._fp)
op = interruptoperation(self.ui)
@@ -828,7 +830,7 @@
# split mandatory from advisory
mansizes = paramsizes[:mancount]
advsizes = paramsizes[mancount:]
- # retrive param value
+ # retrieve param value
manparams = []
for key, value in mansizes:
manparams.append((self._fromheader(key), self._fromheader(value)))
@@ -871,24 +873,26 @@
capabilities = {'HG2Y': (),
'b2x:listkeys': (),
'b2x:pushkey': (),
- 'b2x:changegroup': (),
'digests': tuple(sorted(util.DIGESTS.keys())),
'b2x:remote-changegroup': ('http', 'https'),
}
-def getrepocaps(repo):
+def getrepocaps(repo, allowpushback=False):
"""return the bundle2 capabilities for a given repo
Exists to allow extensions (like evolution) to mutate the capabilities.
"""
caps = capabilities.copy()
+ caps['b2x:changegroup'] = tuple(sorted(changegroup.packermap.keys()))
if obsolete.isenabled(repo, obsolete.exchangeopt):
supportedformat = tuple('V%i' % v for v in obsolete.formats)
caps['b2x:obsmarkers'] = supportedformat
+ if allowpushback:
+ caps['b2x:pushback'] = ()
return caps
def bundle2caps(remote):
- """return the bundlecapabilities of a peer as dict"""
+ """return the bundle capabilities of a peer as dict"""
raw = remote.capable('bundle2-exp')
if not raw and raw != '':
return {}
@@ -901,7 +905,7 @@
obscaps = caps.get('b2x:obsmarkers', ())
return [int(c[1:]) for c in obscaps if c.startswith('V')]
-@parthandler('b2x:changegroup')
+@parthandler('b2x:changegroup', ('version',))
def handlechangegroup(op, inpart):
"""apply a changegroup part on the repo
@@ -914,13 +918,16 @@
# we need to make sure we trigger the creation of a transaction object used
# for the whole processing scope.
op.gettransaction()
- cg = changegroup.cg1unpacker(inpart, 'UN')
+ unpackerversion = inpart.params.get('version', '01')
+ # We should raise an appropriate exception here
+ unpacker = changegroup.packermap[unpackerversion][1]
+ cg = unpacker(inpart, 'UN')
# the source and url passed here are overwritten by the one contained in
# the transaction.hookargs argument. So 'bundle2' is a placeholder
ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
op.records.add('changegroup', {'return': ret})
if op.reply is not None:
- # This is definitly not the final form of this
+ # This is definitely not the final form of this
# return. But one need to start somewhere.
part = op.reply.newpart('b2x:reply:changegroup')
part.addparam('in-reply-to', str(inpart.id), mandatory=False)
@@ -989,7 +996,7 @@
ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
op.records.add('changegroup', {'return': ret})
if op.reply is not None:
- # This is definitly not the final form of this
+ # This is definitely not the final form of this
# return. But one need to start somewhere.
part = op.reply.newpart('b2x:reply:changegroup')
part.addparam('in-reply-to', str(inpart.id), mandatory=False)
--- a/mercurial/changegroup.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/changegroup.py Tue Dec 09 13:32:19 2014 -0600
@@ -13,6 +13,7 @@
import discovery, error, phases, branchmap
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
+_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
def readexactly(stream, n):
'''read n bytes from stream.read and abort if less was available'''
@@ -215,6 +216,14 @@
pos = next
yield closechunk()
+class cg2unpacker(cg1unpacker):
+ deltaheader = _CHANGEGROUPV2_DELTA_HEADER
+ deltaheadersize = struct.calcsize(deltaheader)
+
+ def _deltaheader(self, headertuple, prevnode):
+ node, p1, p2, deltabase, cs = headertuple
+ return node, p1, p2, deltabase, cs
+
class headerlessfixup(object):
def __init__(self, fh, h):
self._h = h
@@ -413,10 +422,13 @@
reorder=reorder):
yield chunk
+ def deltaparent(self, revlog, rev, p1, p2, prev):
+ return prev
+
def revchunk(self, revlog, rev, prev, linknode):
node = revlog.node(rev)
p1, p2 = revlog.parentrevs(rev)
- base = prev
+ base = self.deltaparent(revlog, rev, p1, p2, prev)
prefix = ''
if base == nullrev:
@@ -436,6 +448,30 @@
# do nothing with basenode, it is implicitly the previous one in HG10
return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
+class cg2packer(cg1packer):
+
+ deltaheader = _CHANGEGROUPV2_DELTA_HEADER
+
+ def group(self, nodelist, revlog, lookup, units=None, reorder=None):
+ if (revlog._generaldelta and reorder is not True):
+ reorder = False
+ return super(cg2packer, self).group(nodelist, revlog, lookup,
+ units=units, reorder=reorder)
+
+ def deltaparent(self, revlog, rev, p1, p2, prev):
+ dp = revlog.deltaparent(rev)
+ # avoid storing full revisions; pick prev in those cases
+ # also pick prev when we can't be sure remote has dp
+ if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
+ return prev
+ return dp
+
+ def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
+ return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
+
+packermap = {'01': (cg1packer, cg1unpacker),
+ '02': (cg2packer, cg2unpacker)}
+
def _changegroupinfo(repo, nodes, source):
if repo.ui.verbose or source == 'bundle':
repo.ui.status(_("%d changesets found\n") % len(nodes))
@@ -444,7 +480,7 @@
for node in nodes:
repo.ui.debug("%s\n" % hex(node))
-def getsubset(repo, outgoing, bundler, source, fastpath=False):
+def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
repo = repo.unfiltered()
commonrevs = outgoing.common
csets = outgoing.missing
@@ -458,7 +494,10 @@
repo.hook('preoutgoing', throw=True, source=source)
_changegroupinfo(repo, csets, source)
- gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
+ return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
+
+def getsubset(repo, outgoing, bundler, source, fastpath=False):
+ gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
return cg1unpacker(util.chunkbuffer(gengroup), 'UN')
def changegroupsubset(repo, roots, heads, source):
@@ -486,6 +525,17 @@
bundler = cg1packer(repo)
return getsubset(repo, outgoing, bundler, source)
+def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
+ version='01'):
+ """Like getbundle, but taking a discovery.outgoing as an argument.
+
+ This is only implemented for local repos and reuses potentially
+ precomputed sets in outgoing. Returns a raw changegroup generator."""
+ if not outgoing.missing:
+ return None
+ bundler = packermap[version][0](repo, bundlecaps)
+ return getsubsetraw(repo, outgoing, bundler, source)
+
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
"""Like getbundle, but taking a discovery.outgoing as an argument.
@@ -515,6 +565,22 @@
heads = cl.heads()
return discovery.outgoing(cl, common, heads)
+def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
+ version='01'):
+ """Like changegroupsubset, but returns the set difference between the
+ ancestors of heads and the ancestors common.
+
+ If heads is None, use the local heads. If common is None, use [nullid].
+
+ If version is None, use a version '1' changegroup.
+
+ The nodes in common might not all be known locally due to the way the
+ current discovery protocol works. Returns a raw changegroup generator.
+ """
+ outgoing = _computeoutgoing(repo, heads, common)
+ return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
+ version=version)
+
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
"""Like changegroupsubset, but returns the set difference between the
ancestors of heads and the ancestors common.
@@ -598,12 +664,6 @@
changesets = files = revisions = 0
efiles = set()
- # write changelog data to temp files so concurrent readers will not see
- # inconsistent view
- cl = repo.changelog
- cl.delayupdate()
- oldheads = cl.heads()
-
tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
# The transaction could have been created before and already carries source
# information. In this case we use the top level data. We overwrite the
@@ -611,6 +671,12 @@
# this function.
srctype = tr.hookargs.setdefault('source', srctype)
url = tr.hookargs.setdefault('url', url)
+
+ # write changelog data to temp files so concurrent readers will not see
+ # inconsistent view
+ cl = repo.changelog
+ cl.delayupdate(tr)
+ oldheads = cl.heads()
try:
repo.hook('prechangegroup', throw=True, **tr.hookargs)
@@ -693,7 +759,7 @@
repo.invalidatevolatilesets()
if changesets > 0:
- p = lambda: cl.writepending() and repo.root or ""
+ p = lambda: tr.writepending() and repo.root or ""
if 'node' not in tr.hookargs:
tr.hookargs['node'] = hex(cl.node(clstart))
hookargs = dict(tr.hookargs)
@@ -725,11 +791,6 @@
# strip should not touch boundary at all
phases.retractboundary(repo, tr, targetphase, added)
- # make changelog see real files again
- cl.finalize(trp)
-
- tr.close()
-
if changesets > 0:
if srctype != 'strip':
# During strip, branchcache is invalid but coming call to
@@ -758,7 +819,11 @@
"%s incoming changes - new heads: %s\n",
len(added),
', '.join([hex(c[:6]) for c in newheads]))
- repo._afterlock(runhooks)
+
+ tr.addpostclose('changegroup-runhooks-%020i' % clstart,
+ lambda tr: repo._afterlock(runhooks))
+
+ tr.close()
finally:
tr.release()
--- a/mercurial/changelog.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/changelog.py Tue Dec 09 13:32:19 2014 -0600
@@ -108,15 +108,21 @@
self.data.append(str(s))
self.offset += len(s)
-def delayopener(opener, target, divert, buf):
- def o(name, mode='r'):
+def _divertopener(opener, target):
+ """build an opener that writes in 'target.a' instead of 'target'"""
+ def _divert(name, mode='r'):
if name != target:
return opener(name, mode)
- if divert:
- return opener(name + ".a", mode.replace('a', 'w'))
- # otherwise, divert to memory
+ return opener(name + ".a", mode)
+ return _divert
+
+def _delayopener(opener, target, buf):
+ """build an opener that stores chunks in 'buf' instead of 'target'"""
+ def _delay(name, mode='r'):
+ if name != target:
+ return opener(name, mode)
return appender(opener, name, mode, buf)
- return o
+ return _delay
class changelog(revlog.revlog):
def __init__(self, opener):
@@ -127,7 +133,7 @@
self._generaldelta = False
self._realopener = opener
self._delayed = False
- self._delaybuf = []
+ self._delaybuf = None
self._divert = False
self.filteredrevs = frozenset()
@@ -218,20 +224,30 @@
raise error.FilteredIndexError(rev)
return super(changelog, self).flags(rev)
- def delayupdate(self):
+ def delayupdate(self, tr):
"delay visibility of index updates to other readers"
+
+ if not self._delayed:
+ if len(self) == 0:
+ self._divert = True
+ if self._realopener.exists(self.indexfile + '.a'):
+ self._realopener.unlink(self.indexfile + '.a')
+ self.opener = _divertopener(self._realopener, self.indexfile)
+ else:
+ self._delaybuf = []
+ self.opener = _delayopener(self._realopener, self.indexfile,
+ self._delaybuf)
self._delayed = True
- self._divert = (len(self) == 0)
- self._delaybuf = []
- self.opener = delayopener(self._realopener, self.indexfile,
- self._divert, self._delaybuf)
+ tr.addpending('cl-%i' % id(self), self._writepending)
+ tr.addfinalize('cl-%i' % id(self), self._finalize)
- def finalize(self, tr):
+ def _finalize(self, tr):
"finalize index updates"
self._delayed = False
self.opener = self._realopener
# move redirected index data back into place
if self._divert:
+ assert not self._delaybuf
tmpname = self.indexfile + ".a"
nfile = self.opener.open(tmpname)
nfile.close()
@@ -240,7 +256,8 @@
fp = self.opener(self.indexfile, 'a')
fp.write("".join(self._delaybuf))
fp.close()
- self._delaybuf = []
+ self._delaybuf = None
+ self._divert = False
# split when we're done
self.checkinlinesize(tr)
@@ -251,19 +268,24 @@
self._nodecache = r._nodecache
self._chunkcache = r._chunkcache
- def writepending(self):
+ def _writepending(self, tr):
"create a file containing the unfinalized state for pretxnchangegroup"
if self._delaybuf:
# make a temporary copy of the index
fp1 = self._realopener(self.indexfile)
- fp2 = self._realopener(self.indexfile + ".a", "w")
+ pendingfilename = self.indexfile + ".a"
+ # register as a temp file to ensure cleanup on failure
+ tr.registertmp(pendingfilename)
+ # write existing data
+ fp2 = self._realopener(pendingfilename, "w")
fp2.write(fp1.read())
# add pending data
fp2.write("".join(self._delaybuf))
fp2.close()
# switch modes so finalize can simply rename
- self._delaybuf = []
+ self._delaybuf = None
self._divert = True
+ self.opener = _divertopener(self._realopener, self.indexfile)
if self._divert:
return True
--- a/mercurial/cmdutil.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/cmdutil.py Tue Dec 09 13:32:19 2014 -0600
@@ -113,7 +113,7 @@
def mergeeditform(ctxorbool, baseform):
"""build appropriate editform from ctxorbool and baseform
- 'cxtorbool' is one of a ctx to be committed, or a bool whether
+ 'ctxorbool' is one of a ctx to be committed, or a bool whether
merging is committed.
This returns editform 'baseform' with '.merge' if merging is
@@ -1093,7 +1093,7 @@
if matchfn:
stat = self.diffopts.get('stat')
diff = self.diffopts.get('patch')
- diffopts = patch.diffopts(self.ui, self.diffopts)
+ diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
node, prev = ctx.node(), ctx.p1().node()
if stat:
self.ui.pushbuffer()
@@ -1785,8 +1785,8 @@
# If we're forced to take the slowpath it means we're following
# at least one pattern/directory, so don't bother with rename tracking.
if follow and not match.always() and not slowpath:
- # _makelogfilematcher expects its files argument to be relative to
- # the repo root, so use match.files(), not pats.
+ # _makefollowlogfilematcher expects its files argument to be
+ # relative to the repo root, so use match.files(), not pats.
filematcher = _makefollowlogfilematcher(repo, match.files(),
followfirst)
else:
@@ -1984,9 +1984,9 @@
abort, warn = scmutil.checkportabilityalert(ui)
if abort or warn:
cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
- for f in repo.walk(match):
+ for f in wctx.walk(match):
exact = match.exact(f)
- if exact or not explicitonly and f not in repo.dirstate:
+ if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
if cca:
cca(f)
names.append(f)
@@ -2054,6 +2054,90 @@
forgot.extend(forget)
return bad, forgot
+def remove(ui, repo, m, prefix, after, force, subrepos):
+ join = lambda f: os.path.join(prefix, f)
+ ret = 0
+ s = repo.status(match=m, clean=True)
+ modified, added, deleted, clean = s[0], s[1], s[3], s[6]
+
+ wctx = repo[None]
+
+ for subpath in sorted(wctx.substate):
+ def matchessubrepo(matcher, subpath):
+ if matcher.exact(subpath):
+ return True
+ for f in matcher.files():
+ if f.startswith(subpath):
+ return True
+ return False
+
+ if subrepos or matchessubrepo(m, subpath):
+ sub = wctx.sub(subpath)
+ try:
+ submatch = matchmod.narrowmatcher(subpath, m)
+ if sub.removefiles(ui, submatch, prefix, after, force,
+ subrepos):
+ ret = 1
+ except error.LookupError:
+ ui.status(_("skipping missing subrepository: %s\n")
+ % join(subpath))
+
+ # warn about failure to delete explicit files/dirs
+ for f in m.files():
+ def insubrepo():
+ for subpath in wctx.substate:
+ if f.startswith(subpath):
+ return True
+ return False
+
+ if f in repo.dirstate or f in wctx.dirs() or f == '.' or insubrepo():
+ continue
+
+ if os.path.exists(m.rel(join(f))):
+ if os.path.isdir(m.rel(join(f))):
+ ui.warn(_('not removing %s: no tracked files\n')
+ % m.rel(join(f)))
+ else:
+ ui.warn(_('not removing %s: file is untracked\n')
+ % m.rel(join(f)))
+ # missing files will generate a warning elsewhere
+ ret = 1
+
+ if force:
+ list = modified + deleted + clean + added
+ elif after:
+ list = deleted
+ for f in modified + added + clean:
+ ui.warn(_('not removing %s: file still exists\n') % m.rel(join(f)))
+ ret = 1
+ else:
+ list = deleted + clean
+ for f in modified:
+ ui.warn(_('not removing %s: file is modified (use -f'
+ ' to force removal)\n') % m.rel(join(f)))
+ ret = 1
+ for f in added:
+ ui.warn(_('not removing %s: file has been marked for add'
+ ' (use forget to undo)\n') % m.rel(join(f)))
+ ret = 1
+
+ for f in sorted(list):
+ if ui.verbose or not m.exact(f):
+ ui.status(_('removing %s\n') % m.rel(join(f)))
+
+ wlock = repo.wlock()
+ try:
+ if not after:
+ for f in list:
+ if f in added:
+ continue # we never unlink added files on remove
+ util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+ repo[None].forget(list)
+ finally:
+ wlock.release()
+
+ return ret
+
def cat(ui, repo, ctx, matcher, prefix, **opts):
err = 1
@@ -2508,13 +2592,13 @@
m = scmutil.matchfiles(repo, names)
- modified = set(changes[0])
- added = set(changes[1])
- removed = set(changes[2])
- _deleted = set(changes[3])
- unknown = set(changes[4])
- unknown.update(changes[5])
- clean = set(changes[6])
+ modified = set(changes.modified)
+ added = set(changes.added)
+ removed = set(changes.removed)
+ _deleted = set(changes.deleted)
+ unknown = set(changes.unknown)
+ unknown.update(changes.ignored)
+ clean = set(changes.clean)
modadded = set()
# split between files known in target manifest and the others
@@ -2524,11 +2608,11 @@
deladded = _deleted - smf
deleted = _deleted - deladded
- # We need to account for the state of file in the dirstate
+ # We need to account for the state of file in the dirstate.
#
- # Even, when we revert agains something else than parent. this will
+ # Even, when we revert against something else than parent. This will
# slightly alter the behavior of revert (doing back up or not, delete
- # or just forget etc)
+ # or just forget etc).
if parent == node:
dsmodified = modified
dsadded = added
@@ -2538,9 +2622,9 @@
modified, added, removed = set(), set(), set()
else:
changes = repo.status(node1=parent, match=m)
- dsmodified = set(changes[0])
- dsadded = set(changes[1])
- dsremoved = set(changes[2])
+ dsmodified = set(changes.modified)
+ dsadded = set(changes.added)
+ dsremoved = set(changes.removed)
# store all local modifications, useful later for rename detection
localchanges = dsmodified | dsadded
--- a/mercurial/commands.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/commands.py Tue Dec 09 13:32:19 2014 -0600
@@ -141,6 +141,7 @@
]
diffopts2 = [
+ ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
('p', 'show-function', None, _('show which function each change is in')),
('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
@@ -315,7 +316,8 @@
m = scmutil.match(ctx, pats, opts)
m.bad = bad
follow = not opts.get('no_follow')
- diffopts = patch.diffopts(ui, opts, section='annotate')
+ diffopts = patch.difffeatureopts(ui, opts, section='annotate',
+ whitespace=True)
for abs in ctx.walk(m):
fctx = ctx[abs]
if not opts.get('text') and util.binary(fctx.data()):
@@ -743,9 +745,7 @@
# update state
state['current'] = [node]
hbisect.save_state(repo, state)
- status = util.system(command,
- environ={'HG_NODE': hex(node)},
- out=ui.fout)
+ status = ui.system(command, environ={'HG_NODE': hex(node)})
if status == 125:
transition = "skip"
elif status == 0:
@@ -1573,9 +1573,8 @@
fp.close()
editor = ui.geteditor()
- util.system("%s \"%s\"" % (editor, f),
- onerr=util.Abort, errprefix=_("edit failed"),
- out=ui.fout)
+ ui.system("%s \"%s\"" % (editor, f),
+ onerr=util.Abort, errprefix=_("edit failed"))
return
for f in scmutil.rcpath():
@@ -2653,22 +2652,13 @@
" rawsize totalsize compression heads chainlen\n")
ts = 0
heads = set()
- rindex = r.index
-
- def chainbaseandlen(rev):
- clen = 0
- base = rindex[rev][3]
- while base != rev:
- clen += 1
- rev = base
- base = rindex[rev][3]
- return base, clen
for rev in xrange(numrevs):
dbase = r.deltaparent(rev)
if dbase == -1:
dbase = rev
- cbase, clen = chainbaseandlen(rev)
+ cbase = r.chainbase(rev)
+ clen = r.chainlen(rev)
p1, p2 = r.parentrevs(rev)
rs = r.rawsize(rev)
ts = ts + rs
@@ -3083,7 +3073,7 @@
if reverse:
node1, node2 = node2, node1
- diffopts = patch.diffopts(ui, opts)
+ diffopts = patch.diffallopts(ui, opts)
m = scmutil.match(repo[node2], pats, opts)
cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
listsubrepos=opts.get('subrepos'))
@@ -3406,7 +3396,8 @@
# don't mutate while iterating, create a copy
for rev in list(revs):
if rev in ancestors:
- ui.warn(_('skipping ancestor revision %s\n') % rev)
+ ui.warn(_('skipping ancestor revision %d:%s\n') %
+ (rev, repo[rev]))
# XXX remove on list is slow
revs.remove(rev)
if not revs:
@@ -3432,23 +3423,25 @@
except error.RepoLookupError:
r = None
if r in revs:
- ui.warn(_('skipping revision %s (already grafted to %s)\n')
- % (r, rev))
+ ui.warn(_('skipping revision %d:%s '
+ '(already grafted to %d:%s)\n')
+ % (r, repo[r], rev, ctx))
revs.remove(r)
elif ids[n] in revs:
if r is None:
- ui.warn(_('skipping already grafted revision %s '
- '(%s also has unknown origin %s)\n')
- % (ids[n], rev, n))
+ ui.warn(_('skipping already grafted revision %d:%s '
+ '(%d:%s also has unknown origin %s)\n')
+ % (ids[n], repo[ids[n]], rev, ctx, n[:12]))
else:
- ui.warn(_('skipping already grafted revision %s '
- '(%s also has origin %d)\n')
- % (ids[n], rev, r))
+ ui.warn(_('skipping already grafted revision %d:%s '
+ '(%d:%s also has origin %d:%s)\n')
+ % (ids[n], repo[ids[n]], rev, ctx, r, n[:12]))
revs.remove(ids[n])
elif ctx.hex() in ids:
r = ids[ctx.hex()]
- ui.warn(_('skipping already grafted revision %s '
- '(was grafted from %d)\n') % (r, rev))
+ ui.warn(_('skipping already grafted revision %d:%s '
+ '(was grafted from %d:%s)\n') %
+ (r, repo[r], rev, ctx))
revs.remove(r)
if not revs:
return -1
@@ -3456,8 +3449,12 @@
wlock = repo.wlock()
try:
for pos, ctx in enumerate(repo.set("%ld", revs)):
-
- ui.status(_('grafting revision %s\n') % ctx.rev())
+ desc = '%d:%s "%s"' % (ctx.rev(), ctx,
+ ctx.description().split('\n', 1)[0])
+ names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
+ if names:
+ desc += ' (%s)' % ' '.join(names)
+ ui.status(_('grafting %s\n') % desc)
if opts.get('dry_run'):
continue
@@ -3501,7 +3498,9 @@
node = repo.commit(text=message, user=user,
date=date, extra=extra, editor=editor)
if node is None:
- ui.status(_('graft for revision %s is empty\n') % ctx.rev())
+ ui.warn(
+ _('note: graft of %d:%s created no changes to commit\n') %
+ (ctx.rev(), ctx))
finally:
wlock.release()
@@ -5097,7 +5096,7 @@
[('A', 'after', None, _('record delete for missing files')),
('f', 'force', None,
_('remove (and delete) file even if added or modified')),
- ] + walkopts,
+ ] + subrepoopts + walkopts,
_('[OPTION]... FILE...'),
inferrepo=True)
def remove(ui, repo, *pats, **opts):
@@ -5137,62 +5136,13 @@
Returns 0 on success, 1 if any warnings encountered.
"""
- ret = 0
after, force = opts.get('after'), opts.get('force')
if not pats and not after:
raise util.Abort(_('no files specified'))
m = scmutil.match(repo[None], pats, opts)
- s = repo.status(match=m, clean=True)
- modified, added, deleted, clean = s[0], s[1], s[3], s[6]
-
- # warn about failure to delete explicit files/dirs
- wctx = repo[None]
- for f in m.files():
- if f in repo.dirstate or f in wctx.dirs():
- continue
- if os.path.exists(m.rel(f)):
- if os.path.isdir(m.rel(f)):
- ui.warn(_('not removing %s: no tracked files\n') % m.rel(f))
- else:
- ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
- # missing files will generate a warning elsewhere
- ret = 1
-
- if force:
- list = modified + deleted + clean + added
- elif after:
- list = deleted
- for f in modified + added + clean:
- ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
- ret = 1
- else:
- list = deleted + clean
- for f in modified:
- ui.warn(_('not removing %s: file is modified (use -f'
- ' to force removal)\n') % m.rel(f))
- ret = 1
- for f in added:
- ui.warn(_('not removing %s: file has been marked for add'
- ' (use forget to undo)\n') % m.rel(f))
- ret = 1
-
- for f in sorted(list):
- if ui.verbose or not m.exact(f):
- ui.status(_('removing %s\n') % m.rel(f))
-
- wlock = repo.wlock()
- try:
- if not after:
- for f in list:
- if f in added:
- continue # we never unlink added files on remove
- util.unlinkpath(repo.wjoin(f), ignoremissing=True)
- repo[None].forget(list)
- finally:
- wlock.release()
-
- return ret
+ subrepos = opts.get('subrepos')
+ return cmdutil.remove(ui, repo, m, "", after, force, subrepos)
@command('rename|move|mv',
[('A', 'after', None, _('record a rename that has already occurred')),
@@ -6245,7 +6195,6 @@
raise util.Abort(_("uncommitted changes"))
if rev is None:
rev = repo[repo[None].branch()].rev()
- mergemod._checkunknown(repo, repo[None], repo[rev])
repo.ui.setconfig('ui', 'forcemerge', tool, 'update')
--- a/mercurial/commandserver.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/commandserver.py Tue Dec 09 13:32:19 2014 -0600
@@ -248,15 +248,47 @@
return 0
+def _protectio(ui):
+ """ duplicates streams and redirects original to null if ui uses stdio """
+ ui.flush()
+ newfiles = []
+ nullfd = os.open(os.devnull, os.O_RDWR)
+ for f, sysf, mode in [(ui.fin, sys.stdin, 'rb'),
+ (ui.fout, sys.stdout, 'wb')]:
+ if f is sysf:
+ newfd = os.dup(f.fileno())
+ os.dup2(nullfd, f.fileno())
+ f = os.fdopen(newfd, mode)
+ newfiles.append(f)
+ os.close(nullfd)
+ return tuple(newfiles)
+
+def _restoreio(ui, fin, fout):
+ """ restores streams from duplicated ones """
+ ui.flush()
+ for f, uif in [(fin, ui.fin), (fout, ui.fout)]:
+ if f is not uif:
+ os.dup2(f.fileno(), uif.fileno())
+ f.close()
+
class pipeservice(object):
def __init__(self, ui, repo, opts):
- self.server = server(ui, repo, sys.stdin, sys.stdout)
+ self.ui = ui
+ self.repo = repo
def init(self):
pass
def run(self):
- return self.server.serve()
+ ui = self.ui
+ # redirect stdio to null device so that broken extensions or in-process
+ # hooks will never cause corruption of channel protocol.
+ fin, fout = _protectio(ui)
+ try:
+ sv = server(ui, self.repo, fin, fout)
+ return sv.serve()
+ finally:
+ _restoreio(ui, fin, fout)
class _requesthandler(SocketServer.StreamRequestHandler):
def handle(self):
--- a/mercurial/context.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/context.py Tue Dec 09 13:32:19 2014 -0600
@@ -71,22 +71,9 @@
object oriented way for other contexts to customize the manifest
generation.
"""
- if match.always():
- return self.manifest().copy()
-
- files = match.files()
- if (match.matchfn == match.exact or
- (not match.anypats() and util.all(fn in self for fn in files))):
- return self.manifest().intersectfiles(files)
+ return self.manifest().matches(match)
- mf = self.manifest().copy()
- for fn in mf.keys():
- if not match(fn):
- del mf[fn]
- return mf
-
- def _matchstatus(self, other, s, match, listignored, listclean,
- listunknown):
+ def _matchstatus(self, other, match):
"""return match.always if match is none
This internal method provides a way for child objects to override the
@@ -94,33 +81,22 @@
"""
return match or matchmod.always(self._repo.root, self._repo.getcwd())
- def _prestatus(self, other, s, match, listignored, listclean, listunknown):
- """provide a hook to allow child objects to preprocess status results
-
- For example, this allows other contexts, such as workingctx, to query
- the dirstate before comparing the manifests.
- """
- # load earliest manifest first for caching reasons
- if self.rev() < other.rev():
- self.manifest()
- return s
-
- def _poststatus(self, other, s, match, listignored, listclean, listunknown):
- """provide a hook to allow child objects to postprocess status results
-
- For example, this allows other contexts, such as workingctx, to filter
- suspect symlinks in the case of FAT32 and NTFS filesytems.
- """
- return s
-
def _buildstatus(self, other, s, match, listignored, listclean,
listunknown):
"""build a status with respect to another context"""
+ # Load earliest manifest first for caching reasons. More specifically,
+ # if you have revisions 1000 and 1001, 1001 is probably stored as a
+ # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
+ # 1000 and cache it so that when you read 1001, we just need to apply a
+ # delta to what's in the cache. So that's one full reconstruction + one
+ # delta application.
+ if self.rev() is not None and self.rev() < other.rev():
+ self.manifest()
mf1 = other._manifestmatches(match, s)
mf2 = self._manifestmatches(match, s)
modified, added, clean = [], [], []
- deleted, unknown, ignored = s[3], s[4], s[5]
+ deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
deletedset = set(deleted)
withflags = mf1.withflags() | mf2.withflags()
for fn, mf2node in mf2.iteritems():
@@ -141,7 +117,8 @@
unknown = [fn for fn in unknown if fn not in mf1]
ignored = [fn for fn in ignored if fn not in mf1]
- return [modified, added, removed, deleted, unknown, ignored, clean]
+ return scmutil.status(modified, added, removed, deleted, unknown,
+ ignored, clean)
@propertycache
def substate(self):
@@ -311,18 +288,16 @@
reversed = True
ctx1, ctx2 = ctx2, ctx1
- r = [[], [], [], [], [], [], []]
- match = ctx2._matchstatus(ctx1, r, match, listignored, listclean,
- listunknown)
- r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
+ match = ctx2._matchstatus(ctx1, match)
+ r = scmutil.status([], [], [], [], [], [], [])
r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
listunknown)
- r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
- listunknown)
if reversed:
- # reverse added and removed
- r[1], r[2] = r[2], r[1]
+ # Reverse added and removed. Clear deleted, unknown and ignored as
+ # these make no sense to reverse.
+ r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
+ r.clean)
if listsubrepos:
for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
@@ -341,8 +316,7 @@
for l in r:
l.sort()
- # we return a tuple to signify that this list isn't changing
- return scmutil.status(*r)
+ return r
def makememctx(repo, parents, text, user, date, branch, files, store,
@@ -1061,7 +1035,13 @@
@propertycache
def _manifest(self):
- """generate a manifest corresponding to the values in self._status"""
+ """generate a manifest corresponding to the values in self._status
+
+ This reuses the file nodeid from parent, but we append an extra letter
+ when modified. Modified files get an extra 'm' while added files get
+ appended an extra 'a'. This is used by manifests merge to see that files
+ are different and by update logic to avoid deleting newly added files.
+ """
man1 = self._parents[0].manifest()
man = man1.copy()
@@ -1406,37 +1386,14 @@
need to build a manifest and return what matches.
"""
mf = self._repo['.']._manifestmatches(match, s)
- modified, added, removed = s[0:3]
- for f in modified + added:
+ for f in s.modified + s.added:
mf[f] = None
mf.setflag(f, self.flags(f))
- for f in removed:
+ for f in s.removed:
if f in mf:
del mf[f]
return mf
- def _prestatus(self, other, s, match, listignored, listclean, listunknown):
- """override the parent hook with a dirstate query
-
- We use this prestatus hook to populate the status with information from
- the dirstate.
- """
- # doesn't need to call super; if that changes, be aware that super
- # calls self.manifest which would slow down the common case of calling
- # status against a workingctx's parent
- return self._dirstatestatus(match, listignored, listclean, listunknown)
-
- def _poststatus(self, other, s, match, listignored, listclean, listunknown):
- """override the parent hook with a filter for suspect symlinks
-
- We use this poststatus hook to filter out symlinks that might have
- accidentally ended up with the entire contents of the file they are
- susposed to be linking to.
- """
- s[0] = self._filtersuspectsymlink(s[0])
- self._status = scmutil.status(*s)
- return s
-
def _dirstatestatus(self, match=None, ignored=False, clean=False,
unknown=False):
'''Gets the status from the dirstate -- internal use only.'''
@@ -1447,18 +1404,17 @@
subrepos = sorted(self.substate)
cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
listclean, listunknown)
- modified, added, removed, deleted, unknown, ignored, clean = s
# check for any possibly clean files
if cmp:
modified2, fixup = self._checklookup(cmp)
- modified += modified2
+ s.modified.extend(modified2)
# update dirstate for files that are actually clean
if fixup and listclean:
- clean += fixup
+ s.clean.extend(fixup)
- return [modified, added, removed, deleted, unknown, ignored, clean]
+ return s
def _buildstatus(self, other, s, match, listignored, listclean,
listunknown):
@@ -1469,14 +1425,19 @@
building a new manifest if self (working directory) is not comparing
against its parent (repo['.']).
"""
+ s = self._dirstatestatus(match, listignored, listclean, listunknown)
+ # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
+ # might have accidentally ended up with the entire contents of the file
+ # they are supposed to be linking to.
+ s.modified[:] = self._filtersuspectsymlink(s.modified)
if other != self._repo['.']:
s = super(workingctx, self)._buildstatus(other, s, match,
listignored, listclean,
listunknown)
+ self._status = s
return s
- def _matchstatus(self, other, s, match, listignored, listclean,
- listunknown):
+ def _matchstatus(self, other, match):
"""override the match method with a filter for directory patterns
We use inheritance to customize the match.bad method only in cases of
@@ -1487,8 +1448,7 @@
just use the default match object sent to us.
"""
superself = super(workingctx, self)
- match = superself._matchstatus(other, s, match, listignored, listclean,
- listunknown)
+ match = superself._matchstatus(other, match)
if other != self._repo['.']:
def bad(f, msg):
# 'f' may be a directory pattern from 'match.files()',
@@ -1499,14 +1459,6 @@
match.bad = bad
return match
- def status(self, other='.', match=None, listignored=False,
- listclean=False, listunknown=False, listsubrepos=False):
- # yet to be determined: what to do if 'other' is a 'workingctx' or a
- # 'memctx'?
- return super(workingctx, self).status(other, match, listignored,
- listclean, listunknown,
- listsubrepos)
-
class committablefilectx(basefilectx):
"""A committablefilectx provides common functionality for a file context
that wants the ability to commit, e.g. workingfilectx or memfilectx."""
@@ -1696,7 +1648,7 @@
class memfilectx(committablefilectx):
"""memfilectx represents an in-memory file to commit.
- See memctx and commitablefilectx for more details.
+ See memctx and committablefilectx for more details.
"""
def __init__(self, repo, path, data, islink=False,
isexec=False, copied=None, memctx=None):
--- a/mercurial/copies.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/copies.py Tue Dec 09 13:32:19 2014 -0600
@@ -97,7 +97,7 @@
# |/
# o 0 a0
#
- # When findlimit is called, a and b are revs 3 and 0, so limit will be 2,
+ # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
# yet the filelog has the copy information in rev 1 and we will not look
# back far enough unless we also look at the a and b as candidates.
# This only occurs when a is a descendent of b or visa-versa.
--- a/mercurial/dagutil.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/dagutil.py Tue Dec 09 13:32:19 2014 -0600
@@ -25,7 +25,7 @@
self._inverse = None
def nodeset(self):
- '''set of all node idxs'''
+ '''set of all node ixs'''
raise NotImplementedError
def heads(self):
@@ -77,7 +77,7 @@
return self._internalize(id)
def internalizeall(self, ids, filterunknown=False):
- '''return a list of (or set if given a set) of node ids'''
+ '''return a list of (or set if given a set) of node ixs'''
ixs = self._internalizeall(ids, filterunknown)
if isinstance(ids, set):
return set(ixs)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/default.d/mergetools.rc Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,135 @@
+# Some default global settings for common merge tools
+
+[merge-tools]
+kdiff3.args=--auto --L1 base --L2 local --L3 other $base $local $other -o $output
+kdiff3.regkey=Software\KDiff3
+kdiff3.regkeyalt=Software\Wow6432Node\KDiff3
+kdiff3.regappend=\kdiff3.exe
+kdiff3.fixeol=True
+kdiff3.gui=True
+kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
+
+gvimdiff.args=--nofork -d -g -O $local $other $base
+gvimdiff.regkey=Software\Vim\GVim
+gvimdiff.regkeyalt=Software\Wow6432Node\Vim\GVim
+gvimdiff.regname=path
+gvimdiff.priority=-9
+gvimdiff.diffargs=--nofork -d -g -O $parent $child
+
+vimdiff.args=$local $other $base -c 'redraw | echomsg "hg merge conflict, type \":cq\" to abort vimdiff"'
+vimdiff.check=changed
+vimdiff.priority=-10
+
+merge.check=conflicts
+merge.priority=-100
+
+gpyfm.gui=True
+
+meld.gui=True
+meld.args=--label='local' $local --label='merged' $base --label='other' $other -o $output
+meld.check=changed
+meld.diffargs=-a --label='$plabel1' $parent --label='$clabel' $child
+
+tkdiff.args=$local $other -a $base -o $output
+tkdiff.gui=True
+tkdiff.priority=-8
+tkdiff.diffargs=-L '$plabel1' $parent -L '$clabel' $child
+
+xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 local --title2 base --title3 other --merged-filename $output --merge $local $base $other
+xxdiff.gui=True
+xxdiff.priority=-8
+xxdiff.diffargs=--title1 '$plabel1' $parent --title2 '$clabel' $child
+
+diffmerge.regkey=Software\SourceGear\SourceGear DiffMerge\
+diffmerge.regkeyalt=Software\Wow6432Node\SourceGear\SourceGear DiffMerge\
+diffmerge.regname=Location
+diffmerge.priority=-7
+diffmerge.args=-nosplash -merge -title1=local -title2=merged -title3=other $local $base $other -result=$output
+diffmerge.check=changed
+diffmerge.gui=True
+diffmerge.diffargs=--nosplash --title1='$plabel1' --title2='$clabel' $parent $child
+
+p4merge.args=$base $local $other $output
+p4merge.regkey=Software\Perforce\Environment
+p4merge.regkeyalt=Software\Wow6432Node\Perforce\Environment
+p4merge.regname=P4INSTROOT
+p4merge.regappend=\p4merge.exe
+p4merge.gui=True
+p4merge.priority=-8
+p4merge.diffargs=$parent $child
+
+p4mergeosx.executable = /Applications/p4merge.app/Contents/MacOS/p4merge
+p4mergeosx.args = $base $local $other $output
+p4mergeosx.gui = True
+p4mergeosx.priority=-8
+p4mergeosx.diffargs=$parent $child
+
+tortoisemerge.args=/base:$base /mine:$local /theirs:$other /merged:$output
+tortoisemerge.regkey=Software\TortoiseSVN
+tortoisemerge.regkeyalt=Software\Wow6432Node\TortoiseSVN
+tortoisemerge.check=changed
+tortoisemerge.gui=True
+tortoisemerge.priority=-8
+tortoisemerge.diffargs=/base:$parent /mine:$child /basename:'$plabel1' /minename:'$clabel'
+
+ecmerge.args=$base $local $other --mode=merge3 --title0=base --title1=local --title2=other --to=$output
+ecmerge.regkey=Software\Elli\xc3\xa9 Computing\Merge
+ecmerge.regkeyalt=Software\Wow6432Node\Elli\xc3\xa9 Computing\Merge
+ecmerge.gui=True
+ecmerge.diffargs=$parent $child --mode=diff2 --title1='$plabel1' --title2='$clabel'
+
+# editmerge is a small script shipped in contrib.
+# It needs this config otherwise it behaves the same as internal:local
+editmerge.args=$output
+editmerge.check=changed
+editmerge.premerge=keep
+
+filemerge.executable=/Developer/Applications/Utilities/FileMerge.app/Contents/MacOS/FileMerge
+filemerge.args=-left $other -right $local -ancestor $base -merge $output
+filemerge.gui=True
+
+; Windows version of Beyond Compare
+beyondcompare3.args=$local $other $base $output /ro /lefttitle=local /centertitle=base /righttitle=other /automerge /reviewconflicts /solo
+beyondcompare3.regkey=Software\Scooter Software\Beyond Compare 3
+beyondcompare3.regname=ExePath
+beyondcompare3.gui=True
+beyondcompare3.priority=-2
+beyondcompare3.diffargs=/lro /lefttitle='$plabel1' /righttitle='$clabel' /solo /expandall $parent $child
+
+; Linux version of Beyond Compare
+bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=parent1 -centertitle=base -righttitle=parent2 -outputtitle=merged -automerge -reviewconflicts -solo
+bcompare.gui=True
+bcompare.priority=-1
+bcompare.diffargs=-lro -lefttitle='$plabel1' -righttitle='$clabel' -solo -expandall $parent $child
+
+winmerge.args=/e /x /wl /ub /dl other /dr local $other $local $output
+winmerge.regkey=Software\Thingamahoochie\WinMerge
+winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\
+winmerge.regname=Executable
+winmerge.check=changed
+winmerge.gui=True
+winmerge.priority=-10
+winmerge.diffargs=/r /e /x /ub /wl /dl '$plabel1' /dr '$clabel' $parent $child
+
+araxis.regkey=SOFTWARE\Classes\TypeLib\{46799e0a-7bd1-4330-911c-9660bb964ea2}\7.0\HELPDIR
+araxis.regappend=\ConsoleCompare.exe
+araxis.priority=-2
+araxis.args=/3 /a2 /wait /merge /title1:"Other" /title2:"Base" /title3:"Local :"$local $other $base $local $output
+araxis.checkconflict=True
+araxis.binary=True
+araxis.gui=True
+araxis.diffargs=/2 /wait /title1:"$plabel1" /title2:"$clabel" $parent $child
+
+diffuse.priority=-3
+diffuse.args=$local $base $other
+diffuse.gui=True
+diffuse.diffargs=$parent $child
+
+UltraCompare.regkey=Software\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
+UltraCompare.regkeyalt=Software\Wow6432Node\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
+UltraCompare.args = $base $local $other -title1 base -title3 other
+UltraCompare.priority = -2
+UltraCompare.gui = True
+UltraCompare.binary = True
+UltraCompare.check = conflicts,changed
+UltraCompare.diffargs=$child $parent -title1 $clabel -title2 $plabel1
--- a/mercurial/dirstate.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/dirstate.py Tue Dec 09 13:32:19 2014 -0600
@@ -8,7 +8,7 @@
from node import nullid
from i18n import _
import scmutil, util, ignore, osutil, parsers, encoding, pathutil
-import os, stat, errno, gc
+import os, stat, errno
propertycache = util.propertycache
filecache = scmutil.filecache
@@ -317,13 +317,10 @@
# Depending on when in the process's lifetime the dirstate is parsed,
# this can get very expensive. As a workaround, disable GC while
# parsing the dirstate.
- gcenabled = gc.isenabled()
- gc.disable()
- try:
- p = parsers.parse_dirstate(self._map, self._copymap, st)
- finally:
- if gcenabled:
- gc.enable()
+ #
+ # (we cannot decorate the function directly since it is in a C module)
+ parse_dirstate = util.nogc(parsers.parse_dirstate)
+ p = parse_dirstate(self._map, self._copymap, st)
if not self._dirtypl:
self._pl = p
@@ -629,6 +626,7 @@
results = dict.fromkeys(subrepos)
results['.hg'] = None
+ alldirs = None
for ff in files:
if normalize:
nf = normalize(normpath(ff), False, True)
@@ -657,13 +655,12 @@
if nf in dmap: # does it exactly match a missing file?
results[nf] = None
else: # does it match a missing directory?
- prefix = nf + "/"
- for fn in dmap:
- if fn.startswith(prefix):
- if matchedir:
- matchedir(nf)
- notfoundadd(nf)
- break
+ if alldirs is None:
+ alldirs = scmutil.dirs(dmap)
+ if nf in alldirs:
+ if matchedir:
+ matchedir(nf)
+ notfoundadd(nf)
else:
badfn(ff, inst.strerror)
--- a/mercurial/dispatch.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/dispatch.py Tue Dec 09 13:32:19 2014 -0600
@@ -402,7 +402,7 @@
return ''
cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
cmd = aliasinterpolate(self.name, args, cmd)
- return util.system(cmd, environ=env, out=ui.fout)
+ return ui.system(cmd, environ=env)
self.fn = fn
return
--- a/mercurial/exchange.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/exchange.py Tue Dec 09 13:32:19 2014 -0600
@@ -104,6 +104,8 @@
self.outobsmarkers = set()
# outgoing bookmarks
self.outbookmarks = []
+ # transaction manager
+ self.trmanager = None
@util.propertycache
def futureheads(self):
@@ -204,6 +206,10 @@
msg = 'cannot lock source repository: %s\n' % err
pushop.ui.debug(msg)
try:
+ if pushop.locallocked:
+ pushop.trmanager = transactionmanager(repo,
+ 'push-response',
+ pushop.remote.url())
pushop.repo.checkpush(pushop)
lock = None
unbundle = pushop.remote.capable('unbundle')
@@ -222,7 +228,11 @@
finally:
if lock is not None:
lock.release()
+ if pushop.trmanager:
+ pushop.trmanager.close()
finally:
+ if pushop.trmanager:
+ pushop.trmanager.release()
if locallock is not None:
locallock.release()
@@ -298,7 +308,7 @@
else:
# adds changeset we are going to push as draft
#
- # should not be necessary for pushblishing server, but because of an
+ # should not be necessary for publishing server, but because of an
# issue fixed in xxxxx we have to do it anyway.
fdroots = list(unfi.set('roots(%ln + %ln::)',
outgoing.missing, droots))
@@ -445,10 +455,25 @@
pushop.outgoing)
if not pushop.force:
bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
- cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
- cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
+ b2caps = bundle2.bundle2caps(pushop.remote)
+ version = None
+ cgversions = b2caps.get('b2x:changegroup')
+ if not cgversions: # 3.1 and 3.2 ship with an empty value
+ cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
+ pushop.outgoing)
+ else:
+ cgversions = [v for v in cgversions if v in changegroup.packermap]
+ if not cgversions:
+ raise ValueError(_('no common changegroup version'))
+ version = max(cgversions)
+ cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
+ pushop.outgoing,
+ version=version)
+ cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg)
+ if version is not None:
+ cgpart.addparam('version', version)
def handlereply(op):
- """extract addchangroup returns from server reply"""
+ """extract addchangegroup returns from server reply"""
cgreplies = op.records.getreplies(cgpart.id)
assert len(cgreplies['changegroup']) == 1
pushop.cgresult = cgreplies['changegroup'][0]['return']
@@ -547,8 +572,12 @@
The only currently supported type of data is changegroup but this will
evolve in the future."""
bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
+ pushback = (pushop.trmanager
+ and pushop.ui.configbool('experimental', 'bundle2.pushback'))
+
# create reply capability
- capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
+ capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
+ allowpushback=pushback))
bundler.newpart('b2x:replycaps', data=capsblob)
replyhandlers = []
for partgenname in b2partsgenorder:
@@ -565,7 +594,10 @@
except error.BundleValueError, exc:
raise util.Abort('missing support for %s' % exc)
try:
- op = bundle2.processbundle(pushop.repo, reply)
+ trgetter = None
+ if pushback:
+ trgetter = pushop.trmanager.transaction
+ op = bundle2.processbundle(pushop.repo, reply, trgetter)
except error.BundleValueError, exc:
raise util.Abort('missing support for %s' % exc)
for rephand in replyhandlers:
@@ -678,13 +710,11 @@
def _localphasemove(pushop, nodes, phase=phases.public):
"""move <nodes> to <phase> in the local source repo"""
- if pushop.locallocked:
- tr = pushop.repo.transaction('push-phase-sync')
- try:
- phases.advanceboundary(pushop.repo, tr, phase, nodes)
- tr.close()
- finally:
- tr.release()
+ if pushop.trmanager:
+ phases.advanceboundary(pushop.repo,
+ pushop.trmanager.transaction(),
+ phase,
+ nodes)
else:
# repo is not locked, do not change any phases!
# Informs the user that phases should have been moved when
@@ -739,7 +769,7 @@
class pulloperation(object):
"""A object that represent a single pull operation
- It purpose is to carry push related state and very common operation.
+ Its purpose is to carry pull related state and very common operation.
A new should be created at the beginning of each pull and discarded
afterward.
@@ -756,10 +786,8 @@
self.explicitbookmarks = bookmarks
# do we force pull?
self.force = force
- # the name the pull transaction
- self._trname = 'pull\n' + util.hidepassword(remote.url())
- # hold the transaction once created
- self._tr = None
+ # transaction manager
+ self.trmanager = None
# set of common changeset between local and remote before pull
self.common = None
# set of pulled head
@@ -792,29 +820,44 @@
return self.heads
def gettransaction(self):
- """get appropriate pull transaction, creating it if needed"""
- if self._tr is None:
- self._tr = self.repo.transaction(self._trname)
- self._tr.hookargs['source'] = 'pull'
- self._tr.hookargs['url'] = self.remote.url()
+ # deprecated; talk to trmanager directly
+ return self.trmanager.transaction()
+
+class transactionmanager(object):
+ """An object to manage the lifecycle of a transaction
+
+ It creates the transaction on demand and calls the appropriate hooks when
+ closing the transaction."""
+ def __init__(self, repo, source, url):
+ self.repo = repo
+ self.source = source
+ self.url = url
+ self._tr = None
+
+ def transaction(self):
+ """Return an open transaction object, constructing if necessary"""
+ if not self._tr:
+ trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
+ self._tr = self.repo.transaction(trname)
+ self._tr.hookargs['source'] = self.source
+ self._tr.hookargs['url'] = self.url
return self._tr
- def closetransaction(self):
+ def close(self):
"""close transaction if created"""
if self._tr is not None:
repo = self.repo
- cl = repo.unfiltered().changelog
- p = cl.writepending() and repo.root or ""
- p = cl.writepending() and repo.root or ""
+ p = lambda: self._tr.writepending() and repo.root or ""
repo.hook('b2x-pretransactionclose', throw=True, pending=p,
**self._tr.hookargs)
- self._tr.close()
hookargs = dict(self._tr.hookargs)
def runhooks():
repo.hook('b2x-transactionclose', **hookargs)
- repo._afterlock(runhooks)
+ self._tr.addpostclose('b2x-hook-transactionclose',
+ lambda tr: repo._afterlock(runhooks))
+ self._tr.close()
- def releasetransaction(self):
+ def release(self):
"""release transaction if created"""
if self._tr is not None:
self._tr.release()
@@ -832,6 +875,7 @@
pullop.remotebookmarks = remote.listkeys('bookmarks')
lock = pullop.repo.lock()
try:
+ pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
_pulldiscovery(pullop)
if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
and pullop.remote.capable('bundle2-exp')):
@@ -840,9 +884,9 @@
_pullphase(pullop)
_pullbookmarks(pullop)
_pullobsolete(pullop)
- pullop.closetransaction()
+ pullop.trmanager.close()
finally:
- pullop.releasetransaction()
+ pullop.trmanager.release()
lock.release()
return pullop
@@ -965,9 +1009,9 @@
return
pullop.stepsdone.add('changegroup')
if not pullop.fetch:
- pullop.repo.ui.status(_("no changes found\n"))
- pullop.cgresult = 0
- return
+ pullop.repo.ui.status(_("no changes found\n"))
+ pullop.cgresult = 0
+ return
pullop.gettransaction()
if pullop.heads is None and list(pullop.common) == [nullid]:
pullop.repo.ui.status(_("requesting all changes\n"))
@@ -1133,10 +1177,11 @@
b2caps.update(bundle2.decodecaps(blob))
bundler = bundle2.bundle20(repo.ui, b2caps)
+ kwargs['heads'] = heads
+ kwargs['common'] = common
+
for name in getbundle2partsorder:
func = getbundle2partsmapping[name]
- kwargs['heads'] = heads
- kwargs['common'] = common
func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
**kwargs)
@@ -1149,11 +1194,26 @@
cg = None
if kwargs.get('cg', True):
# build changegroup bundle here.
- cg = changegroup.getchangegroup(repo, source, heads=heads,
- common=common, bundlecaps=bundlecaps)
+ version = None
+ cgversions = b2caps.get('b2x:changegroup')
+ if not cgversions: # 3.1 and 3.2 ship with an empty value
+ cg = changegroup.getchangegroupraw(repo, source, heads=heads,
+ common=common,
+ bundlecaps=bundlecaps)
+ else:
+ cgversions = [v for v in cgversions if v in changegroup.packermap]
+ if not cgversions:
+ raise ValueError(_('no common changegroup version'))
+ version = max(cgversions)
+ cg = changegroup.getchangegroupraw(repo, source, heads=heads,
+ common=common,
+ bundlecaps=bundlecaps,
+ version=version)
if cg:
- bundler.newpart('b2x:changegroup', data=cg.getchunks())
+ part = bundler.newpart('b2x:changegroup', data=cg)
+ if version is not None:
+ part.addparam('version', version)
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
@@ -1213,15 +1273,15 @@
tr.hookargs['url'] = url
tr.hookargs['bundle2-exp'] = '1'
r = bundle2.processbundle(repo, cg, lambda: tr).reply
- cl = repo.unfiltered().changelog
- p = cl.writepending() and repo.root or ""
+ p = lambda: tr.writepending() and repo.root or ""
repo.hook('b2x-pretransactionclose', throw=True, pending=p,
**tr.hookargs)
- tr.close()
hookargs = dict(tr.hookargs)
def runhooks():
repo.hook('b2x-transactionclose', **hookargs)
- repo._afterlock(runhooks)
+ tr.addpostclose('b2x-hook-transactionclose',
+ lambda tr: repo._afterlock(runhooks))
+ tr.close()
except Exception, exc:
exc.duringunbundle2 = True
raise
--- a/mercurial/filemerge.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/filemerge.py Tue Dec 09 13:32:19 2014 -0600
@@ -37,6 +37,9 @@
def _findtool(ui, tool):
if tool in internals:
return tool
+ return findexternaltool(ui, tool)
+
+def findexternaltool(ui, tool):
for kn in ("regkey", "regkeyalt"):
k = _toolstr(ui, tool, kn)
if not k:
@@ -298,8 +301,7 @@
replace = {'local': a, 'base': b, 'other': c, 'output': out}
args = util.interpolate(r'\$', replace, args,
lambda s: util.shellquote(util.localpath(s)))
- r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
- out=ui.fout)
+ r = ui.system(toolpath + ' ' + args, cwd=repo.root, environ=env)
return True, r
return False, 0
--- a/mercurial/help/config.txt Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/help/config.txt Tue Dec 09 13:32:19 2014 -0600
@@ -38,6 +38,7 @@
- ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
- ``/etc/mercurial/hgrc`` (per-system)
- ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
+ - ``<internal>/default.d/*.rc`` (defaults)
.. container:: verbose.windows
@@ -51,6 +52,7 @@
- ``<install-dir>\Mercurial.ini`` (per-installation)
- ``<install-dir>\hgrc.d\*.rc`` (per-installation)
- ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-installation)
+ - ``<internal>/default.d/*.rc`` (defaults)
.. note::
@@ -67,6 +69,7 @@
- ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
- ``/lib/mercurial/hgrc`` (per-system)
- ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
+ - ``<internal>/default.d/*.rc`` (defaults)
Per-repository configuration options only apply in a
particular repository. This file is not version-controlled, and
@@ -102,6 +105,13 @@
executed by any user in any directory. Options in these files
override per-installation options.
+Mercurial comes with some default configuration. The default configuration
+files are installed with Mercurial and will be overwritten on upgrades. Default
+configuration files should never be edited by users or administrators but can
+be overridden in other configuration files. So far the directory only contains
+merge tool configuration but packagers can also put other default configuration
+there.
+
Syntax
======
@@ -537,6 +547,9 @@
``nodates``
Don't include dates in diff headers.
+``noprefix``
+ Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
+
``showfunc``
Show which function each change is in.
--- a/mercurial/help/subrepos.txt Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/help/subrepos.txt Tue Dec 09 13:32:19 2014 -0600
@@ -129,6 +129,12 @@
elements. Subversion subrepositories are currently silently
ignored.
+:remove: remove does not recurse into subrepositories unless
+ -S/--subrepos is specified. However, if you specify a file or
+ directory path in a subrepo, it will be removed even without
+ -S/--subrepos. Git and Subversion subrepositories are currently
+ silently ignored.
+
:update: update restores the subrepos in the state they were
originally committed in target changeset. If the recorded
changeset is not available in the current subrepository, Mercurial
--- a/mercurial/hg.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/hg.py Tue Dec 09 13:32:19 2014 -0600
@@ -390,7 +390,7 @@
dstcachedir = os.path.join(destpath, 'cache')
# In local clones we're copying all nodes, not just served
- # ones. Therefore copy all branchcaches over.
+ # ones. Therefore copy all branch caches over.
copybranchcache('branch2')
for cachename in repoview.filtertable:
copybranchcache('branch2-%s' % cachename)
--- a/mercurial/hook.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/hook.py Tue Dec 09 13:32:19 2014 -0600
@@ -131,10 +131,7 @@
cwd = repo.root
else:
cwd = os.getcwd()
- if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'):
- r = util.system(cmd, environ=env, cwd=cwd, out=ui)
- else:
- r = util.system(cmd, environ=env, cwd=cwd, out=ui.fout)
+ r = ui.system(cmd, environ=env, cwd=cwd)
duration = time.time() - starttime
ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n',
--- a/mercurial/localrepo.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/localrepo.py Tue Dec 09 13:32:19 2014 -0600
@@ -316,6 +316,9 @@
chunkcachesize = self.ui.configint('format', 'chunkcachesize')
if chunkcachesize is not None:
self.sopener.options['chunkcachesize'] = chunkcachesize
+ maxchainlen = self.ui.configint('format', 'maxchainlen')
+ if maxchainlen is not None:
+ self.sopener.options['maxchainlen'] = maxchainlen
def _writerequirements(self):
reqfile = self.opener("requires", "w")
@@ -862,9 +865,16 @@
def wwritedata(self, filename, data):
return self._filter(self._decodefilterpats, filename, data)
- def transaction(self, desc, report=None):
+ def currenttransaction(self):
+ """return the current transaction or None if none exists"""
tr = self._transref and self._transref() or None
if tr and tr.running():
+ return tr
+ return None
+
+ def transaction(self, desc, report=None):
+ tr = self.currenttransaction()
+ if tr is not None:
return tr.nest()
# abort here if the journal already exists
@@ -879,7 +889,8 @@
self._writejournal(desc)
renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
rp = report and report or self.ui.warn
- tr = transaction.transaction(rp, self.sopener,
+ vfsmap = {'plain': self.opener} # root of .hg/
+ tr = transaction.transaction(rp, self.sopener, vfsmap,
"journal",
aftertrans(renames),
self.store.createmode,
@@ -915,7 +926,9 @@
try:
if self.svfs.exists("journal"):
self.ui.status(_("rolling back interrupted transaction\n"))
- transaction.rollback(self.sopener, "journal",
+ vfsmap = {'': self.sopener,
+ 'plain': self.opener,}
+ transaction.rollback(self.sopener, vfsmap, "journal",
self.ui.warn)
self.invalidate()
return True
@@ -971,7 +984,8 @@
parents = self.dirstate.parents()
self.destroying()
- transaction.rollback(self.sopener, 'undo', ui.warn)
+ vfsmap = {'plain': self.opener}
+ transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
if self.vfs.exists('undo.bookmarks'):
self.vfs.rename('undo.bookmarks', 'bookmarks')
if self.svfs.exists('undo.phaseroots'):
@@ -1437,15 +1451,14 @@
files = []
# update changelog
- self.changelog.delayupdate()
+ self.changelog.delayupdate(tr)
n = self.changelog.add(mn, files, ctx.description(),
trp, p1.node(), p2.node(),
user, ctx.date(), ctx.extra().copy())
- p = lambda: self.changelog.writepending() and self.root or ""
+ p = lambda: tr.writepending() and self.root or ""
xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
parent2=xp2, pending=p)
- self.changelog.finalize(trp)
# set the new commit is proper phase
targetphase = subrepo.newcommitphase(self.ui, ctx)
if targetphase:
--- a/mercurial/manifest.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/manifest.py Tue Dec 09 13:32:19 2014 -0600
@@ -39,6 +39,22 @@
ret._flags[fn] = flags
return ret
+ def matches(self, match):
+ '''generate a new manifest filtered by the match argument'''
+ if match.always():
+ return self.copy()
+
+ files = match.files()
+ if (match.matchfn == match.exact or
+ (not match.anypats() and util.all(fn in self for fn in files))):
+ return self.intersectfiles(files)
+
+ mf = self.copy()
+ for fn in mf.keys():
+ if not match(fn):
+ del mf[fn]
+ return mf
+
def diff(self, m2):
'''Finds changes between the current manifest and m2. The result is
returned as a dict with filename as key and values of the form
--- a/mercurial/match.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/match.py Tue Dec 09 13:32:19 2014 -0600
@@ -65,6 +65,7 @@
self._anypats = bool(include or exclude)
self._ctx = ctx
self._always = False
+ self._pathrestricted = bool(include or exclude or patterns)
matchfns = []
if include:
@@ -128,6 +129,12 @@
'''Convert repo path back to path that is relative to cwd of matcher.'''
return util.pathto(self._root, self._cwd, f)
+ def uipath(self, f):
+ '''Convert repo path to a display path. If patterns or -I/-X were used
+ to create this matcher, the display path will be relative to cwd.
+ Otherwise it is relative to the root of the repo.'''
+ return (self._pathrestricted and self.rel(f)) or f
+
def files(self):
'''Explicitly listed files or patterns or roots:
if no patterns or .always(): empty list,
@@ -191,6 +198,7 @@
self._path = path
self._matcher = matcher
self._always = matcher._always
+ self._pathrestricted = matcher._pathrestricted
self._files = [f[len(path) + 1:] for f in matcher._files
if f.startswith(path + "/")]
--- a/mercurial/mdiff.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/mdiff.py Tue Dec 09 13:32:19 2014 -0600
@@ -25,6 +25,8 @@
showfunc enables diff -p output
git enables the git extended patch format
nodates removes dates from diff headers
+ nobinary ignores binary files
+ noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
ignorews ignores all whitespace changes in the diff
ignorewsamount ignores changes in the amount of whitespace
ignoreblanklines ignores changes whose lines are all blank
@@ -38,6 +40,7 @@
'git': False,
'nodates': False,
'nobinary': False,
+ 'noprefix': False,
'ignorews': False,
'ignorewsamount': False,
'ignoreblanklines': False,
@@ -153,6 +156,13 @@
if not a and not b:
return ""
+
+ if opts.noprefix:
+ aprefix = bprefix = ''
+ else:
+ aprefix = 'a/'
+ bprefix = 'b/'
+
epoch = util.datestr((0, 0))
fn1 = util.pconvert(fn1)
@@ -167,17 +177,17 @@
if a is None:
l1 = '--- /dev/null%s' % datetag(epoch)
else:
- l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
- l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
+ l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
+ l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
l3 = "@@ -0,0 +1,%d @@\n" % len(b)
l = [l1, l2, l3] + ["+" + e for e in b]
elif not b:
a = splitnewlines(a)
- l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
+ l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
if b is None:
l2 = '+++ /dev/null%s' % datetag(epoch)
else:
- l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
+ l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
l3 = "@@ -1,%d +0,0 @@\n" % len(a)
l = [l1, l2, l3] + ["-" + e for e in a]
else:
@@ -187,8 +197,8 @@
if not l:
return ""
- l.insert(0, "--- a/%s%s" % (fn1, datetag(ad, fn1)))
- l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd, fn2)))
+ l.insert(0, "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)))
+ l.insert(1, "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)))
for ln in xrange(len(l)):
if l[ln][-1] != '\n':
--- a/mercurial/merge.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/merge.py Tue Dec 09 13:32:19 2014 -0600
@@ -18,9 +18,9 @@
def _droponode(data):
# used for compatibility for v1
- bits = data.split("\0")
+ bits = data.split('\0')
bits = bits[:-2] + bits[-1:]
- return "\0".join(bits)
+ return '\0'.join(bits)
class mergestate(object):
'''track 3-way merge state of individual files
@@ -45,8 +45,8 @@
O: the node of the "other" part of the merge (hexified version)
F: a file to be merged entry
'''
- statepathv1 = "merge/state"
- statepathv2 = "merge/state2"
+ statepathv1 = 'merge/state'
+ statepathv2 = 'merge/state2'
def __init__(self, repo):
self._repo = repo
@@ -60,7 +60,7 @@
if node:
self._local = node
self._other = other
- shutil.rmtree(self._repo.join("merge"), True)
+ shutil.rmtree(self._repo.join('merge'), True)
self._dirty = False
def _read(self):
@@ -78,8 +78,8 @@
self._local = bin(record)
elif rtype == 'O':
self._other = bin(record)
- elif rtype == "F":
- bits = record.split("\0")
+ elif rtype == 'F':
+ bits = record.split('\0')
self._state[bits[0]] = bits[1:]
elif not rtype.islower():
raise util.Abort(_('unsupported merge state record: %s')
@@ -121,9 +121,9 @@
# if mctx was wrong `mctx[bits[-2]]` may fails.
for idx, r in enumerate(v1records):
if r[0] == 'F':
- bits = r[1].split("\0")
+ bits = r[1].split('\0')
bits.insert(-2, '')
- v1records[idx] = (r[0], "\0".join(bits))
+ v1records[idx] = (r[0], '\0'.join(bits))
return v1records
else:
return v2records
@@ -191,10 +191,10 @@
"""Write current state on disk (if necessary)"""
if self._dirty:
records = []
- records.append(("L", hex(self._local)))
- records.append(("O", hex(self._other)))
+ records.append(('L', hex(self._local)))
+ records.append(('O', hex(self._other)))
for d, v in self._state.iteritems():
- records.append(("F", "\0".join([d] + v)))
+ records.append(('F', '\0'.join([d] + v)))
self._writerecords(records)
self._dirty = False
@@ -205,22 +205,22 @@
def _writerecordsv1(self, records):
"""Write current state on disk in a version 1 file"""
- f = self._repo.opener(self.statepathv1, "w")
+ f = self._repo.opener(self.statepathv1, 'w')
irecords = iter(records)
lrecords = irecords.next()
assert lrecords[0] == 'L'
- f.write(hex(self._local) + "\n")
+ f.write(hex(self._local) + '\n')
for rtype, data in irecords:
- if rtype == "F":
- f.write("%s\n" % _droponode(data))
+ if rtype == 'F':
+ f.write('%s\n' % _droponode(data))
f.close()
def _writerecordsv2(self, records):
"""Write current state on disk in a version 2 file"""
- f = self._repo.opener(self.statepathv2, "w")
+ f = self._repo.opener(self.statepathv2, 'w')
for key, data in records:
assert len(key) == 1
- format = ">sI%is" % len(data)
+ format = '>sI%is' % len(data)
f.write(_pack(format, key, len(data), data))
f.close()
@@ -234,7 +234,7 @@
note: also write the local version to the `.hg/merge` directory.
"""
hash = util.sha1(fcl.path()).hexdigest()
- self._repo.opener.write("merge/" + hash, fcl.data())
+ self._repo.opener.write('merge/' + hash, fcl.data())
self._state[fd] = ['u', hash, fcl.path(),
fca.path(), hex(fca.filenode()),
fco.path(), hex(fco.filenode()),
@@ -284,7 +284,7 @@
elif flags == fla:
flags = flo
# restore local
- f = self._repo.opener("merge/" + hash)
+ f = self._repo.opener('merge/' + hash)
self._repo.wwrite(dfile, f.read(), flags)
f.close()
r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
@@ -303,18 +303,6 @@
and repo.dirstate.normalize(f) not in repo.dirstate
and mctx[f].cmp(wctx[f]))
-def _checkunknown(repo, wctx, mctx):
- "check for collisions between unknown files and files in mctx"
-
- error = False
- for f in mctx:
- if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
- error = True
- wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
- if error:
- raise util.Abort(_("untracked files in working directory differ "
- "from files in requested revision"))
-
def _forgetremoved(wctx, mctx, branchmerge):
"""
Forget removed files
@@ -361,7 +349,6 @@
pmmf.discard(f2)
pmmf.add(f)
for f, args, msg in actions['dg']:
- f2, flags = args
pmmf.add(f)
for f, args, msg in actions['m']:
f1, f2, fa, move, anc = args
@@ -416,7 +403,7 @@
# check whether sub state is modified
for s in sorted(wctx.substate):
if wctx.sub(s).dirty():
- m1['.hgsubstate'] += "+"
+ m1['.hgsubstate'] += '+'
break
aborts = []
@@ -426,101 +413,118 @@
for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
if partial and not partial(f):
continue
- if n1 and n2:
- fa = f
- a = ma.get(f, nullid)
- if a == nullid:
- fa = copy.get(f, f)
- # Note: f as default is wrong - we can't really make a 3-way
- # merge without an ancestor file.
- fla = ma.flags(fa)
- nol = 'l' not in fl1 + fl2 + fla
- if n2 == a and fl2 == fla:
- actions['k'].append((f, (), "keep")) # remote unchanged
- elif n1 == a and fl1 == fla: # local unchanged - use remote
- if n1 == n2: # optimization: keep local content
- actions['e'].append((f, (fl2,), "update permissions"))
+ if n1 and n2: # file exists on both local and remote side
+ if f not in ma:
+ fa = copy.get(f, None)
+ if fa is not None:
+ actions['m'].append((f, (f, f, fa, False, pa.node()),
+ "both renamed from " + fa))
else:
- actions['g'].append((f, (fl2,), "remote is newer"))
- elif nol and n2 == a: # remote only changed 'x'
- actions['e'].append((f, (fl2,), "update permissions"))
- elif nol and n1 == a: # local only changed 'x'
- actions['g'].append((f, (fl1,), "remote is newer"))
- else: # both changed something
- actions['m'].append((f, (f, f, fa, False, pa.node()),
- "versions differ"))
- elif f in copied: # files we'll deal with on m2 side
- pass
- elif n1 and f in movewithdir: # directory rename, move local
- f2 = movewithdir[f]
- actions['dm'].append((f2, (f, fl1),
- "remote directory rename - move from " + f))
- elif n1 and f in copy:
- f2 = copy[f]
- actions['m'].append((f, (f, f2, f2, False, pa.node()),
- "local copied/moved from " + f2))
- elif n1 and f in ma: # clean, a different, no remote
- if n1 != ma[f]:
- if acceptremote:
- actions['r'].append((f, None, "remote delete"))
- else:
- actions['cd'].append((f, None, "prompt changed/deleted"))
- elif n1[20:] == "a": # added, no remote
- actions['f'].append((f, None, "remote deleted"))
+ actions['m'].append((f, (f, f, None, False, pa.node()),
+ "both created"))
else:
- actions['r'].append((f, None, "other deleted"))
- elif n2 and f in movewithdir:
- f2 = movewithdir[f]
- actions['dg'].append((f2, (f, fl2),
- "local directory rename - get from " + f))
- elif n2 and f in copy:
- f2 = copy[f]
- if f2 in m2:
- actions['m'].append((f, (f2, f, f2, False, pa.node()),
- "remote copied from " + f2))
- else:
- actions['m'].append((f, (f2, f, f2, True, pa.node()),
- "remote moved from " + f2))
- elif n2 and f not in ma:
- # local unknown, remote created: the logic is described by the
- # following table:
- #
- # force branchmerge different | action
- # n * n | get
- # n * y | abort
- # y n * | get
- # y y n | get
- # y y y | merge
- #
- # Checking whether the files are different is expensive, so we
- # don't do that when we can avoid it.
- if force and not branchmerge:
- actions['g'].append((f, (fl2,), "remote created"))
- else:
+ a = ma[f]
+ fla = ma.flags(f)
+ nol = 'l' not in fl1 + fl2 + fla
+ if n2 == a and fl2 == fla:
+ actions['k'].append((f, (), "remote unchanged"))
+ elif n1 == a and fl1 == fla: # local unchanged - use remote
+ if n1 == n2: # optimization: keep local content
+ actions['e'].append((f, (fl2,), "update permissions"))
+ else:
+ actions['g'].append((f, (fl2,), "remote is newer"))
+ elif nol and n2 == a: # remote only changed 'x'
+ actions['e'].append((f, (fl2,), "update permissions"))
+ elif nol and n1 == a: # local only changed 'x'
+ actions['g'].append((f, (fl1,), "remote is newer"))
+ else: # both changed something
+ actions['m'].append((f, (f, f, f, False, pa.node()),
+ "versions differ"))
+ elif n1: # file exists only on local side
+ if f in copied:
+ pass # we'll deal with it on m2 side
+ elif f in movewithdir: # directory rename, move local
+ f2 = movewithdir[f]
+ if f2 in m2:
+ actions['m'].append((f2, (f, f2, None, True, pa.node()),
+ "remote directory rename, both created"))
+ else:
+ actions['dm'].append((f2, (f, fl1),
+ "remote directory rename - move from " + f))
+ elif f in copy:
+ f2 = copy[f]
+ actions['m'].append((f, (f, f2, f2, False, pa.node()),
+ "local copied/moved from " + f2))
+ elif f in ma: # clean, a different, no remote
+ if n1 != ma[f]:
+ if acceptremote:
+ actions['r'].append((f, None, "remote delete"))
+ else:
+ actions['cd'].append((f, None,
+ "prompt changed/deleted"))
+ elif n1[20:] == 'a':
+ # This extra 'a' is added by the working copy manifest to mark
+ # the file as locally added. We should forget it instead of
+ # deleting it.
+ actions['f'].append((f, None, "remote deleted"))
+ else:
+ actions['r'].append((f, None, "other deleted"))
+ elif n2: # file exists only on remote side
+ if f in copied:
+ pass # we'll deal with it on m1 side
+ elif f in movewithdir:
+ f2 = movewithdir[f]
+ if f2 in m1:
+ actions['m'].append((f2, (f2, f, None, False, pa.node()),
+ "local directory rename, both created"))
+ else:
+ actions['dg'].append((f2, (f, fl2),
+ "local directory rename - get from " + f))
+ elif f in copy:
+ f2 = copy[f]
+ if f2 in m2:
+ actions['m'].append((f, (f2, f, f2, False, pa.node()),
+ "remote copied from " + f2))
+ else:
+ actions['m'].append((f, (f2, f, f2, True, pa.node()),
+ "remote moved from " + f2))
+ elif f not in ma:
+ # local unknown, remote created: the logic is described by the
+ # following table:
+ #
+ # force branchmerge different | action
+ # n * n | get
+ # n * y | abort
+ # y n * | get
+ # y y n | get
+ # y y y | merge
+ #
+ # Checking whether the files are different is expensive, so we
+ # don't do that when we can avoid it.
+ if force and not branchmerge:
+ actions['g'].append((f, (fl2,), "remote created"))
+ else:
+ different = _checkunknownfile(repo, wctx, p2, f)
+ if force and branchmerge and different:
+ actions['m'].append((f, (f, f, None, False, pa.node()),
+ "remote differs from untracked local"))
+ elif not force and different:
+ aborts.append((f, 'ud'))
+ else:
+ actions['g'].append((f, (fl2,), "remote created"))
+ elif n2 != ma[f]:
different = _checkunknownfile(repo, wctx, p2, f)
- if force and branchmerge and different:
- # FIXME: This is wrong - f is not in ma ...
- actions['m'].append((f, (f, f, f, False, pa.node()),
- "remote differs from untracked local"))
- elif not force and different:
- aborts.append((f, "ud"))
+ if not force and different:
+ aborts.append((f, 'ud'))
else:
- actions['g'].append((f, (fl2,), "remote created"))
- elif n2 and n2 != ma[f]:
- different = _checkunknownfile(repo, wctx, p2, f)
- if not force and different:
- aborts.append((f, "ud"))
- else:
- # if different: old untracked f may be overwritten and lost
- if acceptremote:
- actions['g'].append((f, (m2.flags(f),),
- "remote recreating"))
- else:
- actions['dc'].append((f, (m2.flags(f),),
- "prompt deleted/changed"))
+ if acceptremote:
+ actions['g'].append((f, (fl2,), "remote recreating"))
+ else:
+ actions['dc'].append((f, (fl2,),
+ "prompt deleted/changed"))
for f, m in sorted(aborts):
- if m == "ud":
+ if m == 'ud':
repo.ui.warn(_("%s: untracked file differs\n") % f)
else: assert False, m
if aborts:
@@ -537,6 +541,111 @@
return actions
+def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
+ acceptremote, followcopies):
+ "Calculate the actions needed to merge mctx into wctx using ancestors"
+
+ if len(ancestors) == 1: # default
+ actions = manifestmerge(repo, wctx, mctx, ancestors[0],
+ branchmerge, force,
+ partial, acceptremote, followcopies)
+
+ else: # only when merge.preferancestor=* - the default
+ repo.ui.note(
+ _("note: merging %s and %s using bids from ancestors %s\n") %
+ (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
+
+ # Call for bids
+ fbids = {} # mapping filename to bids (action method to list of actions)
+ for ancestor in ancestors:
+ repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
+ actions = manifestmerge(repo, wctx, mctx, ancestor,
+ branchmerge, force,
+ partial, acceptremote, followcopies)
+ for m, l in sorted(actions.items()):
+ for a in l:
+ f, args, msg = a
+ repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
+ if f in fbids:
+ d = fbids[f]
+ if m in d:
+ d[m].append(a)
+ else:
+ d[m] = [a]
+ else:
+ fbids[f] = {m: [a]}
+
+ # Pick the best bid for each file
+ repo.ui.note(_('\nauction for merging merge bids\n'))
+ actions = dict((m, []) for m in actions.keys())
+ for f, bids in sorted(fbids.items()):
+ # bids is a mapping from action method to list of actions
+ # Consensus?
+ if len(bids) == 1: # all bids are the same kind of method
+ m, l = bids.items()[0]
+ if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
+ repo.ui.note(" %s: consensus for %s\n" % (f, m))
+ actions[m].append(l[0])
+ continue
+ # If keep is an option, just do it.
+ if 'k' in bids:
+ repo.ui.note(" %s: picking 'keep' action\n" % f)
+ actions['k'].append(bids['k'][0])
+ continue
+ # If there are gets and they all agree [how could they not?], do it.
+ if 'g' in bids:
+ ga0 = bids['g'][0]
+ if util.all(a == ga0 for a in bids['g'][1:]):
+ repo.ui.note(" %s: picking 'get' action\n" % f)
+ actions['g'].append(ga0)
+ continue
+ # TODO: Consider other simple actions such as mode changes
+ # Handle inefficient democrazy.
+ repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
+ for m, l in sorted(bids.items()):
+ for _f, args, msg in l:
+ repo.ui.note(' %s -> %s\n' % (msg, m))
+ # Pick random action. TODO: Instead, prompt user when resolving
+ m, l = bids.items()[0]
+ repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
+ (f, m))
+ actions[m].append(l[0])
+ continue
+ repo.ui.note(_('end of auction\n\n'))
+
+ # Prompt and create actions. TODO: Move this towards resolve phase.
+ for f, args, msg in sorted(actions['cd']):
+ if f in ancestors[0] and not wctx[f].cmp(ancestors[0][f]):
+ # local did change but ended up with same content
+ actions['r'].append((f, None, "prompt same"))
+ elif repo.ui.promptchoice(
+ _("local changed %s which remote deleted\n"
+ "use (c)hanged version or (d)elete?"
+ "$$ &Changed $$ &Delete") % f, 0):
+ actions['r'].append((f, None, "prompt delete"))
+ else:
+ actions['a'].append((f, None, "prompt keep"))
+ del actions['cd'][:]
+
+ for f, args, msg in sorted(actions['dc']):
+ flags, = args
+ if f in ancestors[0] and not mctx[f].cmp(ancestors[0][f]):
+ # remote did change but ended up with same content
+ pass # don't get = keep local deleted
+ elif repo.ui.promptchoice(
+ _("remote changed %s which local deleted\n"
+ "use (c)hanged version or leave (d)eleted?"
+ "$$ &Changed $$ &Deleted") % f, 0) == 0:
+ actions['g'].append((f, (flags,), "prompt recreating"))
+ del actions['dc'][:]
+
+ if wctx.rev() is None:
+ ractions, factions = _forgetremoved(wctx, mctx, branchmerge)
+ actions['r'].extend(ractions)
+ actions['f'].extend(factions)
+
+ return actions
+
def batchremove(repo, actions):
"""apply removes to the working directory
@@ -678,7 +787,6 @@
repo.ui.debug(" %s: %s -> m\n" % (f, msg))
z += 1
progress(_updating, z, item=f, total=numupdates, unit=_files)
- f1, f2, fa, move, anc = args
if f == '.hgsubstate': # subrepo states need updating
subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
overwrite)
@@ -752,111 +860,6 @@
return updated, merged, removed, unresolved
-def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
- acceptremote, followcopies):
- "Calculate the actions needed to merge mctx into wctx using ancestors"
-
- if len(ancestors) == 1: # default
- actions = manifestmerge(repo, wctx, mctx, ancestors[0],
- branchmerge, force,
- partial, acceptremote, followcopies)
-
- else: # only when merge.preferancestor=* - the default
- repo.ui.note(
- _("note: merging %s and %s using bids from ancestors %s\n") %
- (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
-
- # Call for bids
- fbids = {} # mapping filename to bids (action method to list af actions)
- for ancestor in ancestors:
- repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
- actions = manifestmerge(repo, wctx, mctx, ancestor,
- branchmerge, force,
- partial, acceptremote, followcopies)
- for m, l in sorted(actions.items()):
- for a in l:
- f, args, msg = a
- repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
- if f in fbids:
- d = fbids[f]
- if m in d:
- d[m].append(a)
- else:
- d[m] = [a]
- else:
- fbids[f] = {m: [a]}
-
- # Pick the best bid for each file
- repo.ui.note(_('\nauction for merging merge bids\n'))
- actions = dict((m, []) for m in actions.keys())
- for f, bids in sorted(fbids.items()):
- # bids is a mapping from action method to list af actions
- # Consensus?
- if len(bids) == 1: # all bids are the same kind of method
- m, l = bids.items()[0]
- if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
- repo.ui.note(" %s: consensus for %s\n" % (f, m))
- actions[m].append(l[0])
- continue
- # If keep is an option, just do it.
- if "k" in bids:
- repo.ui.note(" %s: picking 'keep' action\n" % f)
- actions['k'].append(bids["k"][0])
- continue
- # If there are gets and they all agree [how could they not?], do it.
- if "g" in bids:
- ga0 = bids["g"][0]
- if util.all(a == ga0 for a in bids["g"][1:]):
- repo.ui.note(" %s: picking 'get' action\n" % f)
- actions['g'].append(ga0)
- continue
- # TODO: Consider other simple actions such as mode changes
- # Handle inefficient democrazy.
- repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
- for m, l in sorted(bids.items()):
- for _f, args, msg in l:
- repo.ui.note(' %s -> %s\n' % (msg, m))
- # Pick random action. TODO: Instead, prompt user when resolving
- m, l = bids.items()[0]
- repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
- (f, m))
- actions[m].append(l[0])
- continue
- repo.ui.note(_('end of auction\n\n'))
-
- # Prompt and create actions. TODO: Move this towards resolve phase.
- for f, args, msg in actions['cd']:
- if f in ancestors[0] and not wctx[f].cmp(ancestors[0][f]):
- # local did change but ended up with same content
- actions['r'].append((f, None, "prompt same"))
- elif repo.ui.promptchoice(
- _("local changed %s which remote deleted\n"
- "use (c)hanged version or (d)elete?"
- "$$ &Changed $$ &Delete") % f, 0):
- actions['r'].append((f, None, "prompt delete"))
- else:
- actions['a'].append((f, None, "prompt keep"))
- del actions['cd'][:]
-
- for f, args, msg in actions['dc']:
- flags, = args
- if f in ancestors[0] and not mctx[f].cmp(ancestors[0][f]):
- # remote did change but ended up with same content
- pass # don't get = keep local deleted
- elif repo.ui.promptchoice(
- _("remote changed %s which local deleted\n"
- "use (c)hanged version or leave (d)eleted?"
- "$$ &Changed $$ &Deleted") % f, 0) == 0:
- actions['g'].append((f, (flags,), "prompt recreating"))
- del actions['dc'][:]
-
- if wctx.rev() is None:
- ractions, factions = _forgetremoved(wctx, mctx, branchmerge)
- actions['r'].extend(ractions)
- actions['f'].extend(factions)
-
- return actions
-
def recordupdates(repo, actions, branchmerge):
"record merge actions to the dirstate"
# remove (must come first)
@@ -918,9 +921,6 @@
# directory rename, move local
for f, args, msg in actions['dm']:
f0, flag = args
- if f0 not in repo.dirstate:
- # untracked file moved
- continue
if branchmerge:
repo.dirstate.add(f)
repo.dirstate.remove(f0)
@@ -990,7 +990,7 @@
pl = wc.parents()
p1 = pl[0]
pas = [None]
- if ancestor:
+ if ancestor is not None:
pas = [repo[ancestor]]
if node is None:
@@ -1000,8 +1000,8 @@
try:
node = repo.branchtip(wc.branch())
except errormod.RepoLookupError:
- if wc.branch() == "default": # no default branch!
- node = repo.lookup("tip") # update to tip
+ if wc.branch() == 'default': # no default branch!
+ node = repo.lookup('tip') # update to tip
else:
raise util.Abort(_("branch %s not found") % wc.branch())
@@ -1029,14 +1029,14 @@
# get the max revision for the given successors set,
# i.e. the 'tip' of a set
- node = repo.revs("max(%ln)", successors).first()
+ node = repo.revs('max(%ln)', successors).first()
pas = [p1]
overwrite = force and not branchmerge
p2 = repo[node]
if pas[0] is None:
- if repo.ui.config("merge", "preferancestor", '*') == '*':
+ if repo.ui.config('merge', 'preferancestor', '*') == '*':
cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
else:
@@ -1104,7 +1104,7 @@
pas = [wc.p1()]
elif not branchmerge and not wc.dirty(missing=True):
pass
- elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
+ elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
followcopies = True
### calculate phase
--- a/mercurial/obsolete.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/obsolete.py Tue Dec 09 13:32:19 2014 -0600
@@ -74,6 +74,7 @@
_pack = struct.pack
_unpack = struct.unpack
+_calcsize = struct.calcsize
_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
@@ -142,8 +143,8 @@
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
-_fm0fsize = struct.calcsize(_fm0fixed)
-_fm0fnodesize = struct.calcsize(_fm0node)
+_fm0fsize = _calcsize(_fm0fixed)
+_fm0fnodesize = _calcsize(_fm0node)
def _fm0readmarkers(data, off=0):
# Loop on markers
@@ -275,12 +276,14 @@
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
-_fm1fsize = struct.calcsize(_fm1fixed)
+_fm1nodesha1size = _calcsize(_fm1nodesha1)
+_fm1nodesha256size = _calcsize(_fm1nodesha256)
+_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
-_fm1metapairsize = struct.calcsize('BB')
+_fm1metapairsize = _calcsize('BB')
def _fm1readmarkers(data, off=0):
# Loop on markers
@@ -297,9 +300,10 @@
# build the date tuple (upgrade tz minutes to seconds)
date = (seconds, tz * 60)
_fm1node = _fm1nodesha1
+ fnodesize = _fm1nodesha1size
if flags & usingsha256:
_fm1node = _fm1nodesha256
- fnodesize = struct.calcsize(_fm1node)
+ fnodesize = _fm1nodesha256size
# read replacement
sucs = ()
if numsuc:
@@ -358,7 +362,7 @@
data.extend(sucs)
if parents is not None:
data.extend(parents)
- totalsize = struct.calcsize(format)
+ totalsize = _calcsize(format)
for key, value in metadata:
lk = len(key)
lv = len(value)
@@ -377,6 +381,7 @@
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
_fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
+@util.nogc
def _readmarkers(data):
"""Read and enumerate markers from raw data"""
off = 0
@@ -562,6 +567,7 @@
version, markers = _readmarkers(data)
return self.add(transaction, markers)
+ @util.nogc
def _load(self, markers):
for mark in markers:
self._all.append(mark)
--- a/mercurial/parsers.c Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/parsers.c Tue Dec 09 13:32:19 2014 -0600
@@ -1978,6 +1978,9 @@
PyErr_SetString(PyExc_ValueError, "rev out of range");
return -1;
}
+
+ if (nt_init(self) == -1)
+ return -1;
return nt_insert(self, node, (int)rev);
}
--- a/mercurial/patch.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/patch.py Tue Dec 09 13:32:19 2014 -0600
@@ -1558,20 +1558,53 @@
class GitDiffRequired(Exception):
pass
-def diffopts(ui, opts=None, untrusted=False, section='diff'):
- def get(key, name=None, getter=ui.configbool):
- return ((opts and opts.get(key)) or
- getter(section, name or key, None, untrusted=untrusted))
- return mdiff.diffopts(
- text=opts and opts.get('text'),
- git=get('git'),
- nodates=get('nodates'),
- nobinary=get('nobinary'),
- showfunc=get('show_function', 'showfunc'),
- ignorews=get('ignore_all_space', 'ignorews'),
- ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
- ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
- context=get('unified', getter=ui.config))
+def diffallopts(ui, opts=None, untrusted=False, section='diff'):
+ '''return diffopts with all features supported and parsed'''
+ return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
+ git=True, whitespace=True, formatchanging=True)
+
+diffopts = diffallopts
+
+def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
+ whitespace=False, formatchanging=False):
+ '''return diffopts with only opted-in features parsed
+
+ Features:
+ - git: git-style diffs
+ - whitespace: whitespace options like ignoreblanklines and ignorews
+ - formatchanging: options that will likely break or cause correctness issues
+ with most diff parsers
+ '''
+ def get(key, name=None, getter=ui.configbool, forceplain=None):
+ if opts:
+ v = opts.get(key)
+ if v:
+ return v
+ if forceplain is not None and ui.plain():
+ return forceplain
+ return getter(section, name or key, None, untrusted=untrusted)
+
+ # core options, expected to be understood by every diff parser
+ buildopts = {
+ 'nodates': get('nodates'),
+ 'showfunc': get('show_function', 'showfunc'),
+ 'context': get('unified', getter=ui.config),
+ }
+
+ if git:
+ buildopts['git'] = get('git')
+ if whitespace:
+ buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
+ buildopts['ignorewsamount'] = get('ignore_space_change',
+ 'ignorewsamount')
+ buildopts['ignoreblanklines'] = get('ignore_blank_lines',
+ 'ignoreblanklines')
+ if formatchanging:
+ buildopts['text'] = opts and opts.get('text')
+ buildopts['nobinary'] = get('nobinary')
+ buildopts['noprefix'] = get('noprefix', forceplain=False)
+
+ return mdiff.diffopts(**buildopts)
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
losedatafn=None, prefix=''):
@@ -1731,9 +1764,15 @@
s.update(text)
return s.hexdigest()
+ if opts.noprefix:
+ aprefix = bprefix = ''
+ else:
+ aprefix = 'a/'
+ bprefix = 'b/'
+
def diffline(a, b, revs):
if opts.git:
- line = 'diff --git a/%s b/%s\n' % (a, b)
+ line = 'diff --git %s%s %s%s\n' % (aprefix, a, bprefix, b)
elif not repo.ui.quiet:
if revs:
revinfo = ' '.join(["-r %s" % rev for rev in revs])
--- a/mercurial/pathutil.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/pathutil.py Tue Dec 09 13:32:19 2014 -0600
@@ -146,7 +146,7 @@
def normasprefix(path):
'''normalize the specified path as path prefix
- Returned vaule can be used safely for "p.startswith(prefix)",
+ Returned value can be used safely for "p.startswith(prefix)",
"p[len(prefix):]", and so on.
For efficiency, this expects "path" argument to be already
--- a/mercurial/phases.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/phases.py Tue Dec 09 13:32:19 2014 -0600
@@ -100,6 +100,7 @@
"""
+import os
import errno
from node import nullid, nullrev, bin, hex, short
from i18n import _
@@ -124,7 +125,15 @@
dirty = False
roots = [set() for i in allphases]
try:
- f = repo.sopener('phaseroots')
+ f = None
+ if 'HG_PENDING' in os.environ:
+ try:
+ f = repo.svfs('phaseroots.pending')
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ if f is None:
+ f = repo.sopener('phaseroots')
try:
for line in f:
phase, nh = line.split()
--- a/mercurial/repoview.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/repoview.py Tue Dec 09 13:32:19 2014 -0600
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+import types
import copy
import error
import phases
@@ -72,6 +73,13 @@
h.update(str(hash(frozenset(hideable))))
return h.digest()
+def _writehiddencache(cachefile, cachehash, hidden):
+ """write hidden data to a cache file"""
+ data = struct.pack('>%ii' % len(hidden), *sorted(hidden))
+ cachefile.write(struct.pack(">H", cacheversion))
+ cachefile.write(cachehash)
+ cachefile.write(data)
+
def trywritehiddencache(repo, hideable, hidden):
"""write cache of hidden changesets to disk
@@ -87,12 +95,8 @@
wlock = repo.wlock(wait=False)
# write cache to file
newhash = cachehash(repo, hideable)
- sortedset = sorted(hidden)
- data = struct.pack('>%ii' % len(sortedset), *sortedset)
fh = repo.vfs.open(cachefile, 'w+b', atomictemp=True)
- fh.write(struct.pack(">H", cacheversion))
- fh.write(newhash)
- fh.write(data)
+ _writehiddencache(fh, newhash, hidden)
except (IOError, OSError):
repo.ui.debug('error writing hidden changesets cache')
except error.LockHeld:
@@ -307,6 +311,10 @@
return getattr(self._unfilteredrepo, attr)
def __setattr__(self, attr, value):
+ # Allow method replacement on filtered repos, like status() in
+ # largefiles' purge override
+ if type(value) == types.FunctionType:
+ object.__setattr__(self, attr, value)
return setattr(self._unfilteredrepo, attr, value)
def __delattr__(self, attr):
--- a/mercurial/revlog.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/revlog.py Tue Dec 09 13:32:19 2014 -0600
@@ -204,6 +204,7 @@
self._basecache = None
self._chunkcache = (0, '')
self._chunkcachesize = 65536
+ self._maxchainlen = None
self.index = []
self._pcache = {}
self._nodecache = {nullid: nullrev}
@@ -219,6 +220,8 @@
v = 0
if 'chunkcachesize' in opts:
self._chunkcachesize = opts['chunkcachesize']
+ if 'maxchainlen' in opts:
+ self._maxchainlen = opts['maxchainlen']
if self._chunkcachesize <= 0:
raise RevlogError(_('revlog chunk cache size %r is not greater '
@@ -267,6 +270,8 @@
self.nodemap = self._nodecache = nodemap
if not self._chunkcache:
self._chunkclear()
+ # revnum -> (chain-length, sum-delta-length)
+ self._chaininfocache = {}
def tip(self):
return self.node(len(self.index) - 2)
@@ -350,6 +355,40 @@
rev = base
base = index[rev][3]
return base
+ def chainlen(self, rev):
+ return self._chaininfo(rev)[0]
+
+ def _chaininfo(self, rev):
+ chaininfocache = self._chaininfocache
+ if rev in chaininfocache:
+ return chaininfocache[rev]
+ index = self.index
+ generaldelta = self._generaldelta
+ iterrev = rev
+ e = index[iterrev]
+ clen = 0
+ compresseddeltalen = 0
+ while iterrev != e[3]:
+ clen += 1
+ compresseddeltalen += e[1]
+ if generaldelta:
+ iterrev = e[3]
+ else:
+ iterrev -= 1
+ if iterrev in chaininfocache:
+ t = chaininfocache[iterrev]
+ clen += t[0]
+ compresseddeltalen += t[1]
+ break
+ e = index[iterrev]
+ else:
+ # Add text length of base since decompressing that also takes
+ # work. For cache hits the length is already included.
+ compresseddeltalen += e[1]
+ r = (clen, compresseddeltalen)
+ chaininfocache[rev] = r
+ return r
+
def flags(self, rev):
return self.index[rev][0] & 0xFFFF
def rawsize(self, rev):
@@ -368,7 +407,7 @@
See the documentation for ancestor.lazyancestors for more details."""
- return ancestor.lazyancestors(self, revs, stoprev=stoprev,
+ return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
inclusive=inclusive)
def descendants(self, revs):
@@ -456,6 +495,20 @@
missing.sort()
return has, [self.node(r) for r in missing]
+ def incrementalmissingrevs(self, common=None):
+ """Return an object that can be used to incrementally compute the
+ revision numbers of the ancestors of arbitrary sets that are not
+ ancestors of common. This is an ancestor.incrementalmissingancestors
+ object.
+
+ 'common' is a list of revision numbers. If common is not supplied, uses
+ nullrev.
+ """
+ if common is None:
+ common = [nullrev]
+
+ return ancestor.incrementalmissingancestors(self.parentrevs, common)
+
def findmissingrevs(self, common=None, heads=None):
"""Return the revision numbers of the ancestors of heads that
are not ancestors of common.
@@ -477,7 +530,8 @@
if heads is None:
heads = self.headrevs()
- return ancestor.missingancestors(heads, common, self.parentrevs)
+ inc = self.incrementalmissingrevs(common=common)
+ return inc.missingancestors(heads)
def findmissing(self, common=None, heads=None):
"""Return the ancestors of heads that are not ancestors of common.
@@ -502,8 +556,8 @@
common = [self.rev(n) for n in common]
heads = [self.rev(n) for n in heads]
- return [self.node(r) for r in
- ancestor.missingancestors(heads, common, self.parentrevs)]
+ inc = self.incrementalmissingrevs(common=common)
+ return [self.node(r) for r in inc.missingancestors(heads)]
def nodesbetween(self, roots=None, heads=None):
"""Return a topological path from 'roots' to 'heads'.
@@ -1202,11 +1256,15 @@
base = rev
else:
base = chainbase
- return dist, l, data, base, chainbase
+ chainlen, compresseddeltalen = self._chaininfo(rev)
+ chainlen += 1
+ compresseddeltalen += l
+ return dist, l, data, base, chainbase, chainlen, compresseddeltalen
curr = len(self)
prev = curr - 1
base = chainbase = curr
+ chainlen = None
offset = self.end(prev)
flags = 0
d = None
@@ -1226,7 +1284,7 @@
d = builddelta(prev)
else:
d = builddelta(prev)
- dist, l, data, base, chainbase = d
+ dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
# full versions are inserted when the needed deltas
# become comparable to the uncompressed text
@@ -1235,7 +1293,14 @@
cachedelta[1])
else:
textlen = len(text)
- if d is None or dist > textlen * 2:
+
+ # - 'dist' is the distance from the base revision -- bounding it limits
+ # the amount of I/O we need to do.
+ # - 'compresseddeltalen' is the sum of the total size of deltas we need
+ # to apply -- bounding it limits the amount of CPU we consume.
+ if (d is None or dist > textlen * 4 or l > textlen or
+ compresseddeltalen > textlen * 2 or
+ (self._maxchainlen and chainlen > self._maxchainlen)):
text = buildtext()
data = self.compress(text)
l = len(data[1]) + len(data[0])
@@ -1419,6 +1484,7 @@
# then reset internal state in memory to forget those revisions
self._cache = None
+ self._chaininfocache = {}
self._chunkclear()
for x in xrange(rev, len(self)):
del self.nodemap[self.node(x)]
--- a/mercurial/revset.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/revset.py Tue Dec 09 13:32:19 2014 -0600
@@ -10,7 +10,6 @@
import node
import heapq
import match as matchmod
-import ancestor as ancestormod
from i18n import _
import encoding
import obsolete as obsmod
@@ -265,9 +264,8 @@
return stringset(repo, subset, x)
def rangeset(repo, subset, x, y):
- cl = baseset(repo.changelog)
- m = getset(repo, cl, x)
- n = getset(repo, cl, y)
+ m = getset(repo, fullreposet(repo), x)
+ n = getset(repo, fullreposet(repo), y)
if not m or not n:
return baseset()
@@ -371,7 +369,7 @@
raise error.ParseError(_("~ expects a number"))
ps = set()
cl = repo.changelog
- for r in getset(repo, baseset(cl), x):
+ for r in getset(repo, fullreposet(repo), x):
for i in range(n):
r = cl.parentrevs(r)[0]
ps.add(r)
@@ -386,30 +384,6 @@
kind, pattern, matcher = _substringmatcher(n)
return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
-def only(repo, subset, x):
- """``only(set, [set])``
- Changesets that are ancestors of the first set that are not ancestors
- of any other head in the repo. If a second set is specified, the result
- is ancestors of the first set that are not ancestors of the second set
- (i.e. ::<set1> - ::<set2>).
- """
- cl = repo.changelog
- # i18n: "only" is a keyword
- args = getargs(x, 1, 2, _('only takes one or two arguments'))
- include = getset(repo, spanset(repo), args[0])
- if len(args) == 1:
- if not include:
- return baseset()
-
- descendants = set(_revdescendants(repo, include, False))
- exclude = [rev for rev in cl.headrevs()
- if not rev in descendants and not rev in include]
- else:
- exclude = getset(repo, spanset(repo), args[1])
-
- results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
- return subset & results
-
def bisect(repo, subset, x):
"""``bisect(string)``
Changesets marked in the specified bisect status:
@@ -573,7 +547,7 @@
"""``children(set)``
Child changesets of changesets in set.
"""
- s = getset(repo, baseset(repo), x)
+ s = getset(repo, fullreposet(repo), x)
cs = _children(repo, subset, s)
return subset & cs
@@ -1140,6 +1114,30 @@
obsoletes = obsmod.getrevs(repo, 'obsolete')
return subset & obsoletes
+def only(repo, subset, x):
+ """``only(set, [set])``
+ Changesets that are ancestors of the first set that are not ancestors
+ of any other head in the repo. If a second set is specified, the result
+ is ancestors of the first set that are not ancestors of the second set
+ (i.e. ::<set1> - ::<set2>).
+ """
+ cl = repo.changelog
+ # i18n: "only" is a keyword
+ args = getargs(x, 1, 2, _('only takes one or two arguments'))
+ include = getset(repo, spanset(repo), args[0])
+ if len(args) == 1:
+ if not include:
+ return baseset()
+
+ descendants = set(_revdescendants(repo, include, False))
+ exclude = [rev for rev in cl.headrevs()
+ if not rev in descendants and not rev in include]
+ else:
+ exclude = getset(repo, spanset(repo), args[1])
+
+ results = set(cl.findmissingrevs(common=exclude, heads=include))
+ return subset & results
+
def origin(repo, subset, x):
"""``origin([set])``
Changesets that were specified as a source for the grafts, transplants or
@@ -1258,7 +1256,7 @@
raise error.ParseError(_("^ expects a number 0, 1, or 2"))
ps = set()
cl = repo.changelog
- for r in getset(repo, baseset(cl), x):
+ for r in getset(repo, fullreposet(repo), x):
if n == 0:
ps.add(r)
elif n == 1:
@@ -1384,7 +1382,7 @@
# i18n: "matching" is a keyword
l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
- revs = getset(repo, baseset(repo.changelog), l[0])
+ revs = getset(repo, fullreposet(repo), l[0])
fieldlist = ['metadata']
if len(l) > 1:
@@ -1689,7 +1687,6 @@
"ancestors": ancestors,
"_firstancestors": _firstancestors,
"author": author,
- "only": only,
"bisect": bisect,
"bisected": bisected,
"bookmark": bookmark,
@@ -1729,6 +1726,7 @@
"min": minrev,
"modifies": modifies,
"obsolete": obsolete,
+ "only": only,
"origin": origin,
"outgoing": outgoing,
"p1": p1,
@@ -1800,6 +1798,7 @@
"min",
"modifies",
"obsolete",
+ "only",
"origin",
"outgoing",
"p1",
@@ -2551,7 +2550,7 @@
return it()
def _trysetasclist(self):
- """populate the _asclist attribut if possible and necessary"""
+ """populate the _asclist attribute if possible and necessary"""
if self._genlist is not None and self._asclist is None:
self._asclist = sorted(self._genlist)
@@ -2744,7 +2743,7 @@
# We have to use this complex iteration strategy to allow multiple
# iterations at the same time. We need to be able to catch revision
- # removed from `consumegen` and added to genlist in another instance.
+ # removed from _consumegen and added to genlist in another instance.
#
# Getting rid of it would provide an about 15% speed up on this
# iteration.
@@ -2939,17 +2938,15 @@
class fullreposet(_spanset):
"""a set containing all revisions in the repo
- This class exists to host special optimisation.
+ This class exists to host special optimization.
"""
def __init__(self, repo):
super(fullreposet, self).__init__(repo)
def __and__(self, other):
- """fullrepo & other -> other
-
- As self contains the whole repo, all of the other set should also be in
- self. Therefor `self & other = other`.
+ """As self contains the whole repo, all of the other set should also be
+ in self. Therefore `self & other = other`.
This boldly assumes the other contains valid revs only.
"""
--- a/mercurial/scmutil.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/scmutil.py Tue Dec 09 13:32:19 2014 -0600
@@ -188,9 +188,25 @@
raise
return ""
- def open(self, path, mode="r", text=False, atomictemp=False):
+ def tryreadlines(self, path, mode='rb'):
+ '''gracefully return an empty array for missing files'''
+ try:
+ return self.readlines(path, mode=mode)
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ return []
+
+ def open(self, path, mode="r", text=False, atomictemp=False,
+ notindexed=False):
+ '''Open ``path`` file, which is relative to vfs root.
+
+ Newly created directories are marked as "not to be indexed by
+ the content indexing service", if ``notindexed`` is specified
+ for "write" mode access.
+ '''
self.open = self.__call__
- return self.__call__(path, mode, text, atomictemp)
+ return self.__call__(path, mode, text, atomictemp, notindexed)
def read(self, path):
fp = self(path, 'rb')
@@ -199,6 +215,13 @@
finally:
fp.close()
+ def readlines(self, path, mode='rb'):
+ fp = self(path, mode=mode)
+ try:
+ return fp.readlines()
+ finally:
+ fp.close()
+
def write(self, path, data):
fp = self(path, 'wb')
try:
@@ -206,6 +229,13 @@
finally:
fp.close()
+ def writelines(self, path, data, mode='wb', notindexed=False):
+ fp = self(path, mode=mode, notindexed=notindexed)
+ try:
+ return fp.writelines(data)
+ finally:
+ fp.close()
+
def append(self, path, data):
fp = self(path, 'ab')
try:
@@ -329,7 +359,14 @@
return
os.chmod(name, self.createmode & 0666)
- def __call__(self, path, mode="r", text=False, atomictemp=False):
+ def __call__(self, path, mode="r", text=False, atomictemp=False,
+ notindexed=False):
+ '''Open ``path`` file, which is relative to vfs root.
+
+ Newly created directories are marked as "not to be indexed by
+ the content indexing service", if ``notindexed`` is specified
+ for "write" mode access.
+ '''
if self._audit:
r = util.checkosfilename(path)
if r:
@@ -347,7 +384,7 @@
# to a directory. Let the posixfile() call below raise IOError.
if basename:
if atomictemp:
- util.ensuredirs(dirname, self.createmode)
+ util.ensuredirs(dirname, self.createmode, notindexed)
return util.atomictempfile(f, mode, self.createmode)
try:
if 'w' in mode:
@@ -365,7 +402,7 @@
if e.errno != errno.ENOENT:
raise
nlink = 0
- util.ensuredirs(dirname, self.createmode)
+ util.ensuredirs(dirname, self.createmode, notindexed)
if nlink > 0:
if self._trustnlink is None:
self._trustnlink = nlink > 1 or util.checknlink(f)
@@ -495,7 +532,13 @@
def osrcpath():
'''return default os-specific hgrc search path'''
- path = systemrcpath()
+ path = []
+ defaultpath = os.path.join(util.datapath, 'default.d')
+ if os.path.isdir(defaultpath):
+ for f, kind in osutil.listdir(defaultpath):
+ if f.endswith('.rc'):
+ path.append(os.path.join(defaultpath, f))
+ path.extend(systemrcpath())
path.extend(userrcpath())
path = [os.path.normpath(f) for f in path]
return path
@@ -680,25 +723,24 @@
rejected = []
m.bad = lambda x, y: rejected.append(x)
- added, unknown, deleted, removed = _interestingfiles(repo, m)
+ added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
- unknownset = set(unknown)
+ unknownset = set(unknown + forgotten)
toprint = unknownset.copy()
toprint.update(deleted)
for abs in sorted(toprint):
if repo.ui.verbose or not m.exact(abs):
- rel = m.rel(abs)
if abs in unknownset:
- status = _('adding %s\n') % ((pats and rel) or abs)
+ status = _('adding %s\n') % m.uipath(abs)
else:
- status = _('removing %s\n') % ((pats and rel) or abs)
+ status = _('removing %s\n') % m.uipath(abs)
repo.ui.status(status)
renames = _findrenames(repo, m, added + unknown, removed + deleted,
similarity)
if not dry_run:
- _markchanges(repo, unknown, deleted, renames)
+ _markchanges(repo, unknown + forgotten, deleted, renames)
for f in rejected:
if f in m.files():
@@ -712,10 +754,10 @@
rejected = []
m.bad = lambda x, y: rejected.append(x)
- added, unknown, deleted, removed = _interestingfiles(repo, m)
+ added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
if repo.ui.verbose:
- unknownset = set(unknown)
+ unknownset = set(unknown + forgotten)
toprint = unknownset.copy()
toprint.update(deleted)
for abs in sorted(toprint):
@@ -728,7 +770,7 @@
renames = _findrenames(repo, m, added + unknown, removed + deleted,
similarity)
- _markchanges(repo, unknown, deleted, renames)
+ _markchanges(repo, unknown + forgotten, deleted, renames)
for f in rejected:
if f in m.files():
@@ -741,7 +783,7 @@
This is different from dirstate.status because it doesn't care about
whether files are modified or clean.'''
- added, unknown, deleted, removed = [], [], [], []
+ added, unknown, deleted, removed, forgotten = [], [], [], [], []
audit_path = pathutil.pathauditor(repo.root)
ctx = repo[None]
@@ -754,13 +796,15 @@
unknown.append(abs)
elif dstate != 'r' and not st:
deleted.append(abs)
+ elif dstate == 'r' and st:
+ forgotten.append(abs)
# for finding renames
- elif dstate == 'r':
+ elif dstate == 'r' and not st:
removed.append(abs)
elif dstate == 'a':
added.append(abs)
- return added, unknown, deleted, removed
+ return added, unknown, deleted, removed, forgotten
def _findrenames(repo, matcher, added, removed, similarity):
'''Find renames from removed files to added ones.'''
--- a/mercurial/setdiscovery.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/setdiscovery.py Tue Dec 09 13:32:19 2014 -0600
@@ -40,7 +40,7 @@
classified with it (since all ancestors or descendants will be marked as well).
"""
-from node import nullid
+from node import nullid, nullrev
from i18n import _
import random
import util, dagutil
@@ -177,27 +177,23 @@
# own nodes where I don't know if remote knows them
undecided = dag.nodeset()
# own nodes I know we both know
- common = set()
+ # treat remote heads (and maybe own heads) as a first implicit sample
+ # response
+ common = cl.incrementalmissingrevs(srvheads)
+ commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
+ common.addbases(commoninsample)
+ undecided = set(common.missingancestors(ownheads))
# own nodes I know remote lacks
missing = set()
- # treat remote heads (and maybe own heads) as a first implicit sample
- # response
- common.update(dag.ancestorset(srvheads))
- undecided.difference_update(common)
-
full = False
while undecided:
if sample:
- commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
- common.update(dag.ancestorset(commoninsample, common))
-
missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
missing.update(dag.descendantset(missinginsample, missing))
undecided.difference_update(missing)
- undecided.difference_update(common)
if not undecided:
break
@@ -206,7 +202,7 @@
ui.note(_("sampling from both directions\n"))
sample = _takefullsample(dag, undecided, size=fullsamplesize)
targetsize = fullsamplesize
- elif common:
+ elif common.hasbases():
# use cheapish initial sample
ui.debug("taking initial sample\n")
sample = _takefullsample(dag, undecided, size=fullsamplesize)
@@ -228,7 +224,17 @@
yesno = remote.known(dag.externalizeall(sample))
full = True
- result = dag.headsetofconnecteds(common)
+ if sample:
+ commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
+ common.addbases(commoninsample)
+ common.removeancestorsfrom(undecided)
+
+ # heads(common) == heads(common.bases) since common represents common.bases
+ # and all its ancestors
+ result = dag.headsetofconnecteds(common.bases)
+ # common.bases can include nullrev, but our contract requires us to not
+ # return any heads in that case, so discard that
+ result.discard(nullrev)
ui.progress(_('searching'), None)
ui.debug("%d total queries\n" % roundtrips)
--- a/mercurial/sshpeer.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/sshpeer.py Tue Dec 09 13:32:19 2014 -0600
@@ -52,7 +52,7 @@
util.shellquote("%s init %s" %
(_serverquote(remotecmd), _serverquote(self.path))))
ui.debug('running %s\n' % cmd)
- res = util.system(cmd, out=ui.fout)
+ res = ui.system(cmd)
if res != 0:
self._abort(error.RepoError(_("could not create remote repo")))
--- a/mercurial/subrepo.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/subrepo.py Tue Dec 09 13:32:19 2014 -0600
@@ -32,16 +32,6 @@
'''get a unique filename for the store hash cache of a remote repository'''
return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
-def _calcfilehash(filename):
- data = ''
- if os.path.exists(filename):
- fd = open(filename, 'rb')
- try:
- data = fd.read()
- finally:
- fd.close()
- return util.sha1(data).hexdigest()
-
class SubrepoAbort(error.Abort):
"""Exception class used to avoid handling a subrepo error more than once"""
def __init__(self, *args, **kw):
@@ -501,6 +491,13 @@
def forget(self, ui, match, prefix):
return ([], [])
+ def removefiles(self, ui, matcher, prefix, after, force, subrepos):
+ """remove the matched files from the subrepository and the filesystem,
+ possibly by force and/or after the file has been removed from the
+ filesystem. Return 0 on success, 1 on any warning.
+ """
+ return 1
+
def revert(self, ui, substate, *pats, **opts):
ui.warn('%s: reverting %s subrepos is unsupported\n' \
% (substate[0], substate[2]))
@@ -515,10 +512,7 @@
self._state = state
r = ctx._repo
root = r.wjoin(path)
- create = False
- if not os.path.exists(os.path.join(root, '.hg')):
- create = True
- util.makedirs(root)
+ create = not r.wvfs.exists('%s/.hg' % path)
self._repo = hg.repository(r.baseui, root, create=create)
for s, k in [('ui', 'commitsubrepos')]:
v = r.ui.config(s, k)
@@ -562,26 +556,19 @@
# sort the files that will be hashed in increasing (likely) file size
filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
yield '# %s\n' % _expandedabspath(remotepath)
+ vfs = self._repo.vfs
for relname in filelist:
- absname = os.path.normpath(self._repo.join(relname))
- yield '%s = %s\n' % (relname, _calcfilehash(absname))
+ filehash = util.sha1(vfs.tryread(relname)).hexdigest()
+ yield '%s = %s\n' % (relname, filehash)
- def _getstorehashcachepath(self, remotepath):
- '''get a unique path for the store hash cache'''
- return self._repo.join(os.path.join(
- 'cache', 'storehash', _getstorehashcachename(remotepath)))
+ @propertycache
+ def _cachestorehashvfs(self):
+ return scmutil.vfs(self._repo.join('cache/storehash'))
def _readstorehashcache(self, remotepath):
'''read the store hash cache for a given remote repository'''
- cachefile = self._getstorehashcachepath(remotepath)
- if not os.path.exists(cachefile):
- return ''
- fd = open(cachefile, 'r')
- try:
- pullstate = fd.readlines()
- finally:
- fd.close()
- return pullstate
+ cachefile = _getstorehashcachename(remotepath)
+ return self._cachestorehashvfs.tryreadlines(cachefile, 'r')
def _cachestorehash(self, remotepath):
'''cache the current store hash
@@ -589,18 +576,12 @@
Each remote repo requires its own store hash cache, because a subrepo
store may be "clean" versus a given remote repo, but not versus another
'''
- cachefile = self._getstorehashcachepath(remotepath)
+ cachefile = _getstorehashcachename(remotepath)
lock = self._repo.lock()
try:
storehash = list(self._calcstorehash(remotepath))
- cachedir = os.path.dirname(cachefile)
- if not os.path.exists(cachedir):
- util.makedirs(cachedir, notindexed=True)
- fd = open(cachefile, 'w')
- try:
- fd.writelines(storehash)
- finally:
- fd.close()
+ vfs = self._cachestorehashvfs
+ vfs.writelines(cachefile, storehash, mode='w', notindexed=True)
finally:
lock.release()
@@ -854,6 +835,12 @@
os.path.join(prefix, self._path), True)
@annotatesubrepoerror
+ def removefiles(self, ui, matcher, prefix, after, force, subrepos):
+ return cmdutil.remove(ui, self._repo, matcher,
+ os.path.join(prefix, self._path), after, force,
+ subrepos)
+
+ @annotatesubrepoerror
def revert(self, ui, substate, *pats, **opts):
# reverting a subrepo is a 2 step process:
# 1. if the no_backup is not set, revert all modified
@@ -1583,6 +1570,15 @@
removed.append(f)
deleted, unknown, ignored, clean = [], [], [], []
+
+ if not rev2:
+ command = ['ls-files', '--others', '--exclude-standard']
+ out = self._gitcommand(command)
+ for line in out.split('\n'):
+ if len(line) == 0:
+ continue
+ unknown.append(line)
+
return scmutil.status(modified, added, removed, deleted,
unknown, ignored, clean)
--- a/mercurial/tagmerge.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/tagmerge.py Tue Dec 09 13:32:19 2014 -0600
@@ -39,7 +39,7 @@
# and between base and p2, possibly on separate clones
# 4. for each tag found both on p1 and p2 perform the following merge algorithm:
# - the tags conflict if their tag "histories" have the same "rank" (i.e.
-# length) _AND_ the last (current) tag is _NOT_ the same
+# length) AND the last (current) tag is NOT the same
# - for non conflicting tags:
# - choose which are the high and the low ranking nodes
# - the high ranking list of nodes is the one that is longer.
@@ -57,7 +57,7 @@
# 5. write the merged tags taking into account to their positions in the first
# parent (i.e. try to keep the relative ordering of the nodes that come
# from p1). This minimizes the diff between the merged and the p1 tag files
-# This is donw by using the following algorithm
+# This is done by using the following algorithm
# - group the nodes for a given tag that must be written next to each other
# - A: nodes that come from consecutive lines on p1
# - B: nodes that come from p2 (i.e. whose associated line number is
@@ -81,9 +81,9 @@
def readtagsformerge(ui, repo, lines, fn='', keeplinenums=False):
'''read the .hgtags file into a structure that is suitable for merging
- Sepending on the keeplinenumbers flag, clear the line numbers associated
- with each tag. Rhis is done because only the line numbers of the first
- parent are useful for merging
+ Depending on the keeplinenums flag, clear the line numbers associated
+ with each tag. This is done because only the line numbers of the first
+ parent are useful for merging.
'''
filetags = tagsmod._readtaghist(ui, repo, lines, fn=fn, recode=None,
calcnodelines=True)[1]
--- a/mercurial/tags.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/tags.py Tue Dec 09 13:32:19 2014 -0600
@@ -87,7 +87,7 @@
def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
'''Read tag definitions from a file (or any source of lines).
This function returns two sortdicts with similar information:
- - the first dict, bingtaglist, contains the tag information as expected by
+ - the first dict, bintaghist, contains the tag information as expected by
the _readtags function, i.e. a mapping from tag name to (node, hist):
- node is the node id from the last line read for that name,
- hist is the list of node ids previously associated with it (in file
--- a/mercurial/transaction.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/transaction.py Tue Dec 09 13:32:19 2014 -0600
@@ -12,10 +12,11 @@
# GNU General Public License version 2 or any later version.
from i18n import _
+import os
import errno
import error, util
-version = 1
+version = 2
def active(func):
def _active(self, *args, **kwds):
@@ -25,7 +26,8 @@
return func(self, *args, **kwds)
return _active
-def _playback(journal, report, opener, entries, backupentries, unlink=True):
+def _playback(journal, report, opener, vfsmap, entries, backupentries,
+ unlink=True):
for f, o, _ignore in entries:
if o or not unlink:
try:
@@ -43,26 +45,46 @@
raise
backupfiles = []
- for f, b, _ignore in backupentries:
- filepath = opener.join(f)
- backuppath = opener.join(b)
+ for l, f, b, c in backupentries:
+ if l not in vfsmap and c:
+ report("couldn't handle %s: unknown cache location %s\n"
+ % (b, l))
+ vfs = vfsmap[l]
try:
- util.copyfile(backuppath, filepath)
- backupfiles.append(b)
- except IOError:
- report(_("failed to recover %s\n") % f)
- raise
+ if f and b:
+ filepath = vfs.join(f)
+ backuppath = vfs.join(b)
+ try:
+ util.copyfile(backuppath, filepath)
+ backupfiles.append(b)
+ except IOError:
+ report(_("failed to recover %s\n") % f)
+ else:
+ target = f or b
+ try:
+ vfs.unlink(target)
+ except (IOError, OSError), inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ except (IOError, OSError, util.Abort), inst:
+ if not c:
+ raise
opener.unlink(journal)
backuppath = "%s.backupfiles" % journal
if opener.exists(backuppath):
opener.unlink(backuppath)
- for f in backupfiles:
- opener.unlink(f)
+ try:
+ for f in backupfiles:
+ if opener.exists(f):
+ opener.unlink(f)
+ except (IOError, OSError, util.Abort), inst:
+ # only pure backup file remains, it is safe to ignore any error
+ pass
class transaction(object):
- def __init__(self, report, opener, journal, after=None, createmode=None,
- onclose=None, onabort=None):
+ def __init__(self, report, opener, vfsmap, journal, after=None,
+ createmode=None, onclose=None, onabort=None):
"""Begin a new transaction
Begins a new transaction that allows rolling back writes in the event of
@@ -78,29 +100,49 @@
self.count = 1
self.usages = 1
self.report = report
+ # a vfs to the store content
self.opener = opener
+ # a map to access files in various locations: {location -> vfs}
+ vfsmap = vfsmap.copy()
+ vfsmap[''] = opener # set default value
+ self._vfsmap = vfsmap
self.after = after
self.onclose = onclose
self.onabort = onabort
self.entries = []
- self.backupentries = []
self.map = {}
- self.backupmap = {}
self.journal = journal
self._queue = []
# a dict of arguments to be passed to hooks
self.hookargs = {}
+ self.file = opener.open(self.journal, "w")
- self.backupjournal = "%s.backupfiles" % journal
- self.file = opener.open(self.journal, "w")
- self.backupsfile = opener.open(self.backupjournal, 'w')
- self.backupsfile.write('%d\n' % version)
+ # a list of ('location', 'path', 'backuppath', cache) entries.
+ # - if 'backuppath' is empty, no file existed at backup time
+ # - if 'path' is empty, this is a temporary transaction file
+ # - if 'location' is not empty, the path is outside main opener reach.
+ # use 'location' value as a key in a vfsmap to find the right 'vfs'
+ # (cache is currently unused)
+ self._backupentries = []
+ self._backupmap = {}
+ self._backupjournal = "%s.backupfiles" % journal
+ self._backupsfile = opener.open(self._backupjournal, 'w')
+ self._backupsfile.write('%d\n' % version)
+
if createmode is not None:
opener.chmod(self.journal, createmode & 0666)
- opener.chmod(self.backupjournal, createmode & 0666)
+ opener.chmod(self._backupjournal, createmode & 0666)
# hold file generations to be performed on commit
self._filegenerators = {}
+ # hold callbacks to write pending data for hooks
+ self._pendingcallback = {}
+ # True if any pending data have been written ever
+ self._anypending = False
+ # holds callback to call when writing the transaction
+ self._finalizecallback = {}
+ # hold callbacks for post transaction close
+ self._postclosecallback = {}
def __del__(self):
if self.journal:
@@ -108,38 +150,37 @@
@active
def startgroup(self):
- self._queue.append(([], []))
+ """delay registration of file entry
+
+ This is used by strip to delay vision of strip offset. The transaction
+ sees either none or all of the strip actions to be done."""
+ self._queue.append([])
@active
def endgroup(self):
- q = self._queue.pop()
- self.entries.extend(q[0])
- self.backupentries.extend(q[1])
-
- offsets = []
- backups = []
- for f, o, _data in q[0]:
- offsets.append((f, o))
+ """apply delayed registration of file entry.
- for f, b, _data in q[1]:
- backups.append((f, b))
-
- d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
- self.file.write(d)
- self.file.flush()
-
- d = ''.join(['%s\0%s\n' % (f, b) for f, b in backups])
- self.backupsfile.write(d)
- self.backupsfile.flush()
+ This is used by strip to delay vision of strip offset. The transaction
+ sees either none or all of the strip actions to be done."""
+ q = self._queue.pop()
+ for f, o, data in q:
+ self._addentry(f, o, data)
@active
def add(self, file, offset, data=None):
- if file in self.map or file in self.backupmap:
+ """record the state of an append-only file before update"""
+ if file in self.map or file in self._backupmap:
return
if self._queue:
- self._queue[-1][0].append((file, offset, data))
+ self._queue[-1].append((file, offset, data))
return
+ self._addentry(file, offset, data)
+
+ def _addentry(self, file, offset, data):
+ """add an append-only entry to memory and on-disk state"""
+ if file in self.map or file in self._backupmap:
+ return
self.entries.append((file, offset, data))
self.map[file] = len(self.entries) - 1
# add enough data to the journal to do the truncate
@@ -147,7 +188,7 @@
self.file.flush()
@active
- def addbackup(self, file, hardlink=True, vfs=None):
+ def addbackup(self, file, hardlink=True, location=''):
"""Adds a backup of the file to the transaction
Calling addbackup() creates a hardlink backup of the specified file
@@ -157,31 +198,44 @@
* `file`: the file path, relative to .hg/store
* `hardlink`: use a hardlink to quickly create the backup
"""
+ if self._queue:
+ msg = 'cannot use transaction.addbackup inside "group"'
+ raise RuntimeError(msg)
- if file in self.map or file in self.backupmap:
+ if file in self.map or file in self._backupmap:
return
- backupfile = "%s.backup.%s" % (self.journal, file)
- if vfs is None:
- vfs = self.opener
+ dirname, filename = os.path.split(file)
+ backupfilename = "%s.backup.%s" % (self.journal, filename)
+ backupfile = os.path.join(dirname, backupfilename)
+ vfs = self._vfsmap[location]
if vfs.exists(file):
filepath = vfs.join(file)
- backuppath = self.opener.join(backupfile)
+ backuppath = vfs.join(backupfile)
util.copyfiles(filepath, backuppath, hardlink=hardlink)
else:
- self.add(file, 0)
- return
+ backupfile = ''
+
+ self._addbackupentry((location, file, backupfile, False))
- if self._queue:
- self._queue[-1][1].append((file, backupfile))
- return
-
- self.backupentries.append((file, backupfile, None))
- self.backupmap[file] = len(self.backupentries) - 1
- self.backupsfile.write("%s\0%s\n" % (file, backupfile))
- self.backupsfile.flush()
+ def _addbackupentry(self, entry):
+ """register a new backup entry and write it to disk"""
+ self._backupentries.append(entry)
+ self._backupmap[file] = len(self._backupentries) - 1
+ self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
+ self._backupsfile.flush()
@active
- def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
+ def registertmp(self, tmpfile, location=''):
+ """register a temporary transaction file
+
+ Such files will be deleted when the transaction exits (on both
+ failure and success).
+ """
+ self._addbackupentry((location, '', tmpfile, False))
+
+ @active
+ def addfilegenerator(self, genid, filenames, genfunc, order=0,
+ location=''):
"""add a function to generates some files at transaction commit
The `genfunc` argument is a function capable of generating proper
@@ -199,38 +253,43 @@
The `order` argument may be used to control the order in which multiple
generator will be executed.
+
+ The `location` argument may be used to indicate the files are located
+ outside of the standard directory for transactions. It should match
+ one of the keys of the `transaction.vfsmap` dictionary.
"""
# For now, we are unable to do proper backup and restore of custom vfs
# but for bookmarks that are handled outside this mechanism.
- assert vfs is None or filenames == ('bookmarks',)
- self._filegenerators[genid] = (order, filenames, genfunc, vfs)
+ self._filegenerators[genid] = (order, filenames, genfunc, location)
- def _generatefiles(self):
+ def _generatefiles(self, suffix=''):
# write files registered for generation
+ any = False
for entry in sorted(self._filegenerators.values()):
- order, filenames, genfunc, vfs = entry
- if vfs is None:
- vfs = self.opener
+ any = True
+ order, filenames, genfunc, location = entry
+ vfs = self._vfsmap[location]
files = []
try:
for name in filenames:
- # Some files are already backed up when creating the
- # localrepo. Until this is properly fixed we disable the
- # backup for them.
- if name not in ('phaseroots', 'bookmarks'):
- self.addbackup(name)
+ name += suffix
+ if suffix:
+ self.registertmp(name, location=location)
+ else:
+ self.addbackup(name, location=location)
files.append(vfs(name, 'w', atomictemp=True))
genfunc(*files)
finally:
for f in files:
f.close()
+ return any
@active
def find(self, file):
if file in self.map:
return self.entries[self.map[file]]
- if file in self.backupmap:
- return self.backupentries[self.backupmap[file]]
+ if file in self._backupmap:
+ return self._backupentries[self._backupmap[file]]
return None
@active
@@ -263,29 +322,111 @@
def running(self):
return self.count > 0
+ def addpending(self, category, callback):
+ """add a callback to be called when the transaction is pending
+
+ The transaction will be given as callback's first argument.
+
+ Category is a unique identifier to allow overwriting an old callback
+ with a newer callback.
+ """
+ self._pendingcallback[category] = callback
+
+ @active
+ def writepending(self):
+ '''write pending file to temporary version
+
+ This is used to allow hooks to view a transaction before commit'''
+ categories = sorted(self._pendingcallback)
+ for cat in categories:
+ # remove callback since the data will have been flushed
+ any = self._pendingcallback.pop(cat)(self)
+ self._anypending = self._anypending or any
+ self._anypending |= self._generatefiles(suffix='.pending')
+ return self._anypending
+
+ @active
+ def addfinalize(self, category, callback):
+ """add a callback to be called when the transaction is closed
+
+ The transaction will be given as callback's first argument.
+
+ Category is a unique identifier to allow overwriting old callbacks with
+ newer callbacks.
+ """
+ self._finalizecallback[category] = callback
+
+ @active
+ def addpostclose(self, category, callback):
+ """add a callback to be called after the transaction is closed
+
+ The transaction will be given as callback's first argument.
+
+ Category is a unique identifier to allow overwriting an old callback
+ with a newer callback.
+ """
+ self._postclosecallback[category] = callback
+
@active
def close(self):
'''commit the transaction'''
- if self.count == 1 and self.onclose is not None:
+ if self.count == 1:
self._generatefiles()
- self.onclose()
+ categories = sorted(self._finalizecallback)
+ for cat in categories:
+ self._finalizecallback[cat](self)
+ if self.onclose is not None:
+ self.onclose()
self.count -= 1
if self.count != 0:
return
self.file.close()
- self.backupsfile.close()
+ self._backupsfile.close()
+ # cleanup temporary files
+ for l, f, b, c in self._backupentries:
+ if l not in self._vfsmap and c:
+ self.report("couldn't remote %s: unknown cache location %s\n"
+ % (b, l))
+ continue
+ vfs = self._vfsmap[l]
+ if not f and b and vfs.exists(b):
+ try:
+ vfs.unlink(b)
+ except (IOError, OSError, util.Abort), inst:
+ if not c:
+ raise
+ # Abort may be raise by read only opener
+ self.report("couldn't remote %s: %s\n"
+ % (vfs.join(b), inst))
self.entries = []
if self.after:
self.after()
if self.opener.isfile(self.journal):
self.opener.unlink(self.journal)
- if self.opener.isfile(self.backupjournal):
- self.opener.unlink(self.backupjournal)
- for _f, b, _ignore in self.backupentries:
- self.opener.unlink(b)
- self.backupentries = []
+ if self.opener.isfile(self._backupjournal):
+ self.opener.unlink(self._backupjournal)
+ for _l, _f, b, c in self._backupentries:
+ if l not in self._vfsmap and c:
+ self.report("couldn't remote %s: unknown cache location"
+ "%s\n" % (b, l))
+ continue
+ vfs = self._vfsmap[l]
+ if b and vfs.exists(b):
+ try:
+ vfs.unlink(b)
+ except (IOError, OSError, util.Abort), inst:
+ if not c:
+ raise
+ # Abort may be raise by read only opener
+ self.report("couldn't remote %s: %s\n"
+ % (vfs.join(b), inst))
+ self._backupentries = []
self.journal = None
+ # run post close action
+ categories = sorted(self._postclosecallback)
+ for cat in categories:
+ self._postclosecallback[cat](self)
@active
def abort(self):
@@ -298,24 +439,24 @@
self.count = 0
self.usages = 0
self.file.close()
- self.backupsfile.close()
+ self._backupsfile.close()
if self.onabort is not None:
self.onabort()
try:
- if not self.entries and not self.backupentries:
+ if not self.entries and not self._backupentries:
if self.journal:
self.opener.unlink(self.journal)
- if self.backupjournal:
- self.opener.unlink(self.backupjournal)
+ if self._backupjournal:
+ self.opener.unlink(self._backupjournal)
return
self.report(_("transaction abort!\n"))
try:
- _playback(self.journal, self.report, self.opener,
- self.entries, self.backupentries, False)
+ _playback(self.journal, self.report, self.opener, self._vfsmap,
+ self.entries, self._backupentries, False)
self.report(_("rollback completed\n"))
except Exception:
self.report(_("rollback failed - please run hg recover\n"))
@@ -323,7 +464,7 @@
self.journal = None
-def rollback(opener, file, report):
+def rollback(opener, vfsmap, file, report):
"""Rolls back the transaction contained in the given file
Reads the entries in the specified file, and the corresponding
@@ -359,10 +500,10 @@
if line:
# Shave off the trailing newline
line = line[:-1]
- f, b = line.split('\0')
- backupentries.append((f, b, None))
+ l, f, b, c = line.split('\0')
+ backupentries.append((l, f, b, bool(c)))
else:
- report(_("journal was created by a newer version of "
+ report(_("journal was created by a different version of "
"Mercurial"))
- _playback(file, report, opener, entries, backupentries)
+ _playback(file, report, opener, vfsmap, entries, backupentries)
--- a/mercurial/ui.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/ui.py Tue Dec 09 13:32:19 2014 -0600
@@ -537,7 +537,7 @@
return path or loc
def pushbuffer(self, error=False):
- """install a buffer to capture standar output of the ui object
+ """install a buffer to capture standard output of the ui object
If error is True, the error output will be captured too."""
self._buffers.append([])
@@ -814,10 +814,9 @@
editor = self.geteditor()
- util.system("%s \"%s\"" % (editor, name),
+ self.system("%s \"%s\"" % (editor, name),
environ=environ,
- onerr=util.Abort, errprefix=_("edit failed"),
- out=self.fout)
+ onerr=util.Abort, errprefix=_("edit failed"))
f = open(name)
t = f.read()
@@ -827,6 +826,13 @@
return t
+ def system(self, cmd, environ={}, cwd=None, onerr=None, errprefix=None):
+ '''execute shell command with appropriate output stream. command
+ output will be redirected if fout is not stdout.
+ '''
+ return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
+ errprefix=errprefix, out=self.fout)
+
def traceback(self, exc=None, force=False):
'''print exception traceback if traceback printing enabled or forced.
only to call in exception handler. returns true if traceback
--- a/mercurial/util.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/util.py Tue Dec 09 13:32:19 2014 -0600
@@ -20,6 +20,7 @@
import re as remod
import os, time, datetime, calendar, textwrap, signal, collections
import imp, socket, urllib
+import gc
if os.name == 'nt':
import windows as platform
@@ -369,6 +370,12 @@
return self._list
def iterkeys(self):
return self._list.__iter__()
+ def iteritems(self):
+ for k in self._list:
+ yield k, self[k]
+ def insert(self, index, key, val):
+ self._list.insert(index, key)
+ dict.__setitem__(self, key, val)
class lrucachedict(object):
'''cache most recent gets from or sets to this dictionary'''
@@ -538,6 +545,28 @@
def never(fn):
return False
+def nogc(func):
+ """disable garbage collector
+
+ Python's garbage collector triggers a GC each time a certain number of
+ container objects (the number being defined by gc.get_threshold()) are
+ allocated even when marked not to be tracked by the collector. Tracking has
+ no effect on when GCs are triggered, only on what objects the GC looks
+ into. As a workaround, disable GC while building complex (huge)
+ containers.
+
+ This garbage collector issue has been fixed in 2.7.
+ """
+ def wrapper(*args, **kwargs):
+ gcenabled = gc.isenabled()
+ gc.disable()
+ try:
+ return func(*args, **kwargs)
+ finally:
+ if gcenabled:
+ gc.enable()
+ return wrapper
+
def pathto(root, n1, n2):
'''return the relative path from one place to another.
root should use os.sep to separate directories
@@ -613,9 +642,8 @@
'''enhanced shell command execution.
run with environment maybe modified, maybe in different dir.
- if command fails and onerr is None, return status. if ui object,
- print error message and return status, else raise onerr object as
- exception.
+ if command fails and onerr is None, return status, else raise onerr
+ object as exception.
if out is specified, it is assumed to be a file-like object that has a
write() method. stdout and stderr will be redirected to out.'''
@@ -664,10 +692,7 @@
explainexit(rc)[0])
if errprefix:
errmsg = '%s: %s' % (errprefix, errmsg)
- try:
- onerr.warn(errmsg + '\n')
- except AttributeError:
- raise onerr(errmsg)
+ raise onerr(errmsg)
return rc
def checksignature(func):
@@ -1086,15 +1111,20 @@
if mode is not None:
os.chmod(name, mode)
-def ensuredirs(name, mode=None):
- """race-safe recursive directory creation"""
+def ensuredirs(name, mode=None, notindexed=False):
+ """race-safe recursive directory creation
+
+ Newly created directories are marked as "not to be indexed by
+ the content indexing service", if ``notindexed`` is specified
+ for "write" mode access.
+ """
if os.path.isdir(name):
return
parent = os.path.dirname(os.path.abspath(name))
if parent != name:
- ensuredirs(parent, mode)
+ ensuredirs(parent, mode, notindexed)
try:
- os.mkdir(name)
+ makedir(name, notindexed)
except OSError, err:
if err.errno == errno.EEXIST and os.path.isdir(name):
# someone else seems to have won a directory creation race
@@ -1148,7 +1178,7 @@
"""Read L bytes of data from the iterator of chunks of data.
Returns less than L bytes if the iterator runs dry.
- If size parameter is ommited, read everything"""
+ If size parameter is omitted, read everything"""
left = l
buf = []
queue = self._queue
--- a/mercurial/wireproto.py Mon Dec 08 15:41:54 2014 -0800
+++ b/mercurial/wireproto.py Tue Dec 09 13:32:19 2014 -0600
@@ -827,7 +827,7 @@
r = exchange.unbundle(repo, gen, their_heads, 'serve',
proto._client())
if util.safehasattr(r, 'addpart'):
- # The return looks streameable, we are in the bundle2 case and
+ # The return looks streamable, we are in the bundle2 case and
# should return a stream.
return streamres(r.getchunks())
return pushres(r)
--- a/setup.py Mon Dec 08 15:41:54 2014 -0800
+++ b/setup.py Tue Dec 09 13:32:19 2014 -0600
@@ -141,7 +141,8 @@
py2exeloaded = False
def runcmd(cmd, env):
- if sys.platform == 'plan9':
+ if (sys.platform == 'plan9'
+ and (sys.version_info[0] == 2 and sys.version_info[1] < 7)):
# subprocess kludge to work around issues in half-baked Python
# ports, notably bichued/python:
_, out, err = os.popen3(cmd)
@@ -517,6 +518,7 @@
packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
'help/*.txt',
+ 'default.d/*.rc',
'dummycert.pem']}
def ordinarypath(p):
Binary file tests/bundles/issue4438-r1.hg has changed
Binary file tests/bundles/issue4438-r2.hg has changed
--- a/tests/dumbhttp.py Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/dumbhttp.py Tue Dec 09 13:32:19 2014 -0600
@@ -5,15 +5,18 @@
"""
from optparse import OptionParser
-import BaseHTTPServer, SimpleHTTPServer, os, signal, subprocess, sys
+import BaseHTTPServer, SimpleHTTPServer, signal, sys
+from mercurial import cmdutil
-def run(server_class=BaseHTTPServer.HTTPServer,
- handler_class=SimpleHTTPServer.SimpleHTTPRequestHandler,
- server_address=('localhost', 8000)):
- httpd = server_class(server_address, handler_class)
- httpd.serve_forever()
-
+class simplehttpservice(object):
+ def __init__(self, host, port):
+ self.address = (host, port)
+ def init(self):
+ self.httpd = BaseHTTPServer.HTTPServer(
+ self.address, SimpleHTTPServer.SimpleHTTPRequestHandler)
+ def run(self):
+ self.httpd.serve_forever()
if __name__ == '__main__':
parser = OptionParser()
@@ -26,6 +29,7 @@
parser.add_option('-f', '--foreground', dest='foreground',
action='store_true',
help='do not start the HTTP server in the background')
+ parser.add_option('--daemon-pipefds')
(options, args) = parser.parse_args()
@@ -34,21 +38,9 @@
if options.foreground and options.pid:
parser.error("options --pid and --foreground are mutually exclusive")
- if options.foreground:
- run(server_address=(options.host, options.port))
- else:
- # This doesn't attempt to cleanly detach the process, as it's not
- # meant to be a long-lived, independent process. As a consequence,
- # it's still part of the same process group, and keeps any file
- # descriptors it might have inherited besided stdin/stdout/stderr.
- # Trying to do things cleanly is more complicated, requires
- # OS-dependent code, and is not worth the effort.
- proc = subprocess.Popen([sys.executable, __file__, '-f',
- '-H', options.host, '-p', str(options.port)],
- stdin=open(os.devnull, 'r'),
- stdout=open(os.devnull, 'w'),
- stderr=subprocess.STDOUT)
- if options.pid:
- fp = file(options.pid, 'wb')
- fp.write(str(proc.pid) + '\n')
- fp.close()
+ opts = {'pid_file': options.pid,
+ 'daemon': not options.foreground,
+ 'daemon_pipefds': options.daemon_pipefds}
+ service = simplehttpservice(options.host, options.port)
+ cmdutil.service(opts, initfn=service.init, runfn=service.run,
+ runargs=[sys.executable, __file__] + sys.argv[1:])
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/generate-working-copy-states.py Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,86 @@
+# Helper script used for generating history and working copy files and content.
+# The file's name corresponds to its history. The number of changesets can
+# be specified on the command line. With 2 changesets, files with names like
+# content1_content2_content1-untracked are generated. The first two filename
+# segments describe the contents in the two changesets. The third segment
+# ("content1-untracked") describes the state in the working copy, i.e.
+# the file has content "content1" and is untracked (since it was previously
+# tracked, it has been forgotten).
+#
+# This script generates the filenames and their content, but it's up to the
+# caller to tell hg about the state.
+#
+# There are two subcommands:
+# filelist <numchangesets>
+# state <numchangesets> (<changeset>|wc)
+#
+# Typical usage:
+#
+# $ python $TESTDIR/generate-working-copy-states.py state 2 1
+# $ hg addremove --similarity 0
+# $ hg commit -m 'first'
+#
+# $ python $TESTDIR/generate-working-copy-states.py state 2 1
+# $ hg addremove --similarity 0
+# $ hg commit -m 'second'
+#
+# $ python $TESTDIR/generate-working-copy-states.py state 2 wc
+# $ hg addremove --similarity 0
+# $ hg forget *_*_*-untracked
+# $ rm *_*_missing-*
+
+import sys
+import os
+
+# Generates pairs of (filename, contents), where 'contents' is a list
+# describing the file's content at each revision (or in the working copy).
+# At each revision, it is either None or the file's actual content. When not
+# None, it may be either new content or the same content as an earlier
+# revisions, so all of (modified,clean,added,removed) can be tested.
+def generatestates(maxchangesets, parentcontents):
+ depth = len(parentcontents)
+ if depth == maxchangesets + 1:
+ for tracked in ('untracked', 'tracked'):
+ filename = "_".join([(content is None and 'missing' or content) for
+ content in parentcontents]) + "-" + tracked
+ yield (filename, parentcontents)
+ else:
+ for content in (set([None, 'content' + str(depth + 1)]) |
+ set(parentcontents)):
+ for combination in generatestates(maxchangesets,
+ parentcontents + [content]):
+ yield combination
+
+# retrieve the command line arguments
+target = sys.argv[1]
+maxchangesets = int(sys.argv[2])
+if target == 'state':
+ depth = sys.argv[3]
+
+# sort to make sure we have stable output
+combinations = sorted(generatestates(maxchangesets, []))
+
+# compute file content
+content = []
+for filename, states in combinations:
+ if target == 'filelist':
+ print filename
+ elif target == 'state':
+ if depth == 'wc':
+ # Make sure there is content so the file gets written and can be
+ # tracked. It will be deleted outside of this script.
+ content.append((filename, states[maxchangesets] or 'TOBEDELETED'))
+ else:
+ content.append((filename, states[int(depth) - 1]))
+ else:
+ print >> sys.stderr, "unknown target:", target
+ sys.exit(1)
+
+# write actual content
+for filename, data in content:
+ if data is not None:
+ f = open(filename, 'wb')
+ f.write(data + '\n')
+ f.close()
+ elif os.path.exists(filename):
+ os.remove(filename)
--- a/tests/hghave.py Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/hghave.py Tue Dec 09 13:32:19 2014 -0600
@@ -289,14 +289,17 @@
@check("json", "some json module available")
def has_json():
try:
- if sys.version_info < (2, 7):
- import simplejson as json
- else:
- import json
+ import json
json.dumps
return True
except ImportError:
- return False
+ try:
+ import simplejson as json
+ json.dumps
+ return True
+ except ImportError:
+ pass
+ return False
@check("outer-repo", "outer repo")
def has_outer_repo():
--- a/tests/run-tests.py Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/run-tests.py Tue Dec 09 13:32:19 2014 -0600
@@ -61,12 +61,12 @@
import unittest
try:
- if sys.version_info < (2, 7):
+ import json
+except ImportError:
+ try:
import simplejson as json
- else:
- import json
-except ImportError:
- json = None
+ except ImportError:
+ json = None
processlock = threading.Lock()
@@ -500,7 +500,7 @@
except self.failureException, e:
# This differs from unittest in that we don't capture
# the stack trace. This is for historical reasons and
- # this decision could be revisted in the future,
+ # this decision could be revisited in the future,
# especially for PythonTest instances.
if result.addFailure(self, str(e)):
success = True
@@ -649,7 +649,8 @@
env["HGPORT2"] = str(self._startport + 2)
env["HGRCPATH"] = os.path.join(self._threadtmp, '.hgrc')
env["DAEMON_PIDS"] = os.path.join(self._threadtmp, 'daemon.pids')
- env["HGEDITOR"] = sys.executable + ' -c "import sys; sys.exit(0)"'
+ env["HGEDITOR"] = ('"' + sys.executable + '"'
+ + ' -c "import sys; sys.exit(0)"')
env["HGMERGE"] = "internal:merge"
env["HGUSER"] = "test"
env["HGENCODING"] = "ascii"
@@ -688,6 +689,10 @@
hgrc.write('commit = -d "0 0"\n')
hgrc.write('shelve = --date "0 0"\n')
hgrc.write('tag = -d "0 0"\n')
+ hgrc.write('[largefiles]\n')
+ hgrc.write('usercache = %s\n' %
+ (os.path.join(self._testtmp, '.cache/largefiles')))
+
for opt in self._extraconfigopts:
section, key = opt.split('.', 1)
assert '=' in key, ('extra config opt %s must '
@@ -720,6 +725,15 @@
return result
+# This script may want to drop globs from lines matching these patterns on
+# Windows, but check-code.py wants a glob on these lines unconditionally. Don't
+# warn if that is the case for anything matching these lines.
+checkcodeglobpats = [
+ re.compile(r'^pushing to \$TESTTMP/.*[^)]$'),
+ re.compile(r'^moving \S+/.*[^)]$'),
+ re.compile(r'^pulling from \$TESTTMP/.*[^)]$')
+]
+
class TTest(Test):
"""A "t test" is a test backed by a .t file."""
@@ -976,6 +990,9 @@
if el + '\n' == l:
if os.altsep:
# matching on "/" is not needed for this line
+ for pat in checkcodeglobpats:
+ if pat.match(el):
+ return True
return '-glob'
return True
i, n = 0, len(el)
@@ -1263,7 +1280,7 @@
iolock.release()
class TestSuite(unittest.TestSuite):
- """Custom unitest TestSuite that knows how to execute Mercurial tests."""
+ """Custom unittest TestSuite that knows how to execute Mercurial tests."""
def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
retest=False, keywords=None, loop=False,
@@ -1895,8 +1912,8 @@
the one we expect it to be. If not, print a warning to stderr."""
if ((self._bindir == self._pythondir) and
(self._bindir != self._tmpbindir)):
- # The pythondir has been infered from --with-hg flag.
- # We cannot expect anything sensible here
+ # The pythondir has been inferred from --with-hg flag.
+ # We cannot expect anything sensible here.
return
expecthg = os.path.join(self._pythondir, 'mercurial')
actualhg = self._gethgpath()
--- a/tests/silenttestrunner.py Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/silenttestrunner.py Tue Dec 09 13:32:19 2014 -0600
@@ -1,4 +1,4 @@
-import unittest, sys
+import unittest, sys, os
def main(modulename):
'''run the tests found in module, printing nothing when all tests pass'''
@@ -16,3 +16,6 @@
print
sys.stdout.write(exc)
sys.exit(1)
+
+if os.environ.get('SILENT_BE_NOISY'):
+ main = unittest.main
--- a/tests/test-abort-checkin.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-abort-checkin.t Tue Dec 09 13:32:19 2014 -0600
@@ -7,9 +7,11 @@
> EOF
$ abspath=`pwd`/abortcommit.py
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "abortcommit = $abspath" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > abortcommit = $abspath
+ > EOF
$ hg init foo
$ cd foo
--- a/tests/test-add.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-add.t Tue Dec 09 13:32:19 2014 -0600
@@ -126,6 +126,19 @@
M a
? a.orig
+Forgotten file can be added back (as either clean or modified)
+
+ $ hg forget b
+ $ hg add b
+ $ hg st -A b
+ C b
+ $ hg forget b
+ $ echo modified > b
+ $ hg add b
+ $ hg st -A b
+ M b
+ $ hg revert -qC b
+
$ hg add c && echo "unexpected addition of missing file"
c: * (glob)
[1]
--- a/tests/test-addremove.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-addremove.t Tue Dec 09 13:32:19 2014 -0600
@@ -18,7 +18,26 @@
dir/bar_2
foo_2
committed changeset 1:e65414bf35c5
- $ cd ../..
+ $ cd ..
+ $ hg forget foo
+ $ hg -v addremove
+ adding foo
+ $ cd ..
+
+ $ hg init subdir
+ $ cd subdir
+ $ mkdir dir
+ $ cd dir
+ $ touch a.py
+ $ hg addremove 'glob:*.py'
+ adding a.py
+ $ hg forget a.py
+ $ hg addremove -I 'glob:*.py'
+ adding a.py
+ $ hg forget a.py
+ $ hg addremove
+ adding dir/a.py
+ $ cd ..
$ hg init sim
$ cd sim
@@ -45,4 +64,9 @@
adding d
recording removal of a as rename to b (100% similar)
$ hg commit -mb
+ $ cp b c
+ $ hg forget b
+ $ hg addremove -s 50
+ adding b
+ adding c
$ cd ..
--- a/tests/test-ancestor.py Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-ancestor.py Tue Dec 09 13:32:19 2014 -0600
@@ -1,4 +1,133 @@
from mercurial import ancestor, commands, hg, ui, util
+from mercurial.node import nullrev
+import binascii, getopt, math, os, random, sys, time
+
+def buildgraph(rng, nodes=100, rootprob=0.05, mergeprob=0.2, prevprob=0.7):
+ '''nodes: total number of nodes in the graph
+ rootprob: probability that a new node (not 0) will be a root
+ mergeprob: probability that, excluding a root a node will be a merge
+ prevprob: probability that p1 will be the previous node
+
+ return value is a graph represented as an adjacency list.
+ '''
+ graph = [None] * nodes
+ for i in xrange(nodes):
+ if i == 0 or rng.random() < rootprob:
+ graph[i] = [nullrev]
+ elif i == 1:
+ graph[i] = [0]
+ elif rng.random() < mergeprob:
+ if i == 2 or rng.random() < prevprob:
+ # p1 is prev
+ p1 = i - 1
+ else:
+ p1 = rng.randrange(i - 1)
+ p2 = rng.choice(range(0, p1) + range(p1 + 1, i))
+ graph[i] = [p1, p2]
+ elif rng.random() < prevprob:
+ graph[i] = [i - 1]
+ else:
+ graph[i] = [rng.randrange(i - 1)]
+
+ return graph
+
+def buildancestorsets(graph):
+ ancs = [None] * len(graph)
+ for i in xrange(len(graph)):
+ ancs[i] = set([i])
+ if graph[i] == [nullrev]:
+ continue
+ for p in graph[i]:
+ ancs[i].update(ancs[p])
+ return ancs
+
+class naiveincrementalmissingancestors(object):
+ def __init__(self, ancs, bases):
+ self.ancs = ancs
+ self.bases = set(bases)
+ def addbases(self, newbases):
+ self.bases.update(newbases)
+ def removeancestorsfrom(self, revs):
+ for base in self.bases:
+ if base != nullrev:
+ revs.difference_update(self.ancs[base])
+ revs.discard(nullrev)
+ def missingancestors(self, revs):
+ res = set()
+ for rev in revs:
+ if rev != nullrev:
+ res.update(self.ancs[rev])
+ for base in self.bases:
+ if base != nullrev:
+ res.difference_update(self.ancs[base])
+ return sorted(res)
+
+def test_missingancestors(seed, rng):
+ # empirically observed to take around 1 second
+ graphcount = 100
+ testcount = 10
+ inccount = 10
+ nerrs = [0]
+ # the default mu and sigma give us a nice distribution of mostly
+ # single-digit counts (including 0) with some higher ones
+ def lognormrandom(mu, sigma):
+ return int(math.floor(rng.lognormvariate(mu, sigma)))
+
+ def samplerevs(nodes, mu=1.1, sigma=0.8):
+ count = min(lognormrandom(mu, sigma), len(nodes))
+ return rng.sample(nodes, count)
+
+ def err(seed, graph, bases, seq, output, expected):
+ if nerrs[0] == 0:
+ print >> sys.stderr, 'seed:', hex(seed)[:-1]
+ if gerrs[0] == 0:
+ print >> sys.stderr, 'graph:', graph
+ print >> sys.stderr, '* bases:', bases
+ print >> sys.stderr, '* seq: ', seq
+ print >> sys.stderr, '* output: ', output
+ print >> sys.stderr, '* expected:', expected
+ nerrs[0] += 1
+ gerrs[0] += 1
+
+ for g in xrange(graphcount):
+ graph = buildgraph(rng)
+ ancs = buildancestorsets(graph)
+ gerrs = [0]
+ for _ in xrange(testcount):
+ # start from nullrev to include it as a possibility
+ graphnodes = range(nullrev, len(graph))
+ bases = samplerevs(graphnodes)
+
+ # fast algorithm
+ inc = ancestor.incrementalmissingancestors(graph.__getitem__, bases)
+ # reference slow algorithm
+ naiveinc = naiveincrementalmissingancestors(ancs, bases)
+ seq = []
+ revs = []
+ for _ in xrange(inccount):
+ if rng.random() < 0.2:
+ newbases = samplerevs(graphnodes)
+ seq.append(('addbases', newbases))
+ inc.addbases(newbases)
+ naiveinc.addbases(newbases)
+ if rng.random() < 0.4:
+ # larger set so that there are more revs to remove from
+ revs = samplerevs(graphnodes, mu=1.5)
+ seq.append(('removeancestorsfrom', revs))
+ hrevs = set(revs)
+ rrevs = set(revs)
+ inc.removeancestorsfrom(hrevs)
+ naiveinc.removeancestorsfrom(rrevs)
+ if hrevs != rrevs:
+ err(seed, graph, bases, seq, sorted(hrevs),
+ sorted(rrevs))
+ else:
+ revs = samplerevs(graphnodes)
+ seq.append(('missingancestors', revs))
+ h = inc.missingancestors(revs)
+ r = naiveinc.missingancestors(revs)
+ if h != r:
+ err(seed, graph, bases, seq, h, r)
# graph is a dict of child->parent adjacency lists for this graph:
# o 13
@@ -32,55 +161,16 @@
graph = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2], 5: [4], 6: [4],
7: [4], 8: [-1], 9: [6, 7], 10: [5], 11: [3, 7], 12: [9],
13: [8]}
-pfunc = graph.get
-
-class mockchangelog(object):
- parentrevs = graph.get
-
-def runmissingancestors(revs, bases):
- print "%% ancestors of %s and not of %s" % (revs, bases)
- print ancestor.missingancestors(revs, bases, pfunc)
-
-def test_missingancestors():
- # Empty revs
- runmissingancestors([], [1])
- runmissingancestors([], [])
-
- # If bases is empty, it's the same as if it were [nullrev]
- runmissingancestors([12], [])
-
- # Trivial case: revs == bases
- runmissingancestors([0], [0])
- runmissingancestors([4, 5, 6], [6, 5, 4])
-
- # With nullrev
- runmissingancestors([-1], [12])
- runmissingancestors([12], [-1])
-
- # 9 is a parent of 12. 7 is a parent of 9, so an ancestor of 12. 6 is an
- # ancestor of 12 but not of 7.
- runmissingancestors([12], [9])
- runmissingancestors([9], [12])
- runmissingancestors([12, 9], [7])
- runmissingancestors([7, 6], [12])
-
- # More complex cases
- runmissingancestors([10], [11, 12])
- runmissingancestors([11], [10])
- runmissingancestors([11], [10, 12])
- runmissingancestors([12], [10])
- runmissingancestors([12], [11])
- runmissingancestors([10, 11, 12], [13])
- runmissingancestors([13], [10, 11, 12])
def genlazyancestors(revs, stoprev=0, inclusive=False):
print ("%% lazy ancestor set for %s, stoprev = %s, inclusive = %s" %
(revs, stoprev, inclusive))
- return ancestor.lazyancestors(mockchangelog, revs, stoprev=stoprev,
+ return ancestor.lazyancestors(graph.get, revs, stoprev=stoprev,
inclusive=inclusive)
def printlazyancestors(s, l):
- print [n for n in l if n in s]
+ print 'membership: %r' % [n for n in l if n in s]
+ print 'iteration: %r' % list(s)
def test_lazyancestors():
# Empty revs
@@ -134,7 +224,23 @@
print " C returned: %s" % cgcas
print " Python returned: %s" % pygcas
-if __name__ == '__main__':
- test_missingancestors()
+def main():
+ seed = None
+ opts, args = getopt.getopt(sys.argv[1:], 's:', ['seed='])
+ for o, a in opts:
+ if o in ('-s', '--seed'):
+ seed = long(a, base=0) # accepts base 10 or 16 strings
+
+ if seed is None:
+ try:
+ seed = long(binascii.hexlify(os.urandom(16)), 16)
+ except AttributeError:
+ seed = long(time.time() * 1000)
+
+ rng = random.Random(seed)
+ test_missingancestors(seed, rng)
test_lazyancestors()
test_gca()
+
+if __name__ == '__main__':
+ main()
--- a/tests/test-ancestor.py.out Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-ancestor.py.out Tue Dec 09 13:32:19 2014 -0600
@@ -1,48 +1,18 @@
-% ancestors of [] and not of [1]
-[]
-% ancestors of [] and not of []
-[]
-% ancestors of [12] and not of []
-[0, 1, 2, 4, 6, 7, 9, 12]
-% ancestors of [0] and not of [0]
-[]
-% ancestors of [4, 5, 6] and not of [6, 5, 4]
-[]
-% ancestors of [-1] and not of [12]
-[]
-% ancestors of [12] and not of [-1]
-[0, 1, 2, 4, 6, 7, 9, 12]
-% ancestors of [12] and not of [9]
-[12]
-% ancestors of [9] and not of [12]
-[]
-% ancestors of [12, 9] and not of [7]
-[6, 9, 12]
-% ancestors of [7, 6] and not of [12]
-[]
-% ancestors of [10] and not of [11, 12]
-[5, 10]
-% ancestors of [11] and not of [10]
-[3, 7, 11]
-% ancestors of [11] and not of [10, 12]
-[3, 11]
-% ancestors of [12] and not of [10]
-[6, 7, 9, 12]
-% ancestors of [12] and not of [11]
-[6, 9, 12]
-% ancestors of [10, 11, 12] and not of [13]
-[0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12]
-% ancestors of [13] and not of [10, 11, 12]
-[8, 13]
% lazy ancestor set for [], stoprev = 0, inclusive = False
-[]
+membership: []
+iteration: []
% lazy ancestor set for [11, 13], stoprev = 0, inclusive = False
-[7, 8, 3, 4, 1, 0]
+membership: [7, 8, 3, 4, 1, 0]
+iteration: [3, 7, 8, 1, 4, 0, 2]
% lazy ancestor set for [1, 3], stoprev = 0, inclusive = False
-[1, 0]
+membership: [1, 0]
+iteration: [0, 1]
% lazy ancestor set for [11, 13], stoprev = 0, inclusive = True
-[11, 13, 7, 8, 3, 4, 1, 0]
+membership: [11, 13, 7, 8, 3, 4, 1, 0]
+iteration: [11, 13, 3, 7, 8, 1, 4, 0, 2]
% lazy ancestor set for [11, 13], stoprev = 6, inclusive = False
-[7, 8]
+membership: [7, 8]
+iteration: [7, 8]
% lazy ancestor set for [11, 13], stoprev = 6, inclusive = True
-[11, 13, 7, 8]
+membership: [11, 13, 7, 8]
+iteration: [11, 13, 7, 8]
--- a/tests/test-bad-extension.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-bad-extension.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,11 +1,13 @@
$ echo 'raise Exception("bit bucket overflow")' > badext.py
$ abspath=`pwd`/badext.py
- $ echo '[extensions]' >> $HGRCPATH
- $ echo "gpg =" >> $HGRCPATH
- $ echo "hgext.gpg =" >> $HGRCPATH
- $ echo "badext = $abspath" >> $HGRCPATH
- $ echo "badext2 =" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > gpg =
+ > hgext.gpg =
+ > badext = $abspath
+ > badext2 =
+ > EOF
$ hg -q help help
*** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
--- a/tests/test-basic.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-basic.t Tue Dec 09 13:32:19 2014 -0600
@@ -5,6 +5,7 @@
defaults.commit=-d "0 0"
defaults.shelve=--date "0 0"
defaults.tag=-d "0 0"
+ largefiles.usercache=$TESTTMP/.cache/largefiles (glob)
ui.slash=True
ui.interactive=False
ui.mergemarkers=detailed
--- a/tests/test-branches.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-branches.t Tue Dec 09 13:32:19 2014 -0600
@@ -419,10 +419,12 @@
default branch colors:
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "color =" >> $HGRCPATH
- $ echo "[color]" >> $HGRCPATH
- $ echo "mode = ansi" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > color =
+ > [color]
+ > mode = ansi
+ > EOF
$ hg up -C c
3 files updated, 0 files merged, 2 files removed, 0 files unresolved
@@ -444,14 +446,16 @@
\x1b[0;0ma\x1b[0m\x1b[0;33m 5:d8cbc61dbaa6\x1b[0m (inactive) (esc)
\x1b[0;0mdefault\x1b[0m\x1b[0;33m 0:19709c5a4e75\x1b[0m (inactive) (esc)
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "color =" >> $HGRCPATH
- $ echo "[color]" >> $HGRCPATH
- $ echo "branches.active = green" >> $HGRCPATH
- $ echo "branches.closed = blue" >> $HGRCPATH
- $ echo "branches.current = red" >> $HGRCPATH
- $ echo "branches.inactive = magenta" >> $HGRCPATH
- $ echo "log.changeset = cyan" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > color =
+ > [color]
+ > branches.active = green
+ > branches.closed = blue
+ > branches.current = red
+ > branches.inactive = magenta
+ > log.changeset = cyan
+ > EOF
custom branch colors:
--- a/tests/test-bundle2-exchange.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-bundle2-exchange.t Tue Dec 09 13:32:19 2014 -0600
@@ -21,7 +21,9 @@
> publish=False
> [hooks]
> changegroup = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" changegroup"
- > b2x-transactionclose = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" b2x-transactionclose"
+ > b2x-pretransactionclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
+ > b2x-transactionclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
+ > b2x-transactionclose.env = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" b2x-transactionclose"
> EOF
The extension requires a repo (currently unused)
@@ -59,8 +61,10 @@
adding file changes
added 2 changesets with 2 changes to 2 files
1 new obsolescence markers
+ pre-close-tip:9520eea781bc draft
+ postclose-tip:9520eea781bc draft
+ b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
- b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R other log -G
@@ -82,8 +86,10 @@
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
1 new obsolescence markers
+ pre-close-tip:24b6387c8c8c draft
+ postclose-tip:24b6387c8c8c draft
+ b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
changegroup hook: HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
- b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg -R other log -G
o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
@@ -102,6 +108,8 @@
$ hg -R other pull -r 24b6387c8c8c
pulling from $TESTTMP/main (glob)
no changes found
+ pre-close-tip:000000000000 public
+ postclose-tip:24b6387c8c8c public
b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
$ hg -R other log -G
o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
@@ -119,6 +127,8 @@
$ hg -R other pull -r 24b6387c8c8c
pulling from $TESTTMP/main (glob)
no changes found
+ pre-close-tip:24b6387c8c8c public
+ postclose-tip:24b6387c8c8c public
b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
$ hg -R other log -G
o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
@@ -156,14 +166,19 @@
$ hg -R main push other --rev eea13746799a --bookmark book_eea1
pushing to other
searching for changes
+ pre-close-tip:eea13746799a public book_eea1
+ postclose-tip:eea13746799a public book_eea1
+ b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_URL=push
changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_SOURCE=push HG_URL=push
- b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_URL=push
remote: adding changesets
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 0 changes to 0 files (-1 heads)
remote: 1 new obsolescence markers
updating bookmark book_eea1
+ pre-close-tip:02de42196ebe draft book_02de
+ postclose-tip:02de42196ebe draft book_02de
+ b2x-transactionclose hook: HG_SOURCE=push-response HG_URL=file:$TESTTMP/other
$ hg -R other log -G
o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
|\
@@ -189,8 +204,10 @@
added 1 changesets with 1 changes to 1 files (+1 heads)
1 new obsolescence markers
updating bookmark book_02de
+ pre-close-tip:02de42196ebe draft book_02de
+ postclose-tip:02de42196ebe draft book_02de
+ b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
changegroup hook: HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
- b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg -R other debugobsolete
1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
@@ -212,8 +229,10 @@
added 1 changesets with 1 changes to 1 files (+1 heads)
1 new obsolescence markers
updating bookmark book_42cc
+ pre-close-tip:42ccdea3bb16 draft book_42cc
+ postclose-tip:42ccdea3bb16 draft book_42cc
+ b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
changegroup hook: HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
- b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
(run 'hg heads .' to see heads, 'hg merge' to merge)
$ cat main-error.log
$ hg -R other debugobsolete
@@ -234,8 +253,13 @@
remote: added 1 changesets with 1 changes to 1 files
remote: 1 new obsolescence markers
updating bookmark book_5fdd
+ remote: pre-close-tip:5fddd98957c8 draft book_5fdd
+ remote: postclose-tip:5fddd98957c8 draft book_5fdd
+ remote: b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
- remote: b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
+ pre-close-tip:02de42196ebe draft book_02de
+ postclose-tip:02de42196ebe draft book_02de
+ b2x-transactionclose hook: HG_SOURCE=push-response HG_URL=ssh://user@dummy/other
$ hg -R other log -G
o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
|
@@ -274,6 +298,9 @@
remote: added 1 changesets with 1 changes to 1 files
remote: 1 new obsolescence markers
updating bookmark book_32af
+ pre-close-tip:02de42196ebe draft book_02de
+ postclose-tip:02de42196ebe draft book_02de
+ b2x-transactionclose hook: HG_SOURCE=push-response HG_URL=http://localhost:$HGPORT2/
$ cat other-error.log
Check final content.
@@ -304,6 +331,15 @@
6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+(check that no 'pending' files remain)
+
+ $ ls -1 other/.hg/bookmarks*
+ other/.hg/bookmarks
+ $ ls -1 other/.hg/store/phaseroots*
+ other/.hg/store/phaseroots
+ $ ls -1 other/.hg/store/00changelog.i*
+ other/.hg/store/00changelog.i
+
Error Handling
==============
@@ -460,9 +496,9 @@
$ hg -R main push other -r e7ec4e813ba6
pushing to other
searching for changes
+ pre-close-tip:e7ec4e813ba6 draft
transaction abort!
rollback completed
- changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=e7ec4e813ba6b07be2a0516ce1a74bb4e503f91a HG_SOURCE=push HG_URL=push
abort: b2x-pretransactionclose.failpush hook exited with status 1
[255]
@@ -470,9 +506,9 @@
pushing to ssh://user@dummy/other
searching for changes
abort: b2x-pretransactionclose.failpush hook exited with status 1
+ remote: pre-close-tip:e7ec4e813ba6 draft
remote: transaction abort!
remote: rollback completed
- remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=e7ec4e813ba6b07be2a0516ce1a74bb4e503f91a HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
[255]
$ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
@@ -481,4 +517,12 @@
abort: b2x-pretransactionclose.failpush hook exited with status 1
[255]
+(check that no 'pending' files remain)
+ $ ls -1 other/.hg/bookmarks*
+ other/.hg/bookmarks
+ $ ls -1 other/.hg/store/phaseroots*
+ other/.hg/store/phaseroots
+ $ ls -1 other/.hg/store/00changelog.i*
+ other/.hg/store/00changelog.i
+
--- a/tests/test-bundle2-format.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-bundle2-format.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,4 +1,4 @@
-This test is decicated to test the bundle2 container format
+This test is dedicated to test the bundle2 container format
It test multiple existing parts to test different feature of the container. You
probably do not need to touch this test unless you change the binary encoding
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-bundle2-pushback.t Tue Dec 09 13:32:19 2014 -0600
@@ -0,0 +1,111 @@
+ $ cat > bundle2.py << EOF
+ > """A small extension to test bundle2 pushback parts.
+ > Current bundle2 implementation doesn't provide a way to generate those
+ > parts, so they must be created by extensions.
+ > """
+ > from mercurial import bundle2, pushkey, exchange, util
+ > def _newhandlechangegroup(op, inpart):
+ > """This function wraps the changegroup part handler for getbundle.
+ > It issues an additional b2x:pushkey part to send a new
+ > bookmark back to the client"""
+ > result = bundle2.handlechangegroup(op, inpart)
+ > if 'b2x:pushback' in op.reply.capabilities:
+ > params = {'namespace': 'bookmarks',
+ > 'key': 'new-server-mark',
+ > 'old': '',
+ > 'new': 'tip'}
+ > encodedparams = [(k, pushkey.encode(v)) for (k,v) in params.items()]
+ > op.reply.newpart('b2x:pushkey', mandatoryparams=encodedparams)
+ > else:
+ > op.reply.newpart('b2x:output', data='pushback not enabled')
+ > return result
+ > _newhandlechangegroup.params = bundle2.handlechangegroup.params
+ > bundle2.parthandlermapping['b2x:changegroup'] = _newhandlechangegroup
+ > EOF
+
+ $ cat >> $HGRCPATH <<EOF
+ > [ui]
+ > ssh = python "$TESTDIR/dummyssh"
+ > username = nobody <no.reply@example.com>
+ >
+ > [alias]
+ > tglog = log -G -T "{desc} [{phase}:{node|short}]"
+ > EOF
+
+Set up server repository
+
+ $ hg init server
+ $ cd server
+ $ echo c0 > f0
+ $ hg commit -Am 0
+ adding f0
+
+Set up client repository
+
+ $ cd ..
+ $ hg clone ssh://user@dummy/server client -q
+ $ cd client
+
+Enable extension
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > bundle2=$TESTTMP/bundle2.py
+ > [experimental]
+ > bundle2-exp = True
+ > EOF
+
+Without config
+
+ $ cd ../client
+ $ echo c1 > f1
+ $ hg commit -Am 1
+ adding f1
+ $ hg push
+ pushing to ssh://user@dummy/server
+ searching for changes
+ remote: pushback not enabled
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+ $ hg bookmark
+ no bookmarks set
+
+ $ cd ../server
+ $ hg tglog
+ o 1 [public:2b9c7234e035]
+ |
+ @ 0 [public:6cee5c8f3e5b]
+
+
+
+
+With config
+
+ $ cd ../client
+ $ echo '[experimental]' >> .hg/hgrc
+ $ echo 'bundle2.pushback = True' >> .hg/hgrc
+ $ echo c2 > f2
+ $ hg commit -Am 2
+ adding f2
+ $ hg push
+ pushing to ssh://user@dummy/server
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+ $ hg bookmark
+ new-server-mark 2:0a76dfb2e179
+
+ $ cd ../server
+ $ hg tglog
+ o 2 [public:0a76dfb2e179]
+ |
+ o 1 [public:2b9c7234e035]
+ |
+ @ 0 [public:6cee5c8f3e5b]
+
+
+
+
--- a/tests/test-check-code.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-check-code.t Tue Dec 09 13:32:19 2014 -0600
@@ -261,7 +261,7 @@
> print _("concatenating " " by " " space %s" % v)
> print _("concatenating " + " by " + " '+' %s" % v)
>
- > print _("maping operation in different line %s"
+ > print _("mapping operation in different line %s"
> % v)
>
> print _(
@@ -278,7 +278,7 @@
> print _("concatenating " + " by " + " '+' %s" % v)
don't use % inside _()
./map-inside-gettext.py:6:
- > print _("maping operation in different line %s"
+ > print _("mapping operation in different line %s"
don't use % inside _()
./map-inside-gettext.py:9:
> print _(
--- a/tests/test-command-template.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-command-template.t Tue Dec 09 13:32:19 2014 -0600
@@ -525,6 +525,25 @@
}
]
+honor --git but not format-breaking diffopts
+ $ hg --config diff.noprefix=True log --git -vpr . -Tjson
+ [
+ {
+ "rev": 8,
+ "node": "95c24699272ef57d062b8bccc32c878bf841784a",
+ "branch": "default",
+ "phase": "draft",
+ "user": "test",
+ "date": [1577872860, 0],
+ "desc": "third",
+ "bookmarks": [],
+ "tags": ["tip"],
+ "parents": ["29114dbae42b9f078cf2714dbe3a86bba8ec7453"],
+ "files": ["fourth", "second", "third"],
+ "diff": "diff --git a/second b/fourth\nrename from second\nrename to fourth\ndiff --git a/third b/third\nnew file mode 100644\n--- /dev/null\n+++ b/third\n@@ -0,0 +1,1 @@\n+third\n"
+ }
+ ]
+
$ hg log -T json
[
{
--- a/tests/test-commandserver.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-commandserver.t Tue Dec 09 13:32:19 2014 -0600
@@ -178,6 +178,7 @@
defaults.commit=-d "0 0"
defaults.shelve=--date "0 0"
defaults.tag=-d "0 0"
+ largefiles.usercache=$TESTTMP/.cache/largefiles
ui.slash=True
ui.interactive=False
ui.mergemarkers=detailed
@@ -492,6 +493,7 @@
foo
$ cat <<EOF > dbgui.py
+ > import os, sys
> from mercurial import cmdutil, commands
> cmdtable = {}
> command = cmdutil.command(cmdtable)
@@ -501,6 +503,14 @@
> @command("debugprompt", norepo=True)
> def debugprompt(ui):
> ui.write("%s\\n" % ui.prompt("prompt:"))
+ > @command("debugreadstdin", norepo=True)
+ > def debugreadstdin(ui):
+ > ui.write("read: %r\n" % sys.stdin.read(1))
+ > @command("debugwritestdout", norepo=True)
+ > def debugwritestdout(ui):
+ > os.write(1, "low-level stdout fd and\n")
+ > sys.stdout.write("stdout should be redirected to /dev/null\n")
+ > sys.stdout.flush()
> EOF
$ cat <<EOF >> .hg/hgrc
> [extensions]
@@ -518,10 +528,36 @@
... runcommand(server, ['debugprompt', '--config',
... 'ui.interactive=True'],
... input=cStringIO.StringIO('5678\n'))
+ ... runcommand(server, ['debugreadstdin'])
+ ... runcommand(server, ['debugwritestdout'])
*** runcommand debuggetpass --config ui.interactive=True
password: 1234
*** runcommand debugprompt --config ui.interactive=True
prompt: 5678
+ *** runcommand debugreadstdin
+ read: ''
+ *** runcommand debugwritestdout
+
+
+run commandserver in commandserver, which is silly but should work:
+
+ >>> import cStringIO
+ >>> from hgclient import readchannel, runcommand, check
+ >>> @check
+ ... def nested(server):
+ ... print '%c, %r' % readchannel(server)
+ ... class nestedserver(object):
+ ... stdin = cStringIO.StringIO('getencoding\n')
+ ... stdout = cStringIO.StringIO()
+ ... runcommand(server, ['serve', '--cmdserver', 'pipe'],
+ ... output=nestedserver.stdout, input=nestedserver.stdin)
+ ... nestedserver.stdout.seek(0)
+ ... print '%c, %r' % readchannel(nestedserver) # hello
+ ... print '%c, %r' % readchannel(nestedserver) # getencoding
+ o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
+ *** runcommand serve --cmdserver pipe
+ o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
+ r, '*' (glob)
start without repository:
--- a/tests/test-commit-amend.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-commit-amend.t Tue Dec 09 13:32:19 2014 -0600
@@ -447,7 +447,7 @@
$ hg up 11
5 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg graft 12
- grafting revision 12
+ grafting 12:2647734878ef "fork" (tip)
$ hg ci --amend -m 'graft amend'
saved backup bundle to $TESTTMP/.hg/strip-backup/bd010aea3f39-amend-backup.hg (glob)
$ hg log -r . --debug | grep extra
@@ -889,9 +889,9 @@
The way mercurial does amends is to create a temporary commit (rev 3) and then
fold the new and old commits together into another commit (rev 4). During this
-process, findlimit is called to check how far back to look for the transitive
+process, _findlimit is called to check how far back to look for the transitive
closure of file copy information, but due to the divergence of the filelog
-and changelog graph topologies, before findlimit was fixed, it returned a rev
+and changelog graph topologies, before _findlimit was fixed, it returned a rev
which was not far enough back in this case.
$ hg mv a1 a2
$ hg status --copies --rev 0
--- a/tests/test-completion.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-completion.t Tue Dec 09 13:32:19 2014 -0600
@@ -202,7 +202,7 @@
annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, ignore-all-space, ignore-space-change, ignore-blank-lines, include, exclude, template
clone: noupdate, updaterev, rev, branch, pull, uncompressed, ssh, remotecmd, insecure
commit: addremove, close-branch, amend, secret, edit, include, exclude, message, logfile, date, user, subrepos
- diff: rev, change, text, git, nodates, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, unified, stat, include, exclude, subrepos
+ diff: rev, change, text, git, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, unified, stat, include, exclude, subrepos
export: output, switch-parent, rev, text, git, nodates
forget: include, exclude
init: ssh, remotecmd, insecure
@@ -210,7 +210,7 @@
merge: force, rev, preview, tool
pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
push: force, rev, bookmark, branch, new-branch, ssh, remotecmd, insecure
- remove: after, force, include, exclude
+ remove: after, force, subrepos, include, exclude
serve: accesslog, daemon, daemon-pipefds, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate
status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, copies, print0, rev, change, include, exclude, subrepos, template
summary: remote
--- a/tests/test-config.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-config.t Tue Dec 09 13:32:19 2014 -0600
@@ -7,7 +7,7 @@
> novaluekey
> EOF
$ hg showconfig
- hg: parse error at $TESTTMP/.hg/hgrc:1: novaluekey
+ hg: parse error at $TESTTMP/.hg/hgrc:1: novaluekey (glob)
[255]
Invalid syntax: no key
@@ -16,7 +16,7 @@
> =nokeyvalue
> EOF
$ hg showconfig
- hg: parse error at $TESTTMP/.hg/hgrc:1: =nokeyvalue
+ hg: parse error at $TESTTMP/.hg/hgrc:1: =nokeyvalue (glob)
[255]
Test hint about invalid syntax from leading white space
@@ -25,7 +25,7 @@
> key=value
> EOF
$ hg showconfig
- hg: parse error at $TESTTMP/.hg/hgrc:1: key=value
+ hg: parse error at $TESTTMP/.hg/hgrc:1: key=value (glob)
unexpected leading whitespace
[255]
@@ -34,7 +34,7 @@
> key=value
> EOF
$ hg showconfig
- hg: parse error at $TESTTMP/.hg/hgrc:1: [section]
+ hg: parse error at $TESTTMP/.hg/hgrc:1: [section] (glob)
unexpected leading whitespace
[255]
@@ -44,9 +44,11 @@
Test case sensitive configuration
- $ echo '[Section]' >> $HGRCPATH
- $ echo 'KeY = Case Sensitive' >> $HGRCPATH
- $ echo 'key = lower case' >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [Section]
+ > KeY = Case Sensitive
+ > key = lower case
+ > EOF
$ hg showconfig Section
Section.KeY=Case Sensitive
--- a/tests/test-convert-clonebranches.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-convert-clonebranches.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,8 +1,10 @@
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "convert = " >> $HGRCPATH
- $ echo "[convert]" >> $HGRCPATH
- $ echo "hg.tagsbranch=0" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > convert =
+ > [convert]
+ > hg.tagsbranch = 0
+ > EOF
$ hg init source
$ cd source
$ echo a > a
--- a/tests/test-convert-cvs-branch.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-convert-cvs-branch.t Tue Dec 09 13:32:19 2014 -0600
@@ -7,10 +7,12 @@
> {
> cvs -f "$@" > /dev/null
> }
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "convert = " >> $HGRCPATH
- $ echo "[convert]" >> $HGRCPATH
- $ echo "cvsps.cache=0" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > convert =
+ > [convert]
+ > cvsps.cache = 0
+ > EOF
create cvs repository
--- a/tests/test-convert-cvs-detectmerge.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-convert-cvs-detectmerge.t Tue Dec 09 13:32:19 2014 -0600
@@ -23,11 +23,13 @@
XXX copied from test-convert-cvs-synthetic
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "convert = " >> $HGRCPATH
- $ echo "[convert]" >> $HGRCPATH
- $ echo "cvsps.cache=0" >> $HGRCPATH
- $ echo "cvsps.mergefrom=\[MERGE from (\S+)\]" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > convert =
+ > [convert]
+ > cvsps.cache = 0
+ > cvsps.mergefrom = \[MERGE from (\S+)\]
+ > EOF
create cvs repository with one project
--- a/tests/test-convert-cvs.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-convert-cvs.t Tue Dec 09 13:32:19 2014 -0600
@@ -18,9 +18,11 @@
> print "%s hook: %d changesets"%(hooktype,len(changesets))
> EOF
$ hookpath=`pwd`
- $ echo "[hooks]" >> $HGRCPATH
- $ echo "cvslog=python:$hookpath/cvshooks.py:cvslog" >> $HGRCPATH
- $ echo "cvschangesets=python:$hookpath/cvshooks.py:cvschangesets" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [hooks]
+ > cvslog = python:$hookpath/cvshooks.py:cvslog
+ > cvschangesets = python:$hookpath/cvshooks.py:cvschangesets
+ > EOF
create cvs repository
--- a/tests/test-convert-hg-svn.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-convert-hg-svn.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,8 +1,10 @@
#require svn svn-bindings
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "convert = " >> $HGRCPATH
- $ echo "mq = " >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > convert =
+ > mq =
+ > EOF
$ SVNREPOPATH=`pwd`/svn-repo
#if windows
--- a/tests/test-convert-tagsbranch-topology.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-convert-tagsbranch-topology.t Tue Dec 09 13:32:19 2014 -0600
@@ -4,11 +4,13 @@
$ echo "autocrlf = false" >> $HOME/.gitconfig
$ echo "[core]" >> $HOME/.gitconfig
$ echo "autocrlf = false" >> $HOME/.gitconfig
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "convert=" >> $HGRCPATH
- $ echo '[convert]' >> $HGRCPATH
- $ echo 'hg.usebranchnames = True' >> $HGRCPATH
- $ echo 'hg.tagsbranch = tags-update' >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > convert =
+ > [convert]
+ > hg.usebranchnames = True
+ > hg.tagsbranch = tags-update
+ > EOF
$ GIT_AUTHOR_NAME='test'; export GIT_AUTHOR_NAME
$ GIT_AUTHOR_EMAIL='test@example.org'; export GIT_AUTHOR_EMAIL
$ GIT_AUTHOR_DATE="2007-01-01 00:00:00 +0000"; export GIT_AUTHOR_DATE
--- a/tests/test-copy.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-copy.t Tue Dec 09 13:32:19 2014 -0600
@@ -184,7 +184,7 @@
rev offset length ..... linkrev nodeid p1 p2 (re)
0 0 69 ..... 1 7711d36246cc 000000000000 000000000000 (re)
1 69 6 ..... 2 bdf70a2b8d03 7711d36246cc 000000000000 (re)
- 2 75 81 ..... 3 b2558327ea8d 000000000000 000000000000 (re)
+ 2 75 71 ..... 3 b2558327ea8d 000000000000 000000000000 (re)
should match
$ hg debugindex foo
rev offset length ..... linkrev nodeid p1 p2 (re)
--- a/tests/test-debugcommands.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-debugcommands.t Tue Dec 09 13:32:19 2014 -0600
@@ -24,6 +24,40 @@
full revision size (min/max/avg) : 44 / 44 / 44
delta size (min/max/avg) : 0 / 0 / 0
+Test max chain len
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > maxchainlen=4
+ > EOF
+
+ $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
+ $ hg ci -m a
+ $ printf "b\n" >> a
+ $ hg ci -m a
+ $ printf "c\n" >> a
+ $ hg ci -m a
+ $ printf "d\n" >> a
+ $ hg ci -m a
+ $ printf "e\n" >> a
+ $ hg ci -m a
+ $ printf "f\n" >> a
+ $ hg ci -m a
+ $ printf 'g\n' >> a
+ $ hg ci -m a
+ $ printf 'h\n' >> a
+ $ hg ci -m a
+ $ hg debugrevlog -d a
+ # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
+ 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
+ 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
+ 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
+ 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
+ 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
+ 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
+ 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
+ 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
+ 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
+ $ cd ..
Test internal debugstacktrace command
--- a/tests/test-diff-color.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-diff-color.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,9 +1,11 @@
Setup
- $ echo "[color]" >> $HGRCPATH
- $ echo "mode = ansi" >> $HGRCPATH
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "color=" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [color]
+ > mode = ansi
+ > [extensions]
+ > color =
+ > EOF
$ hg init repo
$ cd repo
$ cat > a <<EOF
@@ -66,11 +68,13 @@
$ hg diff --stat --color=always
a | 2 \x1b[0;32m+\x1b[0m\x1b[0;31m-\x1b[0m (esc)
1 files changed, 1 insertions(+), 1 deletions(-)
- $ echo "record=" >> $HGRCPATH
- $ echo "[ui]" >> $HGRCPATH
- $ echo "interactive=true" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "git=True" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > record =
+ > [ui]
+ > interactive = true
+ > [diff]
+ > git = True
+ > EOF
#if execbit
--- a/tests/test-diff-unified.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-diff-unified.t Tue Dec 09 13:32:19 2014 -0600
@@ -89,6 +89,64 @@
abort: diff context lines count must be an integer, not 'foo'
[255]
+noprefix config and option
+
+ $ hg --config diff.noprefix=True diff --nodates
+ diff -r cf9f4ba66af2 a
+ --- a
+ +++ a
+ @@ -2,7 +2,7 @@
+ c
+ a
+ a
+ -b
+ +dd
+ a
+ a
+ c
+ $ hg diff --noprefix --nodates
+ diff -r cf9f4ba66af2 a
+ --- a
+ +++ a
+ @@ -2,7 +2,7 @@
+ c
+ a
+ a
+ -b
+ +dd
+ a
+ a
+ c
+
+noprefix config disabled in plain mode, but option still enabled
+
+ $ HGPLAIN=1 hg --config diff.noprefix=True diff --nodates
+ diff -r cf9f4ba66af2 a
+ --- a/a
+ +++ b/a
+ @@ -2,7 +2,7 @@
+ c
+ a
+ a
+ -b
+ +dd
+ a
+ a
+ c
+ $ HGPLAIN=1 hg diff --noprefix --nodates
+ diff -r cf9f4ba66af2 a
+ --- a
+ +++ a
+ @@ -2,7 +2,7 @@
+ c
+ a
+ a
+ -b
+ +dd
+ a
+ a
+ c
+
$ cd ..
@@ -171,6 +229,39 @@
-a
+b
+Git diff with noprefix
+
+ $ hg --config diff.noprefix=True diff --git --nodates
+ diff --git f1 f 1
+ rename from f1
+ rename to f 1
+ --- f1
+ +++ f 1
+ @@ -1,1 +1,1 @@
+ -a
+ +b
+
+noprefix config disabled in plain mode, but option still enabled
+
+ $ HGPLAIN=1 hg --config diff.noprefix=True diff --git --nodates
+ diff --git a/f1 b/f 1
+ rename from f1
+ rename to f 1
+ --- a/f1
+ +++ b/f 1
+ @@ -1,1 +1,1 @@
+ -a
+ +b
+ $ HGPLAIN=1 hg diff --git --noprefix --nodates
+ diff --git f1 f 1
+ rename from f1
+ rename to f 1
+ --- f1
+ +++ f 1
+ @@ -1,1 +1,1 @@
+ -a
+ +b
+
Regular diff --nodates, file deletion
$ hg ci -m addspace
--- a/tests/test-diff-upgrade.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-diff-upgrade.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,9 +1,11 @@
#require execbit
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "autodiff=$TESTDIR/autodiff.py" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=1" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > autodiff = $TESTDIR/autodiff.py
+ > [diff]
+ > nodates = 1
+ > EOF
$ hg init repo
$ cd repo
--- a/tests/test-eol.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-eol.t Tue Dec 09 13:32:19 2014 -0600
@@ -408,10 +408,12 @@
Test cleverencode: and cleverdecode: aliases for win32text extension
- $ echo '[encode]' >> $HGRCPATH
- $ echo '**.txt = cleverencode:' >> $HGRCPATH
- $ echo '[decode]' >> $HGRCPATH
- $ echo '**.txt = cleverdecode:' >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [encode]
+ > **.txt = cleverencode:
+ > [decode]
+ > **.txt = cleverdecode:
+ > EOF
$ hg init win32compat
$ cd win32compat
--- a/tests/test-eolfilename.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-eolfilename.t Tue Dec 09 13:32:19 2014 -0600
@@ -59,10 +59,12 @@
$ hg init bar
$ cd bar
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "color=" >> $HGRCPATH
- $ echo "[color]" >> $HGRCPATH
- $ echo "mode = ansi" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > color =
+ > [color]
+ > mode = ansi
+ > EOF
$ A=`printf 'foo\nbar'`
$ B=`printf 'foo\nbar.baz'`
$ touch "$A"
--- a/tests/test-export.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-export.t Tue Dec 09 13:32:19 2014 -0600
@@ -176,10 +176,12 @@
[255]
Check for color output
- $ echo "[color]" >> $HGRCPATH
- $ echo "mode = ansi" >> $HGRCPATH
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "color=" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [color]
+ > mode = ansi
+ > [extensions]
+ > color =
+ > EOF
$ hg export --color always --nodates tip
# HG changeset patch
--- a/tests/test-extdiff.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-extdiff.t Tue Dec 09 13:32:19 2014 -0600
@@ -16,11 +16,13 @@
Only in a: b
[1]
- $ echo "[extdiff]" >> $HGRCPATH
- $ echo "cmd.falabala=echo" >> $HGRCPATH
- $ echo "opts.falabala=diffing" >> $HGRCPATH
- $ echo "cmd.edspace=echo" >> $HGRCPATH
- $ echo 'opts.edspace="name <user@example.com>"' >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extdiff]
+ > cmd.falabala = echo
+ > opts.falabala = diffing
+ > cmd.edspace = echo
+ > opts.edspace = "name <user@example.com>"
+ > EOF
$ hg falabala
diffing a.000000000000 a
@@ -190,6 +192,26 @@
*/extdiff.*/a.8a5febb7f867/a a.34eed99112ab/a (glob)
[1]
+Fallback to merge-tools.tool.executable|regkey
+ $ mkdir dir
+ $ cat > 'dir/tool.sh' << EOF
+ > #!/bin/sh
+ > echo "** custom diff **"
+ > EOF
+ $ chmod +x dir/tool.sh
+ $ tool=`pwd`/dir/tool.sh
+ $ hg --debug tl --config extdiff.tl= --config merge-tools.tl.executable=$tool
+ making snapshot of 2 files from rev * (glob)
+ a
+ b
+ making snapshot of 2 files from working directory
+ a
+ b
+ running "'$TESTTMP/a/dir/tool.sh' 'a.*' 'a'" in */extdiff.* (glob)
+ ** custom diff **
+ cleaning up temp directory
+ [1]
+
$ cd ..
#endif
--- a/tests/test-extension.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-extension.t Tue Dec 09 13:32:19 2014 -0600
@@ -424,10 +424,9 @@
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
- # add new command called vdiff, runs kdiff3
- vdiff = kdiff3
-
- # add new command called meld, runs meld (no need to name twice)
+ # add new command called meld, runs meld (no need to name twice). If
+ # the meld executable is not available, the meld tool in [merge-tools]
+ # will be used, if available
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
@@ -558,11 +557,13 @@
> "yet another debug command"
> ui.write("%s\n" % '\n'.join([x for x, y in extensions.extensions()]))
> EOF
- $ echo "debugissue811 = $debugpath" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "strip=" >> $HGRCPATH
- $ echo "hgext.mq=" >> $HGRCPATH
- $ echo "hgext/mq=" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > debugissue811 = $debugpath
+ > mq =
+ > strip =
+ > hgext.mq =
+ > hgext/mq =
+ > EOF
Show extensions:
(note that mq force load strip, also checking it's not loaded twice)
@@ -813,9 +814,11 @@
$ hg -q -R pull-src1 pull src
reposetup() for $TESTTMP/reposetup-test/src (glob)
- $ echo '[extensions]' >> $HGRCPATH
- $ echo '# disable extension globally and explicitly' >> $HGRCPATH
- $ echo 'reposetuptest = !' >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > # disable extension globally and explicitly
+ > reposetuptest = !
+ > EOF
$ hg clone -U src clone-dst2
reposetup() for $TESTTMP/reposetup-test/src (glob)
$ hg init push-dst2
@@ -825,9 +828,11 @@
$ hg -q -R pull-src2 pull src
reposetup() for $TESTTMP/reposetup-test/src (glob)
- $ echo '[extensions]' >> $HGRCPATH
- $ echo '# enable extension globally' >> $HGRCPATH
- $ echo "reposetuptest = $TESTTMP/reposetuptest.py" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > # enable extension globally
+ > reposetuptest = $TESTTMP/reposetuptest.py
+ > EOF
$ hg clone -U src clone-dst3
reposetup() for $TESTTMP/reposetup-test/src (glob)
reposetup() for $TESTTMP/reposetup-test/clone-dst3 (glob)
@@ -863,9 +868,11 @@
$ hg --config extensions.reposetuptest=! init pull-src5
$ hg --config extensions.reposetuptest=! -q -R pull-src5 pull src
- $ echo '[extensions]' >> $HGRCPATH
- $ echo '# disable extension globally and explicitly' >> $HGRCPATH
- $ echo 'reposetuptest = !' >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > # disable extension globally and explicitly
+ > reposetuptest = !
+ > EOF
$ hg init parent
$ hg init parent/sub1
$ echo 1 > parent/sub1/1
--- a/tests/test-graft.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-graft.t Tue Dec 09 13:32:19 2014 -0600
@@ -50,20 +50,20 @@
Can't graft ancestor:
$ hg graft 1 2
- skipping ancestor revision 1
- skipping ancestor revision 2
+ skipping ancestor revision 1:5d205f8b35b6
+ skipping ancestor revision 2:5c095ad7e90f
[255]
Specify revisions with -r:
$ hg graft -r 1 -r 2
- skipping ancestor revision 1
- skipping ancestor revision 2
+ skipping ancestor revision 1:5d205f8b35b6
+ skipping ancestor revision 2:5c095ad7e90f
[255]
$ hg graft -r 1 2
- skipping ancestor revision 2
- skipping ancestor revision 1
+ skipping ancestor revision 2:5c095ad7e90f
+ skipping ancestor revision 1:5d205f8b35b6
[255]
Can't graft with dirty wd:
@@ -82,7 +82,7 @@
A b
R a
$ HGEDITOR=cat hg graft 2 -u foo --edit
- grafting revision 2
+ grafting 2:5c095ad7e90f "2"
merging a and b to b
2
@@ -132,17 +132,17 @@
$ hg graft 1 5 4 3 'merge()' 2 -n
skipping ungraftable merge revision 6
- skipping revision 2 (already grafted to 7)
- grafting revision 1
- grafting revision 5
- grafting revision 4
- grafting revision 3
+ skipping revision 2:5c095ad7e90f (already grafted to 7:ef0ef43d49e7)
+ grafting 1:5d205f8b35b6 "1"
+ grafting 5:97f8bfe72746 "5"
+ grafting 4:9c233e8e184d "4"
+ grafting 3:4c60f11aa304 "3"
$ HGEDITOR=cat hg graft 1 5 4 3 'merge()' 2 --debug
skipping ungraftable merge revision 6
scanning for duplicate grafts
- skipping revision 2 (already grafted to 7)
- grafting revision 1
+ skipping revision 2:5c095ad7e90f (already grafted to 7:ef0ef43d49e7)
+ grafting 1:5d205f8b35b6 "1"
searching for copies back to rev 1
unmatched files in local:
b
@@ -160,7 +160,7 @@
my b@ef0ef43d49e7+ other a@5d205f8b35b6 ancestor a@68795b066622
premerge successful
b
- grafting revision 5
+ grafting 5:97f8bfe72746 "5"
searching for copies back to rev 1
resolving manifests
branchmerge: True, force: True, partial: False
@@ -168,9 +168,9 @@
e: remote is newer -> g
getting e
updating: e 1/1 files (100.00%)
- b: keep -> k
+ b: remote unchanged -> k
e
- grafting revision 4
+ grafting 4:9c233e8e184d "4"
searching for copies back to rev 1
resolving manifests
branchmerge: True, force: True, partial: False
@@ -179,7 +179,7 @@
d: remote is newer -> g
getting d
updating: d 1/2 files (50.00%)
- b: keep -> k
+ b: remote unchanged -> k
e: versions differ -> m
updating: e 2/2 files (100.00%)
picked tool 'internal:merge' for e (binary False symlink False)
@@ -213,10 +213,10 @@
$ hg graft 1 5 4 3 'merge()' 2
skipping ungraftable merge revision 6
- skipping revision 2 (already grafted to 7)
- skipping revision 1 (already grafted to 8)
- skipping revision 5 (already grafted to 9)
- grafting revision 4
+ skipping revision 2:5c095ad7e90f (already grafted to 7:ef0ef43d49e7)
+ skipping revision 1:5d205f8b35b6 (already grafted to 8:6b9e5368ca4e)
+ skipping revision 5:97f8bfe72746 (already grafted to 9:1905859650ec)
+ grafting 4:9c233e8e184d "4"
merging e
warning: conflicts during merge.
merging e incomplete! (edit conflicts, then use 'hg resolve --mark')
@@ -227,7 +227,7 @@
Continue without resolve should fail:
$ hg graft -c
- grafting revision 4
+ grafting 4:9c233e8e184d "4"
abort: unresolved merge conflicts (see hg help resolve)
[255]
@@ -250,8 +250,8 @@
Continue for real, clobber usernames
$ hg graft -c -U
- grafting revision 4
- grafting revision 3
+ grafting 4:9c233e8e184d "4"
+ grafting 3:4c60f11aa304 "3"
Compare with original:
@@ -299,7 +299,7 @@
$ hg ci -m 7
created new head
$ hg graft 7
- grafting revision 7
+ grafting 7:ef0ef43d49e7 "2"
$ hg log -r 7 --template '{rev}:{node}\n'
7:ef0ef43d49e79e81ddafdc7997401ba0041efc82
@@ -326,31 +326,31 @@
Disallow grafting an already grafted cset onto its original branch
$ hg up -q 6
$ hg graft 7
- skipping already grafted revision 7 (was grafted from 2)
+ skipping already grafted revision 7:ef0ef43d49e7 (was grafted from 2:5c095ad7e90f)
[255]
Disallow grafting already grafted csets with the same origin onto each other
$ hg up -q 13
$ hg graft 2
- skipping revision 2 (already grafted to 13)
+ skipping revision 2:5c095ad7e90f (already grafted to 13:9db0f28fd374)
[255]
$ hg graft 7
- skipping already grafted revision 7 (13 also has origin 2)
+ skipping already grafted revision 7:ef0ef43d49e7 (13:9db0f28fd374 also has origin 2:5c095ad7e90f)
[255]
$ hg up -q 7
$ hg graft 2
- skipping revision 2 (already grafted to 7)
+ skipping revision 2:5c095ad7e90f (already grafted to 7:ef0ef43d49e7)
[255]
$ hg graft tip
- skipping already grafted revision 13 (7 also has origin 2)
+ skipping already grafted revision 13:9db0f28fd374 (7:ef0ef43d49e7 also has origin 2:5c095ad7e90f)
[255]
Graft with --log
$ hg up -Cq 1
$ hg graft 3 --log -u foo
- grafting revision 3
+ grafting 3:4c60f11aa304 "3"
warning: can't find ancestor for 'c' copied from 'b'!
$ hg log --template '{rev} {parents} {desc}\n' -r tip
14 1:5d205f8b35b6 3
@@ -361,39 +361,50 @@
$ echo b > a
$ hg ci -m 8
created new head
- $ echo a > a
+ $ echo c > a
$ hg ci -m 9
$ hg graft 1 --tool internal:fail
- grafting revision 1
+ grafting 1:5d205f8b35b6 "1"
abort: unresolved conflicts, can't continue
(use hg resolve and hg graft --continue)
[255]
$ hg resolve --all
merging a
+ warning: conflicts during merge.
+ merging a incomplete! (edit conflicts, then use 'hg resolve --mark')
+ [1]
+ $ cat a
+ <<<<<<< local: aaa4406d4f0a - test: 9
+ c
+ =======
+ b
+ >>>>>>> other: 5d205f8b35b6 - bar: 1
+ $ echo b > a
+ $ hg resolve -m a
(no more unresolved files)
$ hg graft -c
- grafting revision 1
+ grafting 1:5d205f8b35b6 "1"
$ hg export tip --git
# HG changeset patch
# User bar
# Date 0 0
# Thu Jan 01 00:00:00 1970 +0000
- # Node ID 64ecd9071ce83c6e62f538d8ce7709d53f32ebf7
- # Parent 4bdb9a9d0b84ffee1d30f0dfc7744cade17aa19c
+ # Node ID f67661df0c4804d301f064f332b57e7d5ddaf2be
+ # Parent aaa4406d4f0ae9befd6e58c82ec63706460cbca6
1
diff --git a/a b/a
--- a/a
+++ b/a
@@ -1,1 +1,1 @@
- -a
+ -c
+b
Resolve conflicted graft with rename
$ echo c > a
$ hg ci -m 10
$ hg graft 2 --tool internal:fail
- grafting revision 2
+ grafting 2:5c095ad7e90f "2"
abort: unresolved conflicts, can't continue
(use hg resolve and hg graft --continue)
[255]
@@ -401,14 +412,14 @@
merging a and b to b
(no more unresolved files)
$ hg graft -c
- grafting revision 2
+ grafting 2:5c095ad7e90f "2"
$ hg export tip --git
# HG changeset patch
# User test
# Date 0 0
# Thu Jan 01 00:00:00 1970 +0000
- # Node ID 2e80e1351d6ed50302fe1e05f8bd1d4d412b6e11
- # Parent e5a51ae854a8bbaaf25cc5c6a57ff46042dadbb4
+ # Node ID 9627f653b421c61fc1ea4c4e366745070fa3d2bc
+ # Parent ee295f490a40b97f3d18dd4c4f1c8936c233b612
2
diff --git a/a b/b
@@ -537,12 +548,12 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: 3
- changeset: 17:64ecd9071ce8
+ changeset: 17:f67661df0c48
user: bar
date: Thu Jan 01 00:00:00 1970 +0000
summary: 1
- changeset: 19:2e80e1351d6e
+ changeset: 19:9627f653b421
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 2
@@ -566,7 +577,7 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: 2
- changeset: 19:2e80e1351d6e
+ changeset: 19:9627f653b421
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 2
@@ -608,7 +619,7 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: 2
- changeset: 19:2e80e1351d6e
+ changeset: 19:9627f653b421
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: 2
@@ -630,26 +641,26 @@
graft works on complex revset
$ hg graft 'origin(13) or destination(origin(13))'
- skipping ancestor revision 21
- skipping ancestor revision 22
- skipping revision 2 (already grafted to 22)
- grafting revision 7
- grafting revision 13
- grafting revision 19
+ skipping ancestor revision 21:7e61b508e709
+ skipping ancestor revision 22:1313d0a825e2
+ skipping revision 2:5c095ad7e90f (already grafted to 22:1313d0a825e2)
+ grafting 7:ef0ef43d49e7 "2"
+ grafting 13:9db0f28fd374 "2"
+ grafting 19:9627f653b421 "2"
merging b
graft with --force (still doesn't graft merges)
$ hg graft 19 0 6
skipping ungraftable merge revision 6
- skipping ancestor revision 0
- skipping already grafted revision 19 (22 also has origin 2)
+ skipping ancestor revision 0:68795b066622
+ skipping already grafted revision 19:9627f653b421 (22:1313d0a825e2 also has origin 2:5c095ad7e90f)
[255]
$ hg graft 19 0 6 --force
skipping ungraftable merge revision 6
- grafting revision 19
+ grafting 19:9627f653b421 "2"
merging b
- grafting revision 0
+ grafting 0:68795b066622 "0"
graft --force after backout
@@ -659,29 +670,33 @@
reverting a
changeset 29:484c03b8dfa4 backs out changeset 28:6c56f0f7f033
$ hg graft 28
- skipping ancestor revision 28
+ skipping ancestor revision 28:6c56f0f7f033
[255]
$ hg graft 28 --force
- grafting revision 28
+ grafting 28:6c56f0f7f033 "28"
merging a
$ cat a
abc
graft --continue after --force
- $ hg backout 30
- reverting a
- changeset 31:3b96c18b7a1b backs out changeset 30:8f539994be33
+ $ echo def > a
+ $ hg ci -m 31
$ hg graft 28 --force --tool internal:fail
- grafting revision 28
+ grafting 28:6c56f0f7f033 "28"
abort: unresolved conflicts, can't continue
(use hg resolve and hg graft --continue)
[255]
$ hg resolve --all
merging a
+ warning: conflicts during merge.
+ merging a incomplete! (edit conflicts, then use 'hg resolve --mark')
+ [1]
+ $ echo abc > a
+ $ hg resolve -m a
(no more unresolved files)
$ hg graft -c
- grafting revision 28
+ grafting 28:6c56f0f7f033 "28"
$ cat a
abc
@@ -693,5 +708,14 @@
$ hg --config extensions.mq= strip 2
saved backup bundle to $TESTTMP/a/.hg/strip-backup/5c095ad7e90f-backup.hg (glob)
$ hg graft tmp
- skipping already grafted revision 8 (2 also has unknown origin 5c095ad7e90f871700f02dd1fa5012cb4498a2d4)
+ skipping already grafted revision 8:9db0f28fd374 (2:ef0ef43d49e7 also has unknown origin 5c095ad7e90f)
[255]
+
+Empty graft
+
+ $ hg up -qr 26
+ $ hg tag -f something
+ $ hg graft -qr 27
+ $ hg graft -f 27
+ grafting 27:3aaa8b6725f0 "28"
+ note: graft of 27:3aaa8b6725f0 created no changes to commit
--- a/tests/test-help.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-help.t Tue Dec 09 13:32:19 2014 -0600
@@ -272,7 +272,7 @@
schemes extend schemes with shortcuts to repository swarms
share share a common history between several working directories
shelve save and restore changes to the working directory
- strip strip changesets and their descendents from history
+ strip strip changesets and their descendants from history
transplant command to transplant changesets from another branch
win32mbcs allow the use of MBCS paths with problematic encodings
zeroconf discover and advertise repositories on the local network
@@ -500,6 +500,7 @@
-a --text treat all files as text
-g --git use git extended diff format
--nodates omit dates from diff headers
+ --noprefix omit a/ and b/ prefixes from filenames
-p --show-function show which function each change is in
--reverse produce a diff that undoes the changes
-w --ignore-all-space ignore white space when comparing lines
@@ -1943,6 +1944,9 @@
<tr><td>-f</td>
<td>--force</td>
<td>remove (and delete) file even if added or modified</td></tr>
+ <tr><td>-S</td>
+ <td>--subrepos</td>
+ <td>recurse into subrepositories</td></tr>
<tr><td>-I</td>
<td>--include PATTERN [+]</td>
<td>include names matching the given patterns</td></tr>
--- a/tests/test-hgk.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-hgk.t Tue Dec 09 13:32:19 2014 -0600
@@ -16,5 +16,32 @@
phase draft
adda
+ $ echo b > b
+ $ hg ci -Am addb
+ adding b
+ $ hg log -T '{node}\n'
+ 102a90ea7b4a3361e4082ed620918c261189a36a
+ 07f4944404050f47db2e5c5071e0e84e7a27bba9
+
+ $ hg debug-diff-tree 07f494440405 102a90ea7b4a
+ :000000 100664 000000000000 1e88685f5dde N b b
+ $ hg debug-diff-tree 07f494440405 102a90ea7b4a --patch
+ diff --git a/b b/b
+ new file mode 100644
+ --- /dev/null
+ +++ b/b
+ @@ -0,0 +1,1 @@
+ +b
+
+Ensure that diff-tree output isn't affected by diffopts
+ $ hg --config diff.noprefix=True debug-diff-tree 07f494440405 102a90ea7b4a
+ :000000 100664 000000000000 1e88685f5dde N b b
+ $ hg --config diff.noprefix=True debug-diff-tree --patch 07f494440405 102a90ea7b4a
+ diff --git a/b b/b
+ new file mode 100644
+ --- /dev/null
+ +++ b/b
+ @@ -0,0 +1,1 @@
+ +b
$ cd ..
--- a/tests/test-hgweb-commands.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-hgweb-commands.t Tue Dec 09 13:32:19 2014 -0600
@@ -2201,12 +2201,12 @@
$ hg ci -m 'Babar is in the jungle!'
created new head
$ hg graft 0::
- grafting revision 0
- grafting revision 1
- grafting revision 2
- grafting revision 3
- grafting revision 4
- grafting revision 5
+ grafting 0:b4e73ffab476 "0"
+ grafting 1:e06180cbfb0c "1"
+ grafting 2:ab4f1438558b "2"
+ grafting 3:ada793dcc118 "3"
+ grafting 4:b60a39a85a01 "4" (secret)
+ grafting 5:aed2d9c1d0e7 "5"
(turning the initial root secret (filtered))
$ hg phase --force --secret 0
$ PATH_INFO=/graph/; export PATH_INFO
--- a/tests/test-histedit-fold.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-histedit-fold.t Tue Dec 09 13:32:19 2014 -0600
@@ -470,7 +470,14 @@
1:199b6bb90248 b
0:6c795aa153cb a
- $ hg histedit 6c795aa153cb --config hooks.commit="echo commit \$HG_NODE" --commands - 2>&1 << EOF | fixbundle
+Setup the proper environment variable symbol for the platform, to be subbed
+into the hook command.
+#if windows
+ $ NODE="%HG_NODE%"
+#else
+ $ NODE="\$HG_NODE"
+#endif
+ $ hg histedit 6c795aa153cb --config hooks.commit="echo commit $NODE" --commands - 2>&1 << EOF | fixbundle
> pick 199b6bb90248 b
> fold a1a953ffb4b0 c
> pick 6c795aa153cb a
--- a/tests/test-hook.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-hook.t Tue Dec 09 13:32:19 2014 -0600
@@ -148,6 +148,19 @@
$ hg -q tip
4:539e4b31b6dc
+(Check that no 'changelog.i.a' file were left behind)
+
+ $ ls -1 .hg/store/
+ 00changelog.i
+ 00manifest.i
+ data
+ fncache
+ journal.phaseroots
+ phaseroots
+ undo
+ undo.phaseroots
+
+
precommit hook can prevent commit
$ echo "precommit.forbid = python \"$TESTDIR/printenv.py\" precommit.forbid 1" >> .hg/hgrc
--- a/tests/test-hup.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-hup.t Tue Dec 09 13:32:19 2014 -0600
@@ -34,5 +34,14 @@
rollback completed
killed!
- $ echo .hg/* .hg/store/*
- .hg/00changelog.i .hg/journal.bookmarks .hg/journal.branch .hg/journal.desc .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a .hg/store/journal.phaseroots
+ $ ls -1d .hg/* .hg/store/*
+ .hg/00changelog.i
+ .hg/journal.bookmarks
+ .hg/journal.branch
+ .hg/journal.desc
+ .hg/journal.dirstate
+ .hg/requires
+ .hg/store
+ .hg/store/00changelog.i
+ .hg/store/00changelog.i.a
+ .hg/store/journal.phaseroots
--- a/tests/test-issue3084.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-issue3084.t Tue Dec 09 13:32:19 2014 -0600
@@ -42,9 +42,7 @@
$ echo "n" | hg merge --config ui.interactive=Yes
remote turned local normal file foo into a largefile
use (l)argefile or keep (n)ormal file? n
- getting changed largefiles
- 0 largefiles updated, 0 removed
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg status
@@ -116,8 +114,8 @@
remote turned local largefile foo into a normal file
keep (l)argefile or use (n)ormal file? l
getting changed largefiles
- 1 largefiles updated, 0 removed
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 0 largefiles updated, 0 removed
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg status
@@ -302,9 +300,7 @@
use (c)hanged version or (d)elete? c
remote turned local normal file f into a largefile
use (l)argefile or keep (n)ormal file? n
- getting changed largefiles
- 0 largefiles updated, 0 removed
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ cat f
normal2
@@ -329,8 +325,8 @@
remote turned local largefile f into a normal file
keep (l)argefile or use (n)ormal file? l
getting changed largefiles
- 1 largefiles updated, 0 removed
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 0 largefiles updated, 0 removed
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ cat f
large
@@ -409,8 +405,8 @@
remote turned local largefile f into a normal file
keep (l)argefile or use (n)ormal file? l
getting changed largefiles
- 1 largefiles updated, 0 removed
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 0 largefiles updated, 0 removed
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ cat f
large2
--- a/tests/test-issue672.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-issue672.t Tue Dec 09 13:32:19 2014 -0600
@@ -40,7 +40,7 @@
1a: remote created -> g
getting 1a
updating: 1a 2/2 files (100.00%)
- 2: keep -> k
+ 2: remote unchanged -> k
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
--- a/tests/test-largefiles-misc.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-largefiles-misc.t Tue Dec 09 13:32:19 2014 -0600
@@ -67,6 +67,11 @@
dirc/baz/largefile
dirc/dirb
dirc/dirb/largefile
+
+ $ hg clone -q . ../fetch
+ $ hg --config extensions.fetch= fetch ../fetch
+ abort: uncommitted changes
+ [255]
$ hg up -qC
$ cd ..
@@ -482,7 +487,7 @@
b
$ hg -R clone2 outgoing --large --graph --template "{rev}"
- comparing with $TESTTMP/issue3651/src
+ comparing with $TESTTMP/issue3651/src (glob)
searching for changes
@ 1
@@ -589,7 +594,7 @@
89e6c98d92887913cadf06b2adb97f26cde4849b
-Pusing revision #1 causes uploading entity 89e6c98d9288, which is
+Pushing revision #1 causes uploading entity 89e6c98d9288, which is
shared also by largefiles b1, b2 in revision #2 and b in revision #5.
Then, entity 89e6c98d9288 is not treated as "outgoing entity" at "hg
@@ -832,4 +837,33 @@
$ cd ..
+Test "pull --rebase" when rebase is enabled before largefiles (issue3861)
+=========================================================================
+ $ hg showconfig extensions | grep largefiles
+ extensions.largefiles=!
+
+ $ mkdir issue3861
+ $ cd issue3861
+ $ hg init src
+ $ hg clone -q src dst
+ $ echo a > src/a
+ $ hg -R src commit -Aqm "#0"
+ Invoking status precommit hook
+ A a
+
+ $ cat >> dst/.hg/hgrc <<EOF
+ > [extensions]
+ > largefiles=
+ > EOF
+ $ hg -R dst pull --rebase
+ pulling from $TESTTMP/issue3861/src (glob)
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ nothing to rebase - working directory parent is already an ancestor of destination bf5e395ced2c
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd ..
--- a/tests/test-largefiles-update.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-largefiles-update.t Tue Dec 09 13:32:19 2014 -0600
@@ -509,8 +509,25 @@
$ cat large1
large1 in #1
- $ hg rebase -q --abort
- rebase aborted
+Test that rebase updates standins for manually modified largefiles at
+the 1st commit of resuming.
+
+ $ echo "manually modified before 'hg rebase --continue'" > large1
+ $ hg resolve -m normal1
+ (no more unresolved files)
+ $ hg rebase --continue --config ui.interactive=True <<EOF
+ > c
+ > EOF
+ local changed .hglf/large1 which remote deleted
+ use (c)hanged version or (d)elete? c
+
+ $ hg diff -c "tip~1" --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
+ -e5bb990443d6a92aaf7223813720f7566c9dd05b
+ +8a4f783556e7dea21139ca0466eafce954c75c13
+ $ rm -f large1
+ $ hg update -q -C tip
+ $ cat large1
+ manually modified before 'hg rebase --continue'
Test that transplant updates largefiles, of which standins are safely
changed, even if it is aborted by conflict of other.
@@ -543,6 +560,20 @@
$ cat largeX
largeX
+Test that transplant updates standins for manually modified largefiles
+at the 1st commit of resuming.
+
+ $ echo "manually modified before 'hg transplant --continue'" > large1
+ $ hg transplant --continue
+ 07d6153b5c04 transplanted as f1bf30eb88cc
+ $ hg diff -c tip .hglf/large1 | grep '^[+-][0-9a-z]'
+ -e5bb990443d6a92aaf7223813720f7566c9dd05b
+ +6a4f36d4075fbe0f30ec1d26ca44e63c05903671
+ $ rm -f large1
+ $ hg update -q -C tip
+ $ cat large1
+ manually modified before 'hg transplant --continue'
+
Test that "hg status" doesn't show removal of largefiles not managed
in the target context.
@@ -605,3 +636,16 @@
#endif
$ cd ..
+
+Test that "hg convert" avoids copying largefiles from the working
+directory into store, because "hg convert" doesn't update largefiles
+in the working directory (removing files under ".cache/largefiles"
+forces "hg convert" to copy corresponding largefiles)
+
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > convert =
+ > EOF
+
+ $ rm $TESTTMP/.cache/largefiles/6a4f36d4075fbe0f30ec1d26ca44e63c05903671
+ $ hg convert -q repo repo.converted
--- a/tests/test-largefiles-wireproto.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-largefiles-wireproto.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,5 +1,5 @@
-This file contains testcases that tend to be related to the wireprotocol part of
-largefile.
+This file contains testcases that tend to be related to the wire protocol part
+of largefiles.
$ USERCACHE="$TESTTMP/cache"; export USERCACHE
$ mkdir "${USERCACHE}"
--- a/tests/test-largefiles.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-largefiles.t Tue Dec 09 13:32:19 2014 -0600
@@ -1172,12 +1172,11 @@
adding manifests
adding file changes
added 1 changesets with 2 changes to 2 files (+1 heads)
+ 0 largefiles cached
Invoking status precommit hook
M sub/normal4
M sub2/large6
saved backup bundle to $TESTTMP/d/.hg/strip-backup/f574fb32bb45-backup.hg (glob)
- 0 largefiles cached
- nothing to rebase - working directory parent is also destination
$ [ -f .hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928 ]
$ hg log --template '{rev}:{node|short} {desc|firstline}\n'
9:598410d3eb9a modify normal file largefile in repo d
@@ -1741,8 +1740,6 @@
adding manifests
adding file changes
added 1 changesets with 2 changes to 2 files
- getting changed largefiles
- 0 largefiles updated, 0 removed
$ hg log --template '{rev}:{node|short} {desc|firstline}\n'
9:598410d3eb9a modify normal file largefile in repo d
8:a381d2c8c80e modify normal file and largefile in repo b
@@ -1790,7 +1787,7 @@
$ hg cat .hglf/sub/large4
e166e74c7303192238d60af5a9c4ce9bef0b7928
$ hg cat .hglf/normal3
- .hglf/normal3: no such file in rev 598410d3eb9a
+ .hglf/normal3: no such file in rev 598410d3eb9a (glob)
[1]
Test that renaming a largefile results in correct output for status
--- a/tests/test-locate.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-locate.t Tue Dec 09 13:32:19 2014 -0600
@@ -96,10 +96,10 @@
$ hg files
b
- dir.h/foo
+ dir.h/foo (glob)
t.h
- t/e.h
- t/x
+ t/e.h (glob)
+ t/x (glob)
$ hg files b
b
@@ -130,11 +130,11 @@
../t/e.h (glob)
$ hg files
- ../b
- ../dir.h/foo
- ../t.h
- ../t/e.h
- ../t/x
+ ../b (glob)
+ ../dir.h/foo (glob)
+ ../t.h (glob)
+ ../t/e.h (glob)
+ ../t/x (glob)
$ hg files .
[1]
--- a/tests/test-manifest.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-manifest.t Tue Dec 09 13:32:19 2014 -0600
@@ -26,7 +26,7 @@
$ hg files -vr .
2 a
- 2 x b/a
+ 2 x b/a (glob)
1 l l
$ hg files -r . -X b
a
--- a/tests/test-merge-criss-cross.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-merge-criss-cross.t Tue Dec 09 13:32:19 2014 -0600
@@ -141,7 +141,7 @@
resolving manifests
branchmerge: True, force: False, partial: False
ancestor: 40663881a6dd, local: 3b08d01b0ab5+, remote: adfe50279922
- f2: keep -> k
+ f2: remote unchanged -> k
f1: versions differ -> m
auction for merging merge bids
@@ -152,7 +152,7 @@
f1: remote is newer -> g
getting f1
updating: f1 1/1 files (100.00%)
- f2: keep -> k
+ f2: remote unchanged -> k
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
@@ -176,7 +176,7 @@
resolving manifests
branchmerge: True, force: False, partial: False
ancestor: 0f6b37dbe527, local: adfe50279922+, remote: 3b08d01b0ab5
- f1: keep -> k
+ f1: remote unchanged -> k
f2: versions differ -> m
calculating bids for ancestor 40663881a6dd
@@ -195,7 +195,7 @@
f2: remote is newer -> g
getting f2
updating: f2 1/1 files (100.00%)
- f1: keep -> k
+ f1: remote unchanged -> k
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
@@ -249,7 +249,7 @@
resolving manifests
branchmerge: True, force: False, partial: False
ancestor: 40663881a6dd, local: 3b08d01b0ab5+, remote: adfe50279922
- f2: keep -> k
+ f2: remote unchanged -> k
f1: versions differ -> m
auction for merging merge bids
@@ -260,7 +260,7 @@
f1: remote is newer -> g
getting f1
updating: f1 1/1 files (100.00%)
- f2: keep -> k
+ f2: remote unchanged -> k
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
--- a/tests/test-merge-force.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-merge-force.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,45 +1,675 @@
+Set up a base, local, and remote changeset, as well as the working copy state.
+File names are of the form base_remote_local_working-copy. For example,
+content1_content2_content1_content2-untracked represents a
+file that was modified in the remote changeset, left untouched in the
+local changeset, and then modified in the working copy to match the
+remote content, then finally forgotten.
+
$ hg init
- $ echo a > a
- $ hg ci -qAm 'add a'
+Create base changeset
+
+ $ python $TESTDIR/generate-working-copy-states.py state 3 1
+ $ hg addremove -q --similarity 0
+ $ hg commit -qm 'base'
+
+Create remote changeset
+
+ $ python $TESTDIR/generate-working-copy-states.py state 3 2
+ $ hg addremove -q --similarity 0
+ $ hg commit -qm 'remote'
+
+Create local changeset
+
+ $ hg update -q 0
+ $ python $TESTDIR/generate-working-copy-states.py state 3 3
+ $ hg addremove -q --similarity 0
+ $ hg commit -qm 'local'
+
+Set up working directory
+
+ $ python $TESTDIR/generate-working-copy-states.py state 3 wc
+ $ hg addremove -q --similarity 0
+ $ hg forget *_*_*_*-untracked
+ $ rm *_*_*_missing-*
- $ echo b > b
- $ hg ci -qAm 'add b'
+ $ hg status -A
+ M content1_content1_content1_content4-tracked
+ M content1_content1_content3_content1-tracked
+ M content1_content1_content3_content4-tracked
+ M content1_content2_content1_content2-tracked
+ M content1_content2_content1_content4-tracked
+ M content1_content2_content2_content1-tracked
+ M content1_content2_content2_content4-tracked
+ M content1_content2_content3_content1-tracked
+ M content1_content2_content3_content2-tracked
+ M content1_content2_content3_content4-tracked
+ M content1_missing_content1_content4-tracked
+ M content1_missing_content3_content1-tracked
+ M content1_missing_content3_content4-tracked
+ M missing_content2_content2_content4-tracked
+ M missing_content2_content3_content2-tracked
+ M missing_content2_content3_content4-tracked
+ M missing_missing_content3_content4-tracked
+ A content1_content1_missing_content1-tracked
+ A content1_content1_missing_content4-tracked
+ A content1_content2_missing_content1-tracked
+ A content1_content2_missing_content2-tracked
+ A content1_content2_missing_content4-tracked
+ A content1_missing_missing_content1-tracked
+ A content1_missing_missing_content4-tracked
+ A missing_content2_missing_content2-tracked
+ A missing_content2_missing_content4-tracked
+ A missing_missing_missing_content4-tracked
+ R content1_content1_content1_content1-untracked
+ R content1_content1_content1_content4-untracked
+ R content1_content1_content1_missing-untracked
+ R content1_content1_content3_content1-untracked
+ R content1_content1_content3_content3-untracked
+ R content1_content1_content3_content4-untracked
+ R content1_content1_content3_missing-untracked
+ R content1_content2_content1_content1-untracked
+ R content1_content2_content1_content2-untracked
+ R content1_content2_content1_content4-untracked
+ R content1_content2_content1_missing-untracked
+ R content1_content2_content2_content1-untracked
+ R content1_content2_content2_content2-untracked
+ R content1_content2_content2_content4-untracked
+ R content1_content2_content2_missing-untracked
+ R content1_content2_content3_content1-untracked
+ R content1_content2_content3_content2-untracked
+ R content1_content2_content3_content3-untracked
+ R content1_content2_content3_content4-untracked
+ R content1_content2_content3_missing-untracked
+ R content1_missing_content1_content1-untracked
+ R content1_missing_content1_content4-untracked
+ R content1_missing_content1_missing-untracked
+ R content1_missing_content3_content1-untracked
+ R content1_missing_content3_content3-untracked
+ R content1_missing_content3_content4-untracked
+ R content1_missing_content3_missing-untracked
+ R missing_content2_content2_content2-untracked
+ R missing_content2_content2_content4-untracked
+ R missing_content2_content2_missing-untracked
+ R missing_content2_content3_content2-untracked
+ R missing_content2_content3_content3-untracked
+ R missing_content2_content3_content4-untracked
+ R missing_content2_content3_missing-untracked
+ R missing_missing_content3_content3-untracked
+ R missing_missing_content3_content4-untracked
+ R missing_missing_content3_missing-untracked
+ ! content1_content1_content1_missing-tracked
+ ! content1_content1_content3_missing-tracked
+ ! content1_content1_missing_missing-tracked
+ ! content1_content2_content1_missing-tracked
+ ! content1_content2_content2_missing-tracked
+ ! content1_content2_content3_missing-tracked
+ ! content1_content2_missing_missing-tracked
+ ! content1_missing_content1_missing-tracked
+ ! content1_missing_content3_missing-tracked
+ ! content1_missing_missing_missing-tracked
+ ! missing_content2_content2_missing-tracked
+ ! missing_content2_content3_missing-tracked
+ ! missing_content2_missing_missing-tracked
+ ! missing_missing_content3_missing-tracked
+ ! missing_missing_missing_missing-tracked
+ ? content1_content1_missing_content1-untracked
+ ? content1_content1_missing_content4-untracked
+ ? content1_content2_missing_content1-untracked
+ ? content1_content2_missing_content2-untracked
+ ? content1_content2_missing_content4-untracked
+ ? content1_missing_missing_content1-untracked
+ ? content1_missing_missing_content4-untracked
+ ? missing_content2_missing_content2-untracked
+ ? missing_content2_missing_content4-untracked
+ ? missing_missing_missing_content4-untracked
+ C content1_content1_content1_content1-tracked
+ C content1_content1_content3_content3-tracked
+ C content1_content2_content1_content1-tracked
+ C content1_content2_content2_content2-tracked
+ C content1_content2_content3_content3-tracked
+ C content1_missing_content1_content1-tracked
+ C content1_missing_content3_content3-tracked
+ C missing_content2_content2_content2-tracked
+ C missing_content2_content3_content3-tracked
+ C missing_missing_content3_content3-tracked
+
+Merge with remote
- $ hg up -qC 0
- $ hg rm a
- $ hg ci -m 'rm a'
- created new head
+# Notes:
+# - local and remote changed content1_content2_*_content2-untracked
+# in the same way, so it could potentially be left alone
- $ hg up -qC 1
- $ rm a
+ $ hg merge -f --tool internal:merge3 'desc("remote")'
+ local changed content1_missing_content1_content4-tracked which remote deleted
+ use (c)hanged version or (d)elete? c
+ local changed content1_missing_content3_content3-tracked which remote deleted
+ use (c)hanged version or (d)elete? c
+ local changed content1_missing_content3_content4-tracked which remote deleted
+ use (c)hanged version or (d)elete? c
+ local changed content1_missing_missing_content4-tracked which remote deleted
+ use (c)hanged version or (d)elete? c
+ remote changed content1_content2_content1_content1-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content1_content2-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content1_content4-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content1_missing-tracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content1_missing-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content2_content1-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content2_content2-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content2_content4-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content2_missing-tracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content2_missing-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content3_content1-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content3_content2-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content3_content3-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content3_content4-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content3_missing-tracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_content3_missing-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_missing_content1-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_missing_content2-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_missing_content4-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_missing_missing-tracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ remote changed content1_content2_missing_missing-untracked which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ merging content1_content2_content1_content4-tracked
+ warning: conflicts during merge.
+ merging content1_content2_content1_content4-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging content1_content2_content2_content1-tracked
+ merging content1_content2_content2_content4-tracked
+ warning: conflicts during merge.
+ merging content1_content2_content2_content4-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging content1_content2_content3_content1-tracked
+ merging content1_content2_content3_content3-tracked
+ warning: conflicts during merge.
+ merging content1_content2_content3_content3-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging content1_content2_content3_content4-tracked
+ warning: conflicts during merge.
+ merging content1_content2_content3_content4-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging content1_content2_missing_content1-tracked
+ merging content1_content2_missing_content4-tracked
+ warning: conflicts during merge.
+ merging content1_content2_missing_content4-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging missing_content2_content2_content4-tracked
+ warning: conflicts during merge.
+ merging missing_content2_content2_content4-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging missing_content2_content3_content3-tracked
+ warning: conflicts during merge.
+ merging missing_content2_content3_content3-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging missing_content2_content3_content4-tracked
+ warning: conflicts during merge.
+ merging missing_content2_content3_content4-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging missing_content2_missing_content4-tracked
+ warning: conflicts during merge.
+ merging missing_content2_missing_content4-tracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ merging missing_content2_missing_content4-untracked
+ warning: conflicts during merge.
+ merging missing_content2_missing_content4-untracked incomplete! (edit conflicts, then use 'hg resolve --mark')
+ 39 files updated, 3 files merged, 8 files removed, 10 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
+ [1]
-Local deleted a file, remote removed
+Check which files need to be resolved (should correspond to the output above).
+This should be the files for which the base (1st filename segment), the remote
+(2nd segment) and the working copy (4th segment) are all different.
+
+Interestingly, one untracked file got merged and added, which corresponds to the
+odd 'if force and branchmerge and different' case in manifestmerge().
-Should fail, since there are deleted files:
+ $ hg resolve -l
+ U content1_content2_content1_content4-tracked
+ R content1_content2_content2_content1-tracked
+ U content1_content2_content2_content4-tracked
+ R content1_content2_content3_content1-tracked
+ U content1_content2_content3_content3-tracked
+ U content1_content2_content3_content4-tracked
+ R content1_content2_missing_content1-tracked
+ U content1_content2_missing_content4-tracked
+ U missing_content2_content2_content4-tracked
+ U missing_content2_content3_content3-tracked
+ U missing_content2_content3_content4-tracked
+ U missing_content2_missing_content4-tracked
+ U missing_content2_missing_content4-untracked
+
+Check status and file content
+
+Some files get added (e.g. content1_content2_content1_content1-untracked)
+
+It is not intuitive that content1_content2_content1_content4-tracked gets
+merged while content1_content2_content1_content4-untracked gets overwritten.
+Any *_content2_*-untracked triggers the modified/deleted prompt and then gets
+overwritten.
+
+A lot of untracked files become tracked, for example
+content1_content2_content2_content2-untracked.
+
+*_missing_missing_missing-tracked is reported as removed ('R'), which
+doesn't make sense since the file did not exist in the parent, but on the
+other hand, merged-in additions are reported as modifications, which is
+almost as strange.
+
+missing_missing_content3_missing-tracked becomes removed ('R'), even though
+the remote side did not touch the file
- $ hg merge
- abort: uncommitted changes
- (use 'hg status' to list changes)
- [255]
-
-Should succeed with --force:
-
- $ hg -v merge --force
- resolving manifests
- removing a
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- (branch merge, don't forget to commit)
-
-Should show 'a' as removed:
-
- $ hg status
- R a
-
- $ hg ci -m merge
-
-Should not show 'a':
-
- $ hg manifest
- b
-
+ $ for f in `python $TESTDIR/generate-working-copy-states.py filelist 3`
+ > do
+ > echo
+ > hg status -A $f
+ > if test -f $f
+ > then
+ > cat $f
+ > else
+ > echo '<missing>'
+ > fi
+ > if test -f ${f}.orig
+ > then
+ > echo ${f}.orig:
+ > cat ${f}.orig
+ > fi
+ > done
+
+ C content1_content1_content1_content1-tracked
+ content1
+
+ R content1_content1_content1_content1-untracked
+ content1
+
+ M content1_content1_content1_content4-tracked
+ content4
+
+ R content1_content1_content1_content4-untracked
+ content4
+
+ ! content1_content1_content1_missing-tracked
+ <missing>
+
+ R content1_content1_content1_missing-untracked
+ <missing>
+
+ M content1_content1_content3_content1-tracked
+ content1
+
+ R content1_content1_content3_content1-untracked
+ content1
+
+ C content1_content1_content3_content3-tracked
+ content3
+
+ R content1_content1_content3_content3-untracked
+ content3
+
+ M content1_content1_content3_content4-tracked
+ content4
+
+ R content1_content1_content3_content4-untracked
+ content4
+
+ ! content1_content1_content3_missing-tracked
+ <missing>
+
+ R content1_content1_content3_missing-untracked
+ <missing>
+
+ A content1_content1_missing_content1-tracked
+ content1
+
+ ? content1_content1_missing_content1-untracked
+ content1
+
+ A content1_content1_missing_content4-tracked
+ content4
+
+ ? content1_content1_missing_content4-untracked
+ content4
+
+ ! content1_content1_missing_missing-tracked
+ <missing>
+
+ content1_content1_missing_missing-untracked: * (glob)
+ <missing>
+
+ M content1_content2_content1_content1-tracked
+ content2
+
+ M content1_content2_content1_content1-untracked
+ content2
+
+ M content1_content2_content1_content2-tracked
+ content2
+
+ M content1_content2_content1_content2-untracked
+ content2
+
+ M content1_content2_content1_content4-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ content1
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ content1_content2_content1_content4-tracked.orig:
+ content4
+
+ M content1_content2_content1_content4-untracked
+ content2
+
+ M content1_content2_content1_missing-tracked
+ content2
+
+ M content1_content2_content1_missing-untracked
+ content2
+
+ M content1_content2_content2_content1-tracked
+ content2
+
+ M content1_content2_content2_content1-untracked
+ content2
+
+ C content1_content2_content2_content2-tracked
+ content2
+
+ M content1_content2_content2_content2-untracked
+ content2
+
+ M content1_content2_content2_content4-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ content1
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ content1_content2_content2_content4-tracked.orig:
+ content4
+
+ M content1_content2_content2_content4-untracked
+ content2
+
+ M content1_content2_content2_missing-tracked
+ content2
+
+ M content1_content2_content2_missing-untracked
+ content2
+
+ M content1_content2_content3_content1-tracked
+ content2
+
+ M content1_content2_content3_content1-untracked
+ content2
+
+ M content1_content2_content3_content2-tracked
+ content2
+
+ M content1_content2_content3_content2-untracked
+ content2
+
+ M content1_content2_content3_content3-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content3
+ ||||||| base
+ content1
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ content1_content2_content3_content3-tracked.orig:
+ content3
+
+ M content1_content2_content3_content3-untracked
+ content2
+
+ M content1_content2_content3_content4-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ content1
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ content1_content2_content3_content4-tracked.orig:
+ content4
+
+ M content1_content2_content3_content4-untracked
+ content2
+
+ M content1_content2_content3_missing-tracked
+ content2
+
+ M content1_content2_content3_missing-untracked
+ content2
+
+ M content1_content2_missing_content1-tracked
+ content2
+
+ M content1_content2_missing_content1-untracked
+ content2
+
+ M content1_content2_missing_content2-tracked
+ content2
+
+ M content1_content2_missing_content2-untracked
+ content2
+
+ M content1_content2_missing_content4-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ content1
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ content1_content2_missing_content4-tracked.orig:
+ content4
+
+ M content1_content2_missing_content4-untracked
+ content2
+
+ M content1_content2_missing_missing-tracked
+ content2
+
+ M content1_content2_missing_missing-untracked
+ content2
+
+ R content1_missing_content1_content1-tracked
+ <missing>
+
+ R content1_missing_content1_content1-untracked
+ content1
+
+ M content1_missing_content1_content4-tracked
+ content4
+
+ R content1_missing_content1_content4-untracked
+ content4
+
+ R content1_missing_content1_missing-tracked
+ <missing>
+
+ R content1_missing_content1_missing-untracked
+ <missing>
+
+ R content1_missing_content3_content1-tracked
+ <missing>
+
+ R content1_missing_content3_content1-untracked
+ content1
+
+ C content1_missing_content3_content3-tracked
+ content3
+
+ R content1_missing_content3_content3-untracked
+ content3
+
+ M content1_missing_content3_content4-tracked
+ content4
+
+ R content1_missing_content3_content4-untracked
+ content4
+
+ R content1_missing_content3_missing-tracked
+ <missing>
+
+ R content1_missing_content3_missing-untracked
+ <missing>
+
+ R content1_missing_missing_content1-tracked
+ <missing>
+
+ ? content1_missing_missing_content1-untracked
+ content1
+
+ A content1_missing_missing_content4-tracked
+ content4
+
+ ? content1_missing_missing_content4-untracked
+ content4
+
+ R content1_missing_missing_missing-tracked
+ <missing>
+
+ content1_missing_missing_missing-untracked: * (glob)
+ <missing>
+
+ C missing_content2_content2_content2-tracked
+ content2
+
+ M missing_content2_content2_content2-untracked
+ content2
+
+ M missing_content2_content2_content4-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ missing_content2_content2_content4-tracked.orig:
+ content4
+
+ M missing_content2_content2_content4-untracked
+ content2
+
+ M missing_content2_content2_missing-tracked
+ content2
+
+ M missing_content2_content2_missing-untracked
+ content2
+
+ M missing_content2_content3_content2-tracked
+ content2
+
+ M missing_content2_content3_content2-untracked
+ content2
+
+ M missing_content2_content3_content3-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content3
+ ||||||| base
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ missing_content2_content3_content3-tracked.orig:
+ content3
+
+ M missing_content2_content3_content3-untracked
+ content2
+
+ M missing_content2_content3_content4-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ missing_content2_content3_content4-tracked.orig:
+ content4
+
+ M missing_content2_content3_content4-untracked
+ content2
+
+ M missing_content2_content3_missing-tracked
+ content2
+
+ M missing_content2_content3_missing-untracked
+ content2
+
+ M missing_content2_missing_content2-tracked
+ content2
+
+ M missing_content2_missing_content2-untracked
+ content2
+
+ M missing_content2_missing_content4-tracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ missing_content2_missing_content4-tracked.orig:
+ content4
+
+ M missing_content2_missing_content4-untracked
+ <<<<<<< local: 0447570f1af6 - test: local
+ content4
+ ||||||| base
+ =======
+ content2
+ >>>>>>> other: 85100b8c675b - test: remote
+ missing_content2_missing_content4-untracked.orig:
+ content4
+
+ M missing_content2_missing_missing-tracked
+ content2
+
+ M missing_content2_missing_missing-untracked
+ content2
+
+ C missing_missing_content3_content3-tracked
+ content3
+
+ R missing_missing_content3_content3-untracked
+ content3
+
+ M missing_missing_content3_content4-tracked
+ content4
+
+ R missing_missing_content3_content4-untracked
+ content4
+
+ R missing_missing_content3_missing-tracked
+ <missing>
+
+ R missing_missing_content3_missing-untracked
+ <missing>
+
+ A missing_missing_missing_content4-tracked
+ content4
+
+ ? missing_missing_missing_content4-untracked
+ content4
+
+ R missing_missing_missing_missing-tracked
+ <missing>
+
+ missing_missing_missing_missing-untracked: * (glob)
+ <missing>
--- a/tests/test-mq-eol.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-eol.t Tue Dec 09 13:32:19 2014 -0600
@@ -2,10 +2,12 @@
Test interactions between mq and patch.eol
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=1" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [diff]
+ > nodates = 1
+ > EOF
$ cat > makepatch.py <<EOF
> f = file('eol.diff', 'wb')
--- a/tests/test-mq-git.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-git.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,10 +1,12 @@
# Test the plumbing of mq.git option
# Automatic upgrade itself is tested elsewhere.
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=1" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [diff]
+ > nodates = 1
+ > EOF
$ hg init repo-auto
$ cd repo-auto
--- a/tests/test-mq-guards.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-guards.t Tue Dec 09 13:32:19 2014 -0600
@@ -568,7 +568,7 @@
3 G b.patch
test that "qselect --reapply" checks applied patches correctly when no
-applied patche becomes guarded but some of unapplied ones become
+applied patches become guarded but some of unapplied ones become
unguarded.
$ hg qpop -q -a
--- a/tests/test-mq-header-date.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-header-date.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,8 +1,10 @@
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=true" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [diff]
+ > nodates = true
+ > EOF
$ catpatch() {
> cat .hg/patches/$1.patch | sed -e "s/^diff \-r [0-9a-f]* /diff -r ... /" \
> -e "s/^\(# Parent \).*/\1/"
@@ -410,8 +412,8 @@
1: Three (again) - test
0: [mq]: 1.patch - test
==== qref -d
+ From: jane
Date: 12 0
- From: jane
diff -r ... 6
--- /dev/null
@@ -463,8 +465,8 @@
1: Three (again) - test
0: [mq]: 1.patch - test
==== qref -u -d
+ From: john
Date: 14 0
- From: john
diff -r ... 8
--- /dev/null
@@ -493,8 +495,8 @@
1: Three (again) - test
0: [mq]: 1.patch - test
==== qref -u -d
+ From: john
Date: 15 0
- From: john
Nine
--- a/tests/test-mq-merge.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-merge.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,9 +1,11 @@
Setup extension:
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq =" >> $HGRCPATH
- $ echo "[mq]" >> $HGRCPATH
- $ echo "git = keep" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [mq]
+ > git = keep
+ > EOF
Test merge with mq changeset as the second parent:
--- a/tests/test-mq-qdiff.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-qdiff.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,7 +1,9 @@
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[mq]" >> $HGRCPATH
- $ echo "git=keep" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [mq]
+ > git = keep
+ > EOF
$ hg init a
$ cd a
--- a/tests/test-mq-qfold.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-qfold.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,9 +1,11 @@
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[mq]" >> $HGRCPATH
- $ echo "git=keep" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=1" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [mq]
+ > git = keep
+ > [diff]
+ > nodates = 1
+ > EOF
init:
--- a/tests/test-mq-qimport.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-qimport.t Tue Dec 09 13:32:19 2014 -0600
@@ -15,10 +15,12 @@
> f.close()
>
> EOF
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "git=1" >> $HGRCPATH
+ > cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [diff]
+ > git = 1
+ > EOF
$ hg init repo
$ cd repo
--- a/tests/test-mq-qrefresh.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-qrefresh.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,7 +1,9 @@
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=1" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [diff]
+ > nodates = 1
+ > EOF
$ hg init a
$ cd a
--- a/tests/test-mq-subrepo-svn.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-subrepo-svn.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,9 +1,11 @@
#require svn13
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=1" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [diff]
+ > nodates = 1
+ > EOF
fn to create new repository, and cd into it
$ mkrepo() {
--- a/tests/test-mq-subrepo.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq-subrepo.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,10 +1,12 @@
- $ echo "[ui]" >> $HGRCPATH
- $ echo "commitsubrepos = Yes" >> $HGRCPATH
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "record=" >> $HGRCPATH
- $ echo "[diff]" >> $HGRCPATH
- $ echo "nodates=1" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [ui]
+ > commitsubrepos = Yes
+ > [extensions]
+ > mq =
+ > record =
+ > [diff]
+ > nodates = 1
+ > EOF
$ stdin=`pwd`/stdin.tmp
@@ -241,7 +243,7 @@
[255]
$ hg revert sub
reverting subrepo sub
- adding sub/a
+ adding sub/a (glob)
$ hg qpop
popping 1.diff
now at: 0.diff
@@ -260,7 +262,7 @@
[255]
$ hg revert sub
reverting subrepo sub
- adding sub/a
+ adding sub/a (glob)
$ hg qpush
applying 1.diff
subrepository sub diverged (local revision: b2fdb12cd82b, remote revision: aa037b301eba)
--- a/tests/test-mq.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-mq.t Tue Dec 09 13:32:19 2014 -0600
@@ -5,11 +5,12 @@
> fi
> }
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
-
- $ echo "[mq]" >> $HGRCPATH
- $ echo "plain=true" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > [mq]
+ > plain = true
+ > EOF
help
@@ -1582,7 +1583,7 @@
$ cd ..
-Test interraction with revset (issue4426)
+Test interaction with revset (issue4426)
$ hg init issue4426
$ cd issue4426
--- a/tests/test-obsolete.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-obsolete.t Tue Dec 09 13:32:19 2014 -0600
@@ -90,8 +90,8 @@
# rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
1 0 -1 59 118 59 59 0 0 58 116 0 1 0
- 2 1 -1 118 204 59 59 59 0 76 192 0 1 1
- 3 1 -1 204 271 204 204 59 0 66 258 0 2 0
+ 2 1 -1 118 193 118 118 59 0 76 192 0 1 0
+ 3 1 -1 193 260 193 193 59 0 66 258 0 2 0
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
--- a/tests/test-patchbomb.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-patchbomb.t Tue Dec 09 13:32:19 2014 -0600
@@ -86,6 +86,100 @@
abort: patchbomb canceled
[255]
+ $ hg --config ui.interactive=1 --config patchbomb.confirm=true email -n -f quux -t foo -c bar -r tip<<EOF
+ > n
+ > EOF
+ this patch series consists of 1 patches.
+
+
+ Final summary:
+
+ From: quux
+ To: foo
+ Cc: bar
+ Subject: [PATCH] a
+ a | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+ are you sure you want to send (yn)? n
+ abort: patchbomb canceled
+ [255]
+
+
+Test diff.git is respected
+ $ hg --config diff.git=True email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -r tip
+ this patch series consists of 1 patches.
+
+
+ displaying [PATCH] a ...
+ Content-Type: text/plain; charset="us-ascii"
+ MIME-Version: 1.0
+ Content-Transfer-Encoding: 7bit
+ Subject: [PATCH] a
+ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
+ X-Mercurial-Series-Index: 1
+ X-Mercurial-Series-Total: 1
+ Message-Id: <8580ff50825a50c8f716.60@*> (glob)
+ X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+ User-Agent: Mercurial-patchbomb/* (glob)
+ Date: Thu, 01 Jan 1970 00:01:00 +0000
+ From: quux
+ To: foo
+ Cc: bar
+
+ # HG changeset patch
+ # User test
+ # Date 1 0
+ # Thu Jan 01 00:00:01 1970 +0000
+ # Node ID 8580ff50825a50c8f716709acdf8de0deddcd6ab
+ # Parent 0000000000000000000000000000000000000000
+ a
+
+ diff --git a/a b/a
+ new file mode 100644
+ --- /dev/null
+ +++ b/a
+ @@ -0,0 +1,1 @@
+ +a
+
+
+
+Test breaking format changes aren't
+ $ hg --config diff.noprefix=True email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -r tip
+ this patch series consists of 1 patches.
+
+
+ displaying [PATCH] a ...
+ Content-Type: text/plain; charset="us-ascii"
+ MIME-Version: 1.0
+ Content-Transfer-Encoding: 7bit
+ Subject: [PATCH] a
+ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
+ X-Mercurial-Series-Index: 1
+ X-Mercurial-Series-Total: 1
+ Message-Id: <8580ff50825a50c8f716.60@*> (glob)
+ X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+ User-Agent: Mercurial-patchbomb/* (glob)
+ Date: Thu, 01 Jan 1970 00:01:00 +0000
+ From: quux
+ To: foo
+ Cc: bar
+
+ # HG changeset patch
+ # User test
+ # Date 1 0
+ # Thu Jan 01 00:00:01 1970 +0000
+ # Node ID 8580ff50825a50c8f716709acdf8de0deddcd6ab
+ # Parent 0000000000000000000000000000000000000000
+ a
+
+ diff -r 000000000000 -r 8580ff50825a a
+ --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+ +++ b/a Thu Jan 01 00:00:01 1970 +0000
+ @@ -0,0 +1,1 @@
+ +a
+
+
$ echo b > b
$ hg commit -Amb -d '2 0'
adding b
@@ -2589,4 +2683,127 @@
+d
- $ cd ..
+Test introduction configuration
+=================================
+
+ $ echo '[patchbomb]' >> $HGRCPATH
+
+"auto" setting
+----------------
+
+ $ echo 'intro=auto' >> $HGRCPATH
+
+single rev
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '10' | grep "Write the introductory message for the patch series."
+ [1]
+
+single rev + flag
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '10' --intro | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+
+Multi rev
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '9::' | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+"never" setting
+-----------------
+
+ $ echo 'intro=never' >> $HGRCPATH
+
+single rev
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '10' | grep "Write the introductory message for the patch series."
+ [1]
+
+single rev + flag
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '10' --intro | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+
+Multi rev
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '9::' | grep "Write the introductory message for the patch series."
+ [1]
+
+Multi rev + flag
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '9::' --intro | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+"always" setting
+-----------------
+
+ $ echo 'intro=always' >> $HGRCPATH
+
+single rev
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '10' | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+single rev + flag
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '10' --intro | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+
+Multi rev
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '9::' | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+Multi rev + flag
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '9::' --intro | grep "Write the introductory message for the patch series."
+ Write the introductory message for the patch series.
+
+bad value setting
+-----------------
+
+ $ echo 'intro=mpmwearaclownnose' >> $HGRCPATH
+
+single rev
+
+ $ hg email --date '1980-1-1 0:1' -n -t foo -s test -r '10'
+ From [test]: test
+ this patch series consists of 1 patches.
+
+ warning: invalid patchbomb.intro value "mpmwearaclownnose"
+ (should be one of always, never, auto)
+ Cc:
+
+ displaying [PATCH] test ...
+ Content-Type: text/plain; charset="us-ascii"
+ MIME-Version: 1.0
+ Content-Transfer-Encoding: 7bit
+ Subject: [PATCH] test
+ X-Mercurial-Node: 3b6f1ec9dde933a40a115a7990f8b320477231af
+ X-Mercurial-Series-Index: 1
+ X-Mercurial-Series-Total: 1
+ Message-Id: <3b6f1ec9dde933a40a11*> (glob)
+ X-Mercurial-Series-Id: <3b6f1ec9dde933a40a11.*> (glob)
+ User-Agent: Mercurial-patchbomb/* (glob)
+ Date: Tue, 01 Jan 1980 00:01:00 +0000
+ From: test
+ To: foo
+
+ # HG changeset patch
+ # User test
+ # Date 5 0
+ # Thu Jan 01 00:00:05 1970 +0000
+ # Branch test
+ # Node ID 3b6f1ec9dde933a40a115a7990f8b320477231af
+ # Parent 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
+ dd
+
+ diff -r 2f9fa9b998c5 -r 3b6f1ec9dde9 d
+ --- a/d Thu Jan 01 00:00:04 1970 +0000
+ +++ b/d Thu Jan 01 00:00:05 1970 +0000
+ @@ -1,1 +1,2 @@
+ d
+ +d
+
--- a/tests/test-phases-exchange.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-phases-exchange.t Tue Dec 09 13:32:19 2014 -0600
@@ -755,7 +755,7 @@
Bare push with next changeset and common changeset needing sync (issue3575)
-(reset some stat on remot repo to not confused other test)
+(reset some stat on remote repo to avoid confusing other tests)
$ hg -R ../alpha --config extensions.strip= strip --no-backup 967b449fbc94
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-progress.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-progress.t Tue Dec 09 13:32:19 2014 -0600
@@ -296,7 +296,7 @@
\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88 [=====> ]\r (no-eol) (esc)
\r (no-eol) (esc)
-test triming progress items, when they contain multi-byte characters,
+test trimming progress items, when they contain multi-byte characters,
of which length of byte sequence and columns in display are different
from each other.
--- a/tests/test-push-hook-lock.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-push-hook-lock.t Tue Dec 09 13:32:19 2014 -0600
@@ -15,8 +15,12 @@
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat <<EOF > $TESTTMP/debuglocks-pretxn-hook.sh
+ > hg debuglocks
+ > true
+ > EOF
$ echo '[hooks]' >> 2/.hg/hgrc
- $ echo 'pretxnchangegroup.a = hg debuglocks; true' >> 2/.hg/hgrc
+ $ echo "pretxnchangegroup.a = sh $TESTTMP/debuglocks-pretxn-hook.sh" >> 2/.hg/hgrc
$ echo 'changegroup.push = hg push -qf ../1' >> 2/.hg/hgrc
$ echo bar >> 3/foo
--- a/tests/test-rebase-newancestor.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-rebase-newancestor.t Tue Dec 09 13:32:19 2014 -0600
@@ -53,3 +53,254 @@
$ cd ..
+
+
+Test rebasing of merges with ancestors of the rebase destination - a situation
+that often happens when trying to recover from repeated merging with a mainline
+branch.
+
+The test case creates a dev branch that contains a couple of merges from the
+default branch. When rebasing to the default branch, these merges would be
+merges with ancestors on the same branch. The merges _could_ contain some
+interesting conflict resolutions or additional changes in the merge commit, but
+that is mixed up with the actual merge stuff and there is in general no way to
+separate them.
+
+Note: The dev branch contains _no_ changes to f-default. It might be unclear
+how rebasing of ancestor merges should be handled, but the current behavior
+with spurious prompts for conflicts in files that didn't change seems very
+wrong.
+
+ $ hg init ancestor-merge
+ $ cd ancestor-merge
+
+ $ touch f-default
+ $ hg ci -Aqm 'default: create f-default'
+
+ $ hg branch -q dev
+ $ hg ci -qm 'dev: create branch'
+
+ $ echo stuff > f-dev
+ $ hg ci -Aqm 'dev: f-dev stuff'
+
+ $ hg up -q default
+ $ echo stuff > f-default
+ $ hg ci -m 'default: f-default stuff'
+
+ $ hg up -q dev
+ $ hg merge -q default
+ $ hg ci -m 'dev: merge default'
+
+ $ hg up -q default
+ $ hg rm f-default
+ $ hg ci -m 'default: remove f-default'
+
+ $ hg up -q dev
+ $ hg merge -q default
+ $ hg ci -m 'dev: merge default'
+
+ $ hg up -q default
+ $ echo stuff > f-other
+ $ hg ci -Aqm 'default: f-other stuff'
+
+ $ hg tglog
+ @ 7: 'default: f-other stuff'
+ |
+ | o 6: 'dev: merge default' dev
+ |/|
+ o | 5: 'default: remove f-default'
+ | |
+ | o 4: 'dev: merge default' dev
+ |/|
+ o | 3: 'default: f-default stuff'
+ | |
+ | o 2: 'dev: f-dev stuff' dev
+ | |
+ | o 1: 'dev: create branch' dev
+ |/
+ o 0: 'default: create f-default'
+
+ $ hg clone -qU . ../ancestor-merge-2
+
+Full rebase all the way back from branching point:
+
+ $ hg rebase -r 'only(dev,default)' -d default
+ remote changed f-default which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ saved backup bundle to $TESTTMP/ancestor-merge/.hg/strip-backup/1d1a643d390e-backup.hg (glob)
+ $ hg tglog
+ o 6: 'dev: merge default'
+ |
+ o 5: 'dev: merge default'
+ |
+ o 4: 'dev: f-dev stuff'
+ |
+ @ 3: 'default: f-other stuff'
+ |
+ o 2: 'default: remove f-default'
+ |
+ o 1: 'default: f-default stuff'
+ |
+ o 0: 'default: create f-default'
+
+Grafty cherry picking rebasing:
+
+ $ cd ../ancestor-merge-2
+
+ $ hg phase -fdr0:
+ $ hg rebase -r 'children(only(dev,default))' -d default
+ remote changed f-default which local deleted
+ use (c)hanged version or leave (d)eleted? c
+ saved backup bundle to $TESTTMP/ancestor-merge-2/.hg/strip-backup/ec2c14fb2984-backup.hg (glob)
+ $ hg tglog
+ o 7: 'dev: merge default'
+ |
+ o 6: 'dev: merge default'
+ |
+ o 5: 'dev: f-dev stuff'
+ |
+ o 4: 'default: f-other stuff'
+ |
+ o 3: 'default: remove f-default'
+ |
+ o 2: 'default: f-default stuff'
+ |
+ | o 1: 'dev: create branch' dev
+ |/
+ o 0: 'default: create f-default'
+
+ $ cd ..
+
+
+Test order of parents of rebased merged with un-rebased changes as p1.
+
+ $ hg init parentorder
+ $ cd parentorder
+ $ touch f
+ $ hg ci -Aqm common
+ $ touch change
+ $ hg ci -Aqm change
+ $ touch target
+ $ hg ci -Aqm target
+ $ hg up -qr 0
+ $ touch outside
+ $ hg ci -Aqm outside
+ $ hg merge -qr 1
+ $ hg ci -m 'merge p1 3=outside p2 1=ancestor'
+ $ hg par
+ changeset: 4:6990226659be
+ tag: tip
+ parent: 3:f59da8fc0fcf
+ parent: 1:dd40c13f7a6f
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: merge p1 3=outside p2 1=ancestor
+
+ $ hg up -qr 1
+ $ hg merge -qr 3
+ $ hg ci -qm 'merge p1 1=ancestor p2 3=outside'
+ $ hg par
+ changeset: 5:a57575f79074
+ tag: tip
+ parent: 1:dd40c13f7a6f
+ parent: 3:f59da8fc0fcf
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: merge p1 1=ancestor p2 3=outside
+
+ $ hg tglog
+ @ 5: 'merge p1 1=ancestor p2 3=outside'
+ |\
+ +---o 4: 'merge p1 3=outside p2 1=ancestor'
+ | |/
+ | o 3: 'outside'
+ | |
+ +---o 2: 'target'
+ | |
+ o | 1: 'change'
+ |/
+ o 0: 'common'
+
+ $ hg rebase -r 4 -d 2
+ saved backup bundle to $TESTTMP/parentorder/.hg/strip-backup/6990226659be-backup.hg (glob)
+ $ hg tip
+ changeset: 5:cca50676b1c5
+ tag: tip
+ parent: 2:a60552eb93fb
+ parent: 3:f59da8fc0fcf
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: merge p1 3=outside p2 1=ancestor
+
+ $ hg rebase -r 4 -d 2
+ saved backup bundle to $TESTTMP/parentorder/.hg/strip-backup/a57575f79074-backup.hg (glob)
+ $ hg tip
+ changeset: 5:f9daf77ffe76
+ tag: tip
+ parent: 2:a60552eb93fb
+ parent: 3:f59da8fc0fcf
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: merge p1 1=ancestor p2 3=outside
+
+ $ hg tglog
+ @ 5: 'merge p1 1=ancestor p2 3=outside'
+ |\
+ +---o 4: 'merge p1 3=outside p2 1=ancestor'
+ | |/
+ | o 3: 'outside'
+ | |
+ o | 2: 'target'
+ | |
+ o | 1: 'change'
+ |/
+ o 0: 'common'
+
+rebase of merge of ancestors
+
+ $ hg up -qr 2
+ $ hg merge -qr 3
+ $ echo 'other change while merging future "rebase ancestors"' > other
+ $ hg ci -Aqm 'merge rebase ancestors'
+ $ hg rebase -d 5 -v
+ resolving manifests
+ removing other
+ note: merging f9daf77ffe76+ and 4c5f12f25ebe using bids from ancestors a60552eb93fb and f59da8fc0fcf
+
+ calculating bids for ancestor a60552eb93fb
+ resolving manifests
+
+ calculating bids for ancestor f59da8fc0fcf
+ resolving manifests
+
+ auction for merging merge bids
+ other: consensus for g
+ end of auction
+
+ getting other
+ other
+ rebase merging completed
+ 1 changesets found
+ saved backup bundle to $TESTTMP/parentorder/.hg/strip-backup/4c5f12f25ebe-backup.hg (glob)
+ 1 changesets found
+ adding branch
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ rebase completed
+ $ hg tglog
+ @ 6: 'merge rebase ancestors'
+ |
+ o 5: 'merge p1 1=ancestor p2 3=outside'
+ |\
+ +---o 4: 'merge p1 3=outside p2 1=ancestor'
+ | |/
+ | o 3: 'outside'
+ | |
+ o | 2: 'target'
+ | |
+ o | 1: 'change'
+ |/
+ o 0: 'common'
+
--- a/tests/test-rebase-obsolete.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-rebase-obsolete.t Tue Dec 09 13:32:19 2014 -0600
@@ -118,8 +118,8 @@
set.
$ hg graft 42ccdea3bb16 32af7686d403
- grafting revision 1
- grafting revision 3
+ grafting 1:42ccdea3bb16 "B"
+ grafting 3:32af7686d403 "D"
$ hg rebase -s 42ccdea3bb16 -d .
$ hg log -G
o 10:5ae4c968c6ac C
--- a/tests/test-record.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-record.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,9 +1,11 @@
Set up a repo
- $ echo "[ui]" >> $HGRCPATH
- $ echo "interactive=true" >> $HGRCPATH
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "record=" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [ui]
+ > interactive = true
+ > [extensions]
+ > record =
+ > EOF
$ hg init a
$ cd a
@@ -557,9 +559,9 @@
> echo $i >> plain
> done
-Record beginning, middle
+Record beginning, middle, and test that format-breaking diffopts are ignored
- $ hg record -d '14 0' -m middle-only plain <<EOF
+ $ hg record --config diff.noprefix=True -d '14 0' -m middle-only plain <<EOF
> y
> y
> y
--- a/tests/test-relink.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-relink.t Tue Dec 09 13:32:19 2014 -0600
@@ -87,7 +87,7 @@
pruned down to 2 probably relinkable files
relinking: data/a.i 1/2 files (50.00%)
not linkable: data/dummy.i
- relinked 1 files (1.37 KB reclaimed)
+ relinked 1 files (1.36 KB reclaimed)
$ cd ..
--- a/tests/test-rename-dir-merge.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-rename-dir-merge.t Tue Dec 09 13:32:19 2014 -0600
@@ -105,6 +105,86 @@
$ hg debugrename b/c
b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
+Local directory rename with conflicting file added in remote source directory
+and untracked in local target directory.
+
+BROKEN: the uncommitted file is overwritten; we should abort
+
+ $ hg co -qC 1
+ $ echo target > b/c
+ $ hg merge 2
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg st -A
+ A b/c
+ a/c
+ ? a/d
+ C b/a
+ C b/b
+ $ cat b/c
+ baz
+
+Local directory rename with conflicting file added in remote source directory
+and committed in local target directory.
+
+ $ hg co -qC 1
+ $ echo target > b/c
+ $ hg add b/c
+ $ hg commit -qm 'new file in target directory'
+ $ hg merge 2
+ merging b/c and a/c to b/c
+ warning: conflicts during merge.
+ merging b/c incomplete! (edit conflicts, then use 'hg resolve --mark')
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
+ [1]
+ $ hg st -A
+ M b/c
+ a/c
+ ? a/d
+ ? b/c.orig
+ C b/a
+ C b/b
+ $ cat b/c
+ <<<<<<< local: f1c50ca4f127 - test: new file in target directory
+ target
+ =======
+ baz
+ >>>>>>> other: ce36d17b18fb - test: 2 add a/c
+ $ rm b/c.orig
+
+Remote directory rename with conflicting file added in remote target directory
+and committed in local source directory.
+
+ $ hg co -qC 2
+ $ hg st -A
+ ? a/d
+ C a/a
+ C a/b
+ C a/c
+ $ hg merge 5
+ merging a/c and b/c to b/c
+ warning: conflicts during merge.
+ merging b/c incomplete! (edit conflicts, then use 'hg resolve --mark')
+ 2 files updated, 0 files merged, 2 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
+ [1]
+ $ hg st -A
+ M b/a
+ M b/b
+ M b/c
+ a/c
+ R a/a
+ R a/b
+ R a/c
+ ? a/d
+ ? b/c.orig
+ $ cat b/c
+ <<<<<<< local: ce36d17b18fb - test: 2 add a/c
+ baz
+ =======
+ target
+ >>>>>>> other: f1c50ca4f127 - test: new file in target directory
Second scenario with two repos:
--- a/tests/test-rename-merge2.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-rename-merge2.t Tue Dec 09 13:32:19 2014 -0600
@@ -88,7 +88,7 @@
ancestor: 924404dff337, local: e300d1c794ec+, remote: 4ce40f5aca24
preserving a for resolve of b
preserving rev for resolve of rev
- a: keep -> k
+ a: remote unchanged -> k
b: remote copied from a -> m
updating: b 1/2 files (50.00%)
picked tool 'python ../merge' for b (binary False symlink False)
@@ -343,7 +343,7 @@
ancestor: 924404dff337, local: 62e7bf090eba+, remote: 49b6d8032493
preserving b for resolve of b
preserving rev for resolve of rev
- b: versions differ -> m
+ b: both renamed from a -> m
updating: b 1/2 files (50.00%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -413,7 +413,7 @@
ancestor: 924404dff337, local: 86a2aa42fc76+, remote: af30c7647fc7
preserving b for resolve of b
preserving rev for resolve of rev
- b: versions differ -> m
+ b: both created -> m
updating: b 1/2 files (50.00%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -446,7 +446,7 @@
a: other deleted -> r
removing a
updating: a 1/3 files (33.33%)
- b: versions differ -> m
+ b: both created -> m
updating: b 2/3 files (66.67%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -478,7 +478,7 @@
a: remote is newer -> g
getting a
updating: a 1/3 files (33.33%)
- b: versions differ -> m
+ b: both created -> m
updating: b 2/3 files (66.67%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -511,7 +511,7 @@
a: other deleted -> r
removing a
updating: a 1/3 files (33.33%)
- b: versions differ -> m
+ b: both created -> m
updating: b 2/3 files (66.67%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -543,7 +543,7 @@
a: remote is newer -> g
getting a
updating: a 1/3 files (33.33%)
- b: versions differ -> m
+ b: both created -> m
updating: b 2/3 files (66.67%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -573,8 +573,8 @@
ancestor: 924404dff337, local: 0b76e65c8289+, remote: 4ce40f5aca24
preserving b for resolve of b
preserving rev for resolve of rev
- a: keep -> k
- b: versions differ -> m
+ a: remote unchanged -> k
+ b: both created -> m
updating: b 1/2 files (50.00%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -609,7 +609,7 @@
a: prompt recreating -> g
getting a
updating: a 1/3 files (33.33%)
- b: versions differ -> m
+ b: both created -> m
updating: b 2/3 files (66.67%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
@@ -643,7 +643,7 @@
preserving rev for resolve of rev
a: prompt keep -> a
updating: a 1/3 files (33.33%)
- b: versions differ -> m
+ b: both created -> m
updating: b 2/3 files (66.67%)
picked tool 'python ../merge' for b (binary False symlink False)
merging b
--- a/tests/test-revert.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-revert.t Tue Dec 09 13:32:19 2014 -0600
@@ -404,157 +404,63 @@
Systematic behavior validation of most possible cases
=====================================================
-This section tests most of the possible combinations of working directory
-changes and inter-revision changes. The number of possible cases is significant
-but they all have a slighly different handling. So this section commits to
-generating and testing all of them to allow safe refactoring of the revert code.
+This section tests most of the possible combinations of revision states and
+working directory states. The number of possible cases is significant but they
+but they all have a slightly different handling. So this section commits to
+and testing all of them to allow safe refactoring of the revert code.
A python script is used to generate a file history for each combination of
-changes between, on one side the working directory and its parent and on
-the other side, changes between a revert target (--rev) and working directory
-parent. The three states generated are:
+states, on one side the content (or lack thereof) in two revisions, and
+on the other side, the content and "tracked-ness" of the working directory. The
+three states generated are:
- a "base" revision
- a "parent" revision
- the working directory (based on "parent")
-The file generated have names of the form:
+The files generated have names of the form:
- <changeset-state>_<working-copy-state>
-
-Here, "changeset-state" conveys the state in "base" and "parent" (or the change
-that happen between them), "working-copy-state" is self explanatory.
+ <rev1-content>_<rev2-content>_<working-copy-content>-<tracked-ness>
All known states are not tested yet. See inline documentation for details.
Special cases from merge and rename are not tested by this section.
-There are also multiple cases where the current revert implementation is known to
-slightly misbehave.
-
Write the python script to disk
-------------------------------
- $ cat << EOF > gen-revert-cases.py
- > # generate proper file state to test revert behavior
- > import sys
- > import os
- >
- > # content of the file in "base" and "parent"
- > # None means no file at all
- > ctxcontent = {
- > # clean: no change from base to parent
- > 'clean': ['base', 'base'],
- > # modified: file content change from base to parent
- > 'modified': ['base', 'parent'],
- > # added: file is missing from base and added in parent
- > 'added': [None, 'parent'],
- > # removed: file exist in base but is removed from parent
- > 'removed': ['base', None],
- > # file exist neither in base not in parent
- > 'missing': [None, None],
- > }
- >
- > # content of file in working copy
- > wccontent = {
- > # clean: wc content is the same as parent
- > 'clean': lambda cc: cc[1],
- > # revert: wc content is the same as base
- > 'revert': lambda cc: cc[0],
- > # wc: file exist with a content different from base and parent
- > 'wc': lambda cc: 'wc',
- > # removed: file is missing and marked as untracked
- > 'removed': lambda cc: None,
- > # deleted: file is recorded as tracked but missing
- > # rely on file deletion outside of this script
- > 'deleted': lambda cc:'TOBEDELETED',
- > }
- > # untracked-X is a version of X where the file is not tracked (? unknown)
- > wccontent['untracked-clean'] = wccontent['clean']
- > wccontent['untracked-revert'] = wccontent['revert']
- > wccontent['untracked-wc'] = wccontent['wc']
- >
- > # build the combination of possible states
- > combination = []
- > for ctxkey in ctxcontent:
- > for wckey in wccontent:
- > filename = "%s_%s" % (ctxkey, wckey)
- > combination.append((filename, ctxkey, wckey))
- >
- > # make sure we have stable output
- > combination.sort()
- >
- > # retrieve the state we must generate
- > target = sys.argv[1]
- >
- > # compute file content
- > content = []
- > for filename, ctxkey, wckey in combination:
- > cc = ctxcontent[ctxkey]
- > if target == 'filelist':
- > print filename
- > elif target == 'base':
- > content.append((filename, cc[0]))
- > elif target == 'parent':
- > content.append((filename, cc[1]))
- > elif target == 'wc':
- > content.append((filename, wccontent[wckey](cc)))
- > else:
- > print >> sys.stderr, "unknown target:", target
- > sys.exit(1)
- >
- > # write actual content
- > for filename, data in content:
- > if data is not None:
- > f = open(filename, 'w')
- > f.write(data + '\n')
- > f.close()
- > elif os.path.exists(filename):
- > os.remove(filename)
- > EOF
-
check list of planned files
- $ python gen-revert-cases.py filelist
- added_clean
- added_deleted
- added_removed
- added_revert
- added_untracked-clean
- added_untracked-revert
- added_untracked-wc
- added_wc
- clean_clean
- clean_deleted
- clean_removed
- clean_revert
- clean_untracked-clean
- clean_untracked-revert
- clean_untracked-wc
- clean_wc
- missing_clean
- missing_deleted
- missing_removed
- missing_revert
- missing_untracked-clean
- missing_untracked-revert
- missing_untracked-wc
- missing_wc
- modified_clean
- modified_deleted
- modified_removed
- modified_revert
- modified_untracked-clean
- modified_untracked-revert
- modified_untracked-wc
- modified_wc
- removed_clean
- removed_deleted
- removed_removed
- removed_revert
- removed_untracked-clean
- removed_untracked-revert
- removed_untracked-wc
- removed_wc
+ $ python $TESTDIR/generate-working-copy-states.py filelist 2
+ content1_content1_content1-tracked
+ content1_content1_content1-untracked
+ content1_content1_content3-tracked
+ content1_content1_content3-untracked
+ content1_content1_missing-tracked
+ content1_content1_missing-untracked
+ content1_content2_content1-tracked
+ content1_content2_content1-untracked
+ content1_content2_content2-tracked
+ content1_content2_content2-untracked
+ content1_content2_content3-tracked
+ content1_content2_content3-untracked
+ content1_content2_missing-tracked
+ content1_content2_missing-untracked
+ content1_missing_content1-tracked
+ content1_missing_content1-untracked
+ content1_missing_content3-tracked
+ content1_missing_content3-untracked
+ content1_missing_missing-tracked
+ content1_missing_missing-untracked
+ missing_content2_content2-tracked
+ missing_content2_content2-untracked
+ missing_content2_content3-tracked
+ missing_content2_content3-untracked
+ missing_content2_missing-tracked
+ missing_content2_missing-untracked
+ missing_missing_content3-tracked
+ missing_missing_content3-untracked
+ missing_missing_missing-tracked
+ missing_missing_missing-untracked
Script to make a simple text version of the content
---------------------------------------------------
@@ -579,268 +485,233 @@
Generate base changeset
- $ python ../gen-revert-cases.py base
+ $ python $TESTDIR/generate-working-copy-states.py state 2 1
$ hg addremove --similarity 0
- adding clean_clean
- adding clean_deleted
- adding clean_removed
- adding clean_revert
- adding clean_untracked-clean
- adding clean_untracked-revert
- adding clean_untracked-wc
- adding clean_wc
- adding modified_clean
- adding modified_deleted
- adding modified_removed
- adding modified_revert
- adding modified_untracked-clean
- adding modified_untracked-revert
- adding modified_untracked-wc
- adding modified_wc
- adding removed_clean
- adding removed_deleted
- adding removed_removed
- adding removed_revert
- adding removed_untracked-clean
- adding removed_untracked-revert
- adding removed_untracked-wc
- adding removed_wc
+ adding content1_content1_content1-tracked
+ adding content1_content1_content1-untracked
+ adding content1_content1_content3-tracked
+ adding content1_content1_content3-untracked
+ adding content1_content1_missing-tracked
+ adding content1_content1_missing-untracked
+ adding content1_content2_content1-tracked
+ adding content1_content2_content1-untracked
+ adding content1_content2_content2-tracked
+ adding content1_content2_content2-untracked
+ adding content1_content2_content3-tracked
+ adding content1_content2_content3-untracked
+ adding content1_content2_missing-tracked
+ adding content1_content2_missing-untracked
+ adding content1_missing_content1-tracked
+ adding content1_missing_content1-untracked
+ adding content1_missing_content3-tracked
+ adding content1_missing_content3-untracked
+ adding content1_missing_missing-tracked
+ adding content1_missing_missing-untracked
$ hg status
- A clean_clean
- A clean_deleted
- A clean_removed
- A clean_revert
- A clean_untracked-clean
- A clean_untracked-revert
- A clean_untracked-wc
- A clean_wc
- A modified_clean
- A modified_deleted
- A modified_removed
- A modified_revert
- A modified_untracked-clean
- A modified_untracked-revert
- A modified_untracked-wc
- A modified_wc
- A removed_clean
- A removed_deleted
- A removed_removed
- A removed_revert
- A removed_untracked-clean
- A removed_untracked-revert
- A removed_untracked-wc
- A removed_wc
+ A content1_content1_content1-tracked
+ A content1_content1_content1-untracked
+ A content1_content1_content3-tracked
+ A content1_content1_content3-untracked
+ A content1_content1_missing-tracked
+ A content1_content1_missing-untracked
+ A content1_content2_content1-tracked
+ A content1_content2_content1-untracked
+ A content1_content2_content2-tracked
+ A content1_content2_content2-untracked
+ A content1_content2_content3-tracked
+ A content1_content2_content3-untracked
+ A content1_content2_missing-tracked
+ A content1_content2_missing-untracked
+ A content1_missing_content1-tracked
+ A content1_missing_content1-untracked
+ A content1_missing_content3-tracked
+ A content1_missing_content3-untracked
+ A content1_missing_missing-tracked
+ A content1_missing_missing-untracked
$ hg commit -m 'base'
(create a simple text version of the content)
$ python ../dircontent.py > ../content-base.txt
$ cat ../content-base.txt
- base clean_clean
- base clean_deleted
- base clean_removed
- base clean_revert
- base clean_untracked-clean
- base clean_untracked-revert
- base clean_untracked-wc
- base clean_wc
- base modified_clean
- base modified_deleted
- base modified_removed
- base modified_revert
- base modified_untracked-clean
- base modified_untracked-revert
- base modified_untracked-wc
- base modified_wc
- base removed_clean
- base removed_deleted
- base removed_removed
- base removed_revert
- base removed_untracked-clean
- base removed_untracked-revert
- base removed_untracked-wc
- base removed_wc
+ content1 content1_content1_content1-tracked
+ content1 content1_content1_content1-untracked
+ content1 content1_content1_content3-tracked
+ content1 content1_content1_content3-untracked
+ content1 content1_content1_missing-tracked
+ content1 content1_content1_missing-untracked
+ content1 content1_content2_content1-tracked
+ content1 content1_content2_content1-untracked
+ content1 content1_content2_content2-tracked
+ content1 content1_content2_content2-untracked
+ content1 content1_content2_content3-tracked
+ content1 content1_content2_content3-untracked
+ content1 content1_content2_missing-tracked
+ content1 content1_content2_missing-untracked
+ content1 content1_missing_content1-tracked
+ content1 content1_missing_content1-untracked
+ content1 content1_missing_content3-tracked
+ content1 content1_missing_content3-untracked
+ content1 content1_missing_missing-tracked
+ content1 content1_missing_missing-untracked
Create parent changeset
- $ python ../gen-revert-cases.py parent
+ $ python $TESTDIR/generate-working-copy-states.py state 2 2
$ hg addremove --similarity 0
- adding added_clean
- adding added_deleted
- adding added_removed
- adding added_revert
- adding added_untracked-clean
- adding added_untracked-revert
- adding added_untracked-wc
- adding added_wc
- removing removed_clean
- removing removed_deleted
- removing removed_removed
- removing removed_revert
- removing removed_untracked-clean
- removing removed_untracked-revert
- removing removed_untracked-wc
- removing removed_wc
+ removing content1_missing_content1-tracked
+ removing content1_missing_content1-untracked
+ removing content1_missing_content3-tracked
+ removing content1_missing_content3-untracked
+ removing content1_missing_missing-tracked
+ removing content1_missing_missing-untracked
+ adding missing_content2_content2-tracked
+ adding missing_content2_content2-untracked
+ adding missing_content2_content3-tracked
+ adding missing_content2_content3-untracked
+ adding missing_content2_missing-tracked
+ adding missing_content2_missing-untracked
$ hg status
- M modified_clean
- M modified_deleted
- M modified_removed
- M modified_revert
- M modified_untracked-clean
- M modified_untracked-revert
- M modified_untracked-wc
- M modified_wc
- A added_clean
- A added_deleted
- A added_removed
- A added_revert
- A added_untracked-clean
- A added_untracked-revert
- A added_untracked-wc
- A added_wc
- R removed_clean
- R removed_deleted
- R removed_removed
- R removed_revert
- R removed_untracked-clean
- R removed_untracked-revert
- R removed_untracked-wc
- R removed_wc
+ M content1_content2_content1-tracked
+ M content1_content2_content1-untracked
+ M content1_content2_content2-tracked
+ M content1_content2_content2-untracked
+ M content1_content2_content3-tracked
+ M content1_content2_content3-untracked
+ M content1_content2_missing-tracked
+ M content1_content2_missing-untracked
+ A missing_content2_content2-tracked
+ A missing_content2_content2-untracked
+ A missing_content2_content3-tracked
+ A missing_content2_content3-untracked
+ A missing_content2_missing-tracked
+ A missing_content2_missing-untracked
+ R content1_missing_content1-tracked
+ R content1_missing_content1-untracked
+ R content1_missing_content3-tracked
+ R content1_missing_content3-untracked
+ R content1_missing_missing-tracked
+ R content1_missing_missing-untracked
$ hg commit -m 'parent'
(create a simple text version of the content)
$ python ../dircontent.py > ../content-parent.txt
$ cat ../content-parent.txt
- parent added_clean
- parent added_deleted
- parent added_removed
- parent added_revert
- parent added_untracked-clean
- parent added_untracked-revert
- parent added_untracked-wc
- parent added_wc
- base clean_clean
- base clean_deleted
- base clean_removed
- base clean_revert
- base clean_untracked-clean
- base clean_untracked-revert
- base clean_untracked-wc
- base clean_wc
- parent modified_clean
- parent modified_deleted
- parent modified_removed
- parent modified_revert
- parent modified_untracked-clean
- parent modified_untracked-revert
- parent modified_untracked-wc
- parent modified_wc
+ content1 content1_content1_content1-tracked
+ content1 content1_content1_content1-untracked
+ content1 content1_content1_content3-tracked
+ content1 content1_content1_content3-untracked
+ content1 content1_content1_missing-tracked
+ content1 content1_content1_missing-untracked
+ content2 content1_content2_content1-tracked
+ content2 content1_content2_content1-untracked
+ content2 content1_content2_content2-tracked
+ content2 content1_content2_content2-untracked
+ content2 content1_content2_content3-tracked
+ content2 content1_content2_content3-untracked
+ content2 content1_content2_missing-tracked
+ content2 content1_content2_missing-untracked
+ content2 missing_content2_content2-tracked
+ content2 missing_content2_content2-untracked
+ content2 missing_content2_content3-tracked
+ content2 missing_content2_content3-untracked
+ content2 missing_content2_missing-tracked
+ content2 missing_content2_missing-untracked
Setup working directory
- $ python ../gen-revert-cases.py wc | cat
+ $ python $TESTDIR/generate-working-copy-states.py state 2 wc
$ hg addremove --similarity 0
- removing added_removed
- removing added_revert
- removing added_untracked-revert
- removing clean_removed
- adding missing_deleted
- adding missing_untracked-wc
- adding missing_wc
- removing modified_removed
- adding removed_deleted
- adding removed_revert
- adding removed_untracked-revert
- adding removed_untracked-wc
- adding removed_wc
- $ hg forget *untracked*
- $ rm *deleted*
+ adding content1_missing_content1-tracked
+ adding content1_missing_content1-untracked
+ adding content1_missing_content3-tracked
+ adding content1_missing_content3-untracked
+ adding content1_missing_missing-tracked
+ adding content1_missing_missing-untracked
+ adding missing_missing_content3-tracked
+ adding missing_missing_content3-untracked
+ adding missing_missing_missing-tracked
+ adding missing_missing_missing-untracked
+ $ hg forget *_*_*-untracked
+ $ rm *_*_missing-*
$ hg status
- M added_wc
- M clean_wc
- M modified_revert
- M modified_wc
- A missing_wc
- A removed_revert
- A removed_wc
- R added_removed
- R added_revert
- R added_untracked-clean
- R added_untracked-revert
- R added_untracked-wc
- R clean_removed
- R clean_untracked-clean
- R clean_untracked-revert
- R clean_untracked-wc
- R modified_removed
- R modified_untracked-clean
- R modified_untracked-revert
- R modified_untracked-wc
- ! added_deleted
- ! clean_deleted
- ! missing_deleted
- ! modified_deleted
- ! removed_deleted
- ? missing_untracked-wc
- ? removed_untracked-revert
- ? removed_untracked-wc
+ M content1_content1_content3-tracked
+ M content1_content2_content1-tracked
+ M content1_content2_content3-tracked
+ M missing_content2_content3-tracked
+ A content1_missing_content1-tracked
+ A content1_missing_content3-tracked
+ A missing_missing_content3-tracked
+ R content1_content1_content1-untracked
+ R content1_content1_content3-untracked
+ R content1_content1_missing-untracked
+ R content1_content2_content1-untracked
+ R content1_content2_content2-untracked
+ R content1_content2_content3-untracked
+ R content1_content2_missing-untracked
+ R missing_content2_content2-untracked
+ R missing_content2_content3-untracked
+ R missing_content2_missing-untracked
+ ! content1_content1_missing-tracked
+ ! content1_content2_missing-tracked
+ ! content1_missing_missing-tracked
+ ! missing_content2_missing-tracked
+ ! missing_missing_missing-tracked
+ ? content1_missing_content1-untracked
+ ? content1_missing_content3-untracked
+ ? missing_missing_content3-untracked
$ hg status --rev 'desc("base")'
- M clean_wc
- M modified_clean
- M modified_wc
- M removed_wc
- A added_clean
- A added_wc
- A missing_wc
- R clean_removed
- R clean_untracked-clean
- R clean_untracked-revert
- R clean_untracked-wc
- R modified_removed
- R modified_untracked-clean
- R modified_untracked-revert
- R modified_untracked-wc
- R removed_clean
- R removed_deleted
- R removed_removed
- R removed_untracked-clean
- R removed_untracked-revert
- R removed_untracked-wc
- ! added_deleted
- ! clean_deleted
- ! missing_deleted
- ! modified_deleted
- ! removed_deleted
- ? missing_untracked-wc
+ M content1_content1_content3-tracked
+ M content1_content2_content2-tracked
+ M content1_content2_content3-tracked
+ M content1_missing_content3-tracked
+ A missing_content2_content2-tracked
+ A missing_content2_content3-tracked
+ A missing_missing_content3-tracked
+ R content1_content1_content1-untracked
+ R content1_content1_content3-untracked
+ R content1_content1_missing-untracked
+ R content1_content2_content1-untracked
+ R content1_content2_content2-untracked
+ R content1_content2_content3-untracked
+ R content1_content2_missing-untracked
+ R content1_missing_content1-untracked
+ R content1_missing_content3-untracked
+ R content1_missing_missing-tracked
+ R content1_missing_missing-untracked
+ ! content1_content1_missing-tracked
+ ! content1_content2_missing-tracked
+ ! content1_missing_missing-tracked
+ ! missing_content2_missing-tracked
+ ! missing_missing_missing-tracked
+ ? missing_missing_content3-untracked
(create a simple text version of the content)
$ python ../dircontent.py > ../content-wc.txt
$ cat ../content-wc.txt
- parent added_clean
- parent added_untracked-clean
- wc added_untracked-wc
- wc added_wc
- base clean_clean
- base clean_revert
- base clean_untracked-clean
- base clean_untracked-revert
- wc clean_untracked-wc
- wc clean_wc
- wc missing_untracked-wc
- wc missing_wc
- parent modified_clean
- base modified_revert
- parent modified_untracked-clean
- base modified_untracked-revert
- wc modified_untracked-wc
- wc modified_wc
- base removed_revert
- base removed_untracked-revert
- wc removed_untracked-wc
- wc removed_wc
+ content1 content1_content1_content1-tracked
+ content1 content1_content1_content1-untracked
+ content3 content1_content1_content3-tracked
+ content3 content1_content1_content3-untracked
+ content1 content1_content2_content1-tracked
+ content1 content1_content2_content1-untracked
+ content2 content1_content2_content2-tracked
+ content2 content1_content2_content2-untracked
+ content3 content1_content2_content3-tracked
+ content3 content1_content2_content3-untracked
+ content1 content1_missing_content1-tracked
+ content1 content1_missing_content1-untracked
+ content3 content1_missing_content3-tracked
+ content3 content1_missing_content3-untracked
+ content2 missing_content2_content2-tracked
+ content2 missing_content2_content2-untracked
+ content3 missing_content2_content3-tracked
+ content3 missing_content2_content3-untracked
+ content3 missing_missing_content3-tracked
+ content3 missing_missing_content3-untracked
$ cd ..
@@ -855,31 +726,28 @@
check revert output
$ hg revert --all
- reverting added_deleted
- undeleting added_removed
- undeleting added_revert
- undeleting added_untracked-clean
- undeleting added_untracked-revert
- undeleting added_untracked-wc
- reverting added_wc
- reverting clean_deleted
- undeleting clean_removed
- undeleting clean_untracked-clean
- undeleting clean_untracked-revert
- undeleting clean_untracked-wc
- reverting clean_wc
- forgetting missing_deleted
- forgetting missing_wc
- reverting modified_deleted
- undeleting modified_removed
- reverting modified_revert
- undeleting modified_untracked-clean
- undeleting modified_untracked-revert
- undeleting modified_untracked-wc
- reverting modified_wc
- forgetting removed_deleted
- forgetting removed_revert
- forgetting removed_wc
+ undeleting content1_content1_content1-untracked
+ reverting content1_content1_content3-tracked
+ undeleting content1_content1_content3-untracked
+ reverting content1_content1_missing-tracked
+ undeleting content1_content1_missing-untracked
+ reverting content1_content2_content1-tracked
+ undeleting content1_content2_content1-untracked
+ undeleting content1_content2_content2-untracked
+ reverting content1_content2_content3-tracked
+ undeleting content1_content2_content3-untracked
+ reverting content1_content2_missing-tracked
+ undeleting content1_content2_missing-untracked
+ forgetting content1_missing_content1-tracked
+ forgetting content1_missing_content3-tracked
+ forgetting content1_missing_missing-tracked
+ undeleting missing_content2_content2-untracked
+ reverting missing_content2_content3-tracked
+ undeleting missing_content2_content3-untracked
+ reverting missing_content2_missing-tracked
+ undeleting missing_content2_missing-untracked
+ forgetting missing_missing_content3-tracked
+ forgetting missing_missing_missing-tracked
Compare resulting directory with revert target.
@@ -889,20 +757,20 @@
$ python ../dircontent.py > ../content-parent-all.txt
$ cd ..
$ diff -U 0 -- content-parent.txt content-parent-all.txt | grep _
- +wc added_untracked-wc.orig
- +wc added_wc.orig
- +wc clean_untracked-wc.orig
- +wc clean_wc.orig
- +wc missing_untracked-wc
- +wc missing_wc
- +base modified_revert.orig
- +base modified_untracked-revert.orig
- +wc modified_untracked-wc.orig
- +wc modified_wc.orig
- +base removed_revert
- +base removed_untracked-revert
- +wc removed_untracked-wc
- +wc removed_wc
+ +content3 content1_content1_content3-tracked.orig
+ +content3 content1_content1_content3-untracked.orig
+ +content1 content1_content2_content1-tracked.orig
+ +content1 content1_content2_content1-untracked.orig
+ +content3 content1_content2_content3-tracked.orig
+ +content3 content1_content2_content3-untracked.orig
+ +content1 content1_missing_content1-tracked
+ +content1 content1_missing_content1-untracked
+ +content3 content1_missing_content3-tracked
+ +content3 content1_missing_content3-untracked
+ +content3 missing_content2_content3-tracked.orig
+ +content3 missing_content2_content3-untracked.orig
+ +content3 missing_missing_content3-tracked
+ +content3 missing_missing_content3-untracked
Test revert --all to "base" content
-----------------------------------
@@ -915,31 +783,28 @@
check revert output
$ hg revert --all --rev 'desc(base)'
- removing added_clean
- removing added_deleted
- removing added_wc
- reverting clean_deleted
- undeleting clean_removed
- undeleting clean_untracked-clean
- undeleting clean_untracked-revert
- undeleting clean_untracked-wc
- reverting clean_wc
- forgetting missing_deleted
- forgetting missing_wc
- reverting modified_clean
- reverting modified_deleted
- undeleting modified_removed
- undeleting modified_untracked-clean
- undeleting modified_untracked-revert
- undeleting modified_untracked-wc
- reverting modified_wc
- adding removed_clean
- reverting removed_deleted
- adding removed_removed
- adding removed_untracked-clean
- adding removed_untracked-revert
- adding removed_untracked-wc
- reverting removed_wc
+ undeleting content1_content1_content1-untracked
+ reverting content1_content1_content3-tracked
+ undeleting content1_content1_content3-untracked
+ reverting content1_content1_missing-tracked
+ undeleting content1_content1_missing-untracked
+ undeleting content1_content2_content1-untracked
+ reverting content1_content2_content2-tracked
+ undeleting content1_content2_content2-untracked
+ reverting content1_content2_content3-tracked
+ undeleting content1_content2_content3-untracked
+ reverting content1_content2_missing-tracked
+ undeleting content1_content2_missing-untracked
+ adding content1_missing_content1-untracked
+ reverting content1_missing_content3-tracked
+ adding content1_missing_content3-untracked
+ reverting content1_missing_missing-tracked
+ adding content1_missing_missing-untracked
+ removing missing_content2_content2-tracked
+ removing missing_content2_content3-tracked
+ removing missing_content2_missing-tracked
+ forgetting missing_missing_content3-tracked
+ forgetting missing_missing_missing-tracked
Compare resulting directory with revert target.
@@ -949,18 +814,18 @@
$ python ../dircontent.py > ../content-base-all.txt
$ cd ..
$ diff -U 0 -- content-base.txt content-base-all.txt | grep _
- +parent added_untracked-clean
- +wc added_untracked-wc
- +wc added_wc.orig
- +wc clean_untracked-wc.orig
- +wc clean_wc.orig
- +wc missing_untracked-wc
- +wc missing_wc
- +parent modified_untracked-clean.orig
- +wc modified_untracked-wc.orig
- +wc modified_wc.orig
- +wc removed_untracked-wc.orig
- +wc removed_wc.orig
+ +content3 content1_content1_content3-tracked.orig
+ +content3 content1_content1_content3-untracked.orig
+ +content2 content1_content2_content2-untracked.orig
+ +content3 content1_content2_content3-tracked.orig
+ +content3 content1_content2_content3-untracked.orig
+ +content3 content1_missing_content3-tracked.orig
+ +content3 content1_missing_content3-untracked.orig
+ +content2 missing_content2_content2-untracked
+ +content3 missing_content2_content3-tracked.orig
+ +content3 missing_content2_content3-untracked
+ +content3 missing_missing_content3-tracked
+ +content3 missing_missing_content3-untracked
Test revert to parent content with explicit file name
-----------------------------------------------------
@@ -973,108 +838,81 @@
revert all files individually and check the output
(output is expected to be different than in the --all case)
- $ for file in `python ../gen-revert-cases.py filelist`; do
+ $ for file in `python $TESTDIR/generate-working-copy-states.py filelist 2`; do
> echo '### revert for:' $file;
> hg revert $file;
> echo
> done
- ### revert for: added_clean
- no changes needed to added_clean
-
- ### revert for: added_deleted
+ ### revert for: content1_content1_content1-tracked
+ no changes needed to content1_content1_content1-tracked
- ### revert for: added_removed
+ ### revert for: content1_content1_content1-untracked
- ### revert for: added_revert
-
- ### revert for: added_untracked-clean
+ ### revert for: content1_content1_content3-tracked
- ### revert for: added_untracked-revert
+ ### revert for: content1_content1_content3-untracked
- ### revert for: added_untracked-wc
+ ### revert for: content1_content1_missing-tracked
- ### revert for: added_wc
+ ### revert for: content1_content1_missing-untracked
- ### revert for: clean_clean
- no changes needed to clean_clean
-
- ### revert for: clean_deleted
+ ### revert for: content1_content2_content1-tracked
- ### revert for: clean_removed
-
- ### revert for: clean_revert
- no changes needed to clean_revert
+ ### revert for: content1_content2_content1-untracked
- ### revert for: clean_untracked-clean
+ ### revert for: content1_content2_content2-tracked
+ no changes needed to content1_content2_content2-tracked
- ### revert for: clean_untracked-revert
-
- ### revert for: clean_untracked-wc
+ ### revert for: content1_content2_content2-untracked
- ### revert for: clean_wc
+ ### revert for: content1_content2_content3-tracked
- ### revert for: missing_clean
- missing_clean: no such file in rev * (glob)
+ ### revert for: content1_content2_content3-untracked
- ### revert for: missing_deleted
+ ### revert for: content1_content2_missing-tracked
- ### revert for: missing_removed
- missing_removed: no such file in rev * (glob)
+ ### revert for: content1_content2_missing-untracked
- ### revert for: missing_revert
- missing_revert: no such file in rev * (glob)
+ ### revert for: content1_missing_content1-tracked
- ### revert for: missing_untracked-clean
- missing_untracked-clean: no such file in rev * (glob)
+ ### revert for: content1_missing_content1-untracked
+ file not managed: content1_missing_content1-untracked
- ### revert for: missing_untracked-revert
- missing_untracked-revert: no such file in rev * (glob)
+ ### revert for: content1_missing_content3-tracked
- ### revert for: missing_untracked-wc
- file not managed: missing_untracked-wc
-
- ### revert for: missing_wc
+ ### revert for: content1_missing_content3-untracked
+ file not managed: content1_missing_content3-untracked
- ### revert for: modified_clean
- no changes needed to modified_clean
+ ### revert for: content1_missing_missing-tracked
- ### revert for: modified_deleted
+ ### revert for: content1_missing_missing-untracked
+ content1_missing_missing-untracked: no such file in rev * (glob)
- ### revert for: modified_removed
-
- ### revert for: modified_revert
+ ### revert for: missing_content2_content2-tracked
+ no changes needed to missing_content2_content2-tracked
- ### revert for: modified_untracked-clean
-
- ### revert for: modified_untracked-revert
+ ### revert for: missing_content2_content2-untracked
- ### revert for: modified_untracked-wc
+ ### revert for: missing_content2_content3-tracked
- ### revert for: modified_wc
+ ### revert for: missing_content2_content3-untracked
- ### revert for: removed_clean
- removed_clean: no such file in rev * (glob)
+ ### revert for: missing_content2_missing-tracked
- ### revert for: removed_deleted
-
- ### revert for: removed_removed
- removed_removed: no such file in rev * (glob)
+ ### revert for: missing_content2_missing-untracked
- ### revert for: removed_revert
+ ### revert for: missing_missing_content3-tracked
- ### revert for: removed_untracked-clean
- removed_untracked-clean: no such file in rev * (glob)
+ ### revert for: missing_missing_content3-untracked
+ file not managed: missing_missing_content3-untracked
- ### revert for: removed_untracked-revert
- file not managed: removed_untracked-revert
+ ### revert for: missing_missing_missing-tracked
- ### revert for: removed_untracked-wc
- file not managed: removed_untracked-wc
-
- ### revert for: removed_wc
+ ### revert for: missing_missing_missing-untracked
+ missing_missing_missing-untracked: no such file in rev * (glob)
-check resulting directory againt the --all run
+check resulting directory against the --all run
(There should be no difference)
$ python ../dircontent.py > ../content-parent-explicit.txt
@@ -1093,108 +931,81 @@
revert all files individually and check the output
(output is expected to be different than in the --all case)
- $ for file in `python ../gen-revert-cases.py filelist`; do
+ $ for file in `python $TESTDIR/generate-working-copy-states.py filelist 2`; do
> echo '### revert for:' $file;
> hg revert $file --rev 'desc(base)';
> echo
> done
- ### revert for: added_clean
-
- ### revert for: added_deleted
+ ### revert for: content1_content1_content1-tracked
+ no changes needed to content1_content1_content1-tracked
- ### revert for: added_removed
- no changes needed to added_removed
+ ### revert for: content1_content1_content1-untracked
- ### revert for: added_revert
- no changes needed to added_revert
+ ### revert for: content1_content1_content3-tracked
- ### revert for: added_untracked-clean
- no changes needed to added_untracked-clean
+ ### revert for: content1_content1_content3-untracked
+
+ ### revert for: content1_content1_missing-tracked
- ### revert for: added_untracked-revert
- no changes needed to added_untracked-revert
+ ### revert for: content1_content1_missing-untracked
- ### revert for: added_untracked-wc
- no changes needed to added_untracked-wc
-
- ### revert for: added_wc
+ ### revert for: content1_content2_content1-tracked
+ no changes needed to content1_content2_content1-tracked
- ### revert for: clean_clean
- no changes needed to clean_clean
+ ### revert for: content1_content2_content1-untracked
- ### revert for: clean_deleted
+ ### revert for: content1_content2_content2-tracked
- ### revert for: clean_removed
+ ### revert for: content1_content2_content2-untracked
- ### revert for: clean_revert
- no changes needed to clean_revert
-
- ### revert for: clean_untracked-clean
+ ### revert for: content1_content2_content3-tracked
- ### revert for: clean_untracked-revert
+ ### revert for: content1_content2_content3-untracked
- ### revert for: clean_untracked-wc
-
- ### revert for: clean_wc
+ ### revert for: content1_content2_missing-tracked
- ### revert for: missing_clean
- missing_clean: no such file in rev * (glob)
+ ### revert for: content1_content2_missing-untracked
- ### revert for: missing_deleted
-
- ### revert for: missing_removed
- missing_removed: no such file in rev * (glob)
+ ### revert for: content1_missing_content1-tracked
+ no changes needed to content1_missing_content1-tracked
- ### revert for: missing_revert
- missing_revert: no such file in rev * (glob)
+ ### revert for: content1_missing_content1-untracked
+
+ ### revert for: content1_missing_content3-tracked
- ### revert for: missing_untracked-clean
- missing_untracked-clean: no such file in rev * (glob)
+ ### revert for: content1_missing_content3-untracked
- ### revert for: missing_untracked-revert
- missing_untracked-revert: no such file in rev * (glob)
+ ### revert for: content1_missing_missing-tracked
- ### revert for: missing_untracked-wc
- file not managed: missing_untracked-wc
+ ### revert for: content1_missing_missing-untracked
- ### revert for: missing_wc
-
- ### revert for: modified_clean
+ ### revert for: missing_content2_content2-tracked
- ### revert for: modified_deleted
+ ### revert for: missing_content2_content2-untracked
+ no changes needed to missing_content2_content2-untracked
- ### revert for: modified_removed
-
- ### revert for: modified_revert
- no changes needed to modified_revert
+ ### revert for: missing_content2_content3-tracked
- ### revert for: modified_untracked-clean
-
- ### revert for: modified_untracked-revert
-
- ### revert for: modified_untracked-wc
+ ### revert for: missing_content2_content3-untracked
+ no changes needed to missing_content2_content3-untracked
- ### revert for: modified_wc
+ ### revert for: missing_content2_missing-tracked
- ### revert for: removed_clean
-
- ### revert for: removed_deleted
+ ### revert for: missing_content2_missing-untracked
+ no changes needed to missing_content2_missing-untracked
- ### revert for: removed_removed
+ ### revert for: missing_missing_content3-tracked
- ### revert for: removed_revert
- no changes needed to removed_revert
+ ### revert for: missing_missing_content3-untracked
+ file not managed: missing_missing_content3-untracked
- ### revert for: removed_untracked-clean
-
- ### revert for: removed_untracked-revert
+ ### revert for: missing_missing_missing-tracked
- ### revert for: removed_untracked-wc
-
- ### revert for: removed_wc
+ ### revert for: missing_missing_missing-untracked
+ missing_missing_missing-untracked: no such file in rev * (glob)
-check resulting directory againt the --all run
+check resulting directory against the --all run
(There should be no difference)
$ python ../dircontent.py > ../content-base-explicit.txt
--- a/tests/test-revlog-packentry.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-revlog-packentry.t Tue Dec 09 13:32:19 2014 -0600
@@ -18,6 +18,6 @@
$ hg debugindex foo
rev offset length ..... linkrev nodeid p1 p2 (re)
0 0 0 ..... 0 b80de5d13875 000000000000 000000000000 (re)
- 1 0 24 ..... 1 0376abec49b8 000000000000 000000000000 (re)
+ 1 0 13 ..... 1 0376abec49b8 000000000000 000000000000 (re)
$ cd ..
--- a/tests/test-run-tests.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-run-tests.t Tue Dec 09 13:32:19 2014 -0600
@@ -33,8 +33,8 @@
$ $TESTDIR/run-tests.py --with-hg=`which hg`
- --- $TESTTMP/test-failure.t (glob)
- +++ $TESTTMP/test-failure.t.err (glob)
+ --- $TESTTMP/test-failure.t
+ +++ $TESTTMP/test-failure.t.err
@@ -1,4 +1,4 @@
$ echo babar
- rataxes
@@ -87,8 +87,8 @@
$ $TESTDIR/run-tests.py --with-hg=`which hg` --retest
- --- $TESTTMP/test-failure.t (glob)
- +++ $TESTTMP/test-failure.t.err (glob)
+ --- $TESTTMP/test-failure.t
+ +++ $TESTTMP/test-failure.t.err
@@ -1,4 +1,4 @@
$ echo babar
- rataxes
@@ -121,8 +121,8 @@
$ $TESTDIR/run-tests.py --with-hg=`which hg` test-failure.t
- --- $TESTTMP/test-failure.t (glob)
- +++ $TESTTMP/test-failure.t.err (glob)
+ --- $TESTTMP/test-failure.t
+ +++ $TESTTMP/test-failure.t.err
@@ -1,4 +1,4 @@
$ echo babar
- rataxes
@@ -274,7 +274,7 @@
Interactive with custom view
$ echo 'n' | $TESTDIR/run-tests.py --with-hg=`which hg` -i --view echo
- $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
+ $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
Accept this change? [n]* (glob)
ERROR: test-failure.t output changed
!.
@@ -286,7 +286,7 @@
View the fix
$ echo 'y' | $TESTDIR/run-tests.py --with-hg=`which hg` --view echo
- $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
+ $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
ERROR: test-failure.t output changed
!.
--- a/tests/test-setdiscovery.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-setdiscovery.t Tue Dec 09 13:32:19 2014 -0600
@@ -357,3 +357,52 @@
$ cat errors.log
$ cd ..
+
+
+Issue 4438 - test coverage for 3ef893520a85 issues.
+
+ $ mkdir issue4438
+ $ cd issue4438
+#if false
+generate new bundles:
+ $ hg init r1
+ $ for i in `seq 101`; do hg -R r1 up -qr null && hg -R r1 branch -q b$i && hg -R r1 ci -qmb$i; done
+ $ hg clone -q r1 r2
+ $ for i in `seq 10`; do hg -R r1 up -qr null && hg -R r1 branch -q c$i && hg -R r1 ci -qmc$i; done
+ $ hg -R r2 branch -q r2change && hg -R r2 ci -qmr2change
+ $ hg -R r1 bundle -qa $TESTDIR/bundles/issue4438-r1.hg
+ $ hg -R r2 bundle -qa $TESTDIR/bundles/issue4438-r2.hg
+#else
+use existing bundles:
+ $ hg clone -q $TESTDIR/bundles/issue4438-r1.hg r1
+ $ hg clone -q $TESTDIR/bundles/issue4438-r2.hg r2
+#endif
+
+Set iteration order could cause wrong and unstable results - fixed in 73cfaa348650:
+
+ $ hg -R r1 outgoing r2 -T'{rev} '
+ comparing with r2
+ searching for changes
+ 101 102 103 104 105 106 107 108 109 110 (no-eol)
+
+The case where all the 'initialsamplesize' samples already were common would
+give 'all remote heads known locally' without checking the remaining heads -
+fixed in 86c35b7ae300:
+
+ $ cat >> $TESTTMP/unrandomsample.py << EOF
+ > import random
+ > def sample(population, k):
+ > return sorted(population)[:k]
+ > random.sample = sample
+ > EOF
+
+ $ cat >> r1/.hg/hgrc << EOF
+ > [extensions]
+ > unrandomsample = $TESTTMP/unrandomsample.py
+ > EOF
+
+ $ hg -R r1 outgoing r2 -T'{rev} '
+ comparing with r2
+ searching for changes
+ 101 102 103 104 105 106 107 108 109 110 (no-eol)
+ $ cd ..
--- a/tests/test-shelve.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-shelve.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,9 +1,11 @@
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "mq=" >> $HGRCPATH
- $ echo "shelve=" >> $HGRCPATH
- $ echo "[defaults]" >> $HGRCPATH
- $ echo "diff = --nodates --git" >> $HGRCPATH
- $ echo "qnew = --date '0 0'" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > shelve =
+ > [defaults]
+ > diff = --nodates --git
+ > qnew = --date '0 0'
+ > EOF
$ hg init repo
$ cd repo
--- a/tests/test-status-color.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-status-color.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,7 +1,9 @@
- $ echo "[extensions]" >> $HGRCPATH
- $ echo "color=" >> $HGRCPATH
- $ echo "[color]" >> $HGRCPATH
- $ echo "mode=ansi" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > color =
+ > [color]
+ > mode = ansi
+ > EOF
Terminfo codes compatibility fix
$ echo "color.none=0" >> $HGRCPATH
--- a/tests/test-status-rev.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-status-rev.t Tue Dec 09 13:32:19 2014 -0600
@@ -1,156 +1,166 @@
Tests of 'hg status --rev <rev>' to make sure status between <rev> and '.' get
combined correctly with the dirstate status.
-Sets up a history for a number of files where the filename describes the file's
-history. The first two letters of the filename describe the first two commits;
-the third letter describes the dirstate for the file. For example, a file called
-'amr' was added in the first commit, modified in the second and then removed in
-the dirstate.
+ $ hg init
-These codes are used for commits:
-x: does not exist
-a: added
-c: clean
-m: modified
-r: removed
+First commit
-These codes are used for dirstate:
-d: in dirstate, but deleted from disk
-f: removed from dirstate, but file exists (forgotten)
-r: removed from dirstate and disk
-q: added, but deleted from disk (q for q-rious?)
-u: not in dirstate, but file exists (unknown)
+ $ python $TESTDIR/generate-working-copy-states.py state 2 1
+ $ hg addremove --similarity 0
+ adding content1_content1_content1-tracked
+ adding content1_content1_content1-untracked
+ adding content1_content1_content3-tracked
+ adding content1_content1_content3-untracked
+ adding content1_content1_missing-tracked
+ adding content1_content1_missing-untracked
+ adding content1_content2_content1-tracked
+ adding content1_content2_content1-untracked
+ adding content1_content2_content2-tracked
+ adding content1_content2_content2-untracked
+ adding content1_content2_content3-tracked
+ adding content1_content2_content3-untracked
+ adding content1_content2_missing-tracked
+ adding content1_content2_missing-untracked
+ adding content1_missing_content1-tracked
+ adding content1_missing_content1-untracked
+ adding content1_missing_content3-tracked
+ adding content1_missing_content3-untracked
+ adding content1_missing_missing-tracked
+ adding content1_missing_missing-untracked
+ $ hg commit -m first
- $ hg init
- $ touch .hgignore
- $ hg add .hgignore
- $ hg commit -m initial
-
-First letter: first commit
+Second commit
- $ echo a >acc
- $ echo a >acd
- $ echo a >acf
- $ echo a >acm
- $ echo a >acr
- $ echo a >amc
- $ echo a >amd
- $ echo a >amf
- $ echo a >amm
- $ echo a >amr
- $ echo a >ara
- $ echo a >arq
- $ echo a >aru
- $ hg commit -Aqm first
-
-Second letter: second commit
+ $ python $TESTDIR/generate-working-copy-states.py state 2 2
+ $ hg addremove --similarity 0
+ removing content1_missing_content1-tracked
+ removing content1_missing_content1-untracked
+ removing content1_missing_content3-tracked
+ removing content1_missing_content3-untracked
+ removing content1_missing_missing-tracked
+ removing content1_missing_missing-untracked
+ adding missing_content2_content2-tracked
+ adding missing_content2_content2-untracked
+ adding missing_content2_content3-tracked
+ adding missing_content2_content3-untracked
+ adding missing_content2_missing-tracked
+ adding missing_content2_missing-untracked
+ $ hg commit -m second
- $ echo b >xad
- $ echo b >xaf
- $ echo b >xam
- $ echo b >xar
- $ echo b >amc
- $ echo b >amd
- $ echo b >amf
- $ echo b >amm
- $ echo b >amr
- $ hg rm ara
- $ hg rm arq
- $ hg rm aru
- $ hg commit -Aqm second
-
-Third letter: dirstate
+Working copy
- $ echo c >acm
- $ echo c >amm
- $ echo c >xam
- $ echo c >ara && hg add ara
- $ echo c >arq && hg add arq && rm arq
- $ echo c >aru
- $ hg rm amr
- $ hg rm acr
- $ hg rm xar
- $ rm acd
- $ rm amd
- $ rm xad
- $ hg forget acf
- $ hg forget amf
- $ hg forget xaf
- $ touch xxu
+ $ python $TESTDIR/generate-working-copy-states.py state 2 wc
+ $ hg addremove --similarity 0
+ adding content1_missing_content1-tracked
+ adding content1_missing_content1-untracked
+ adding content1_missing_content3-tracked
+ adding content1_missing_content3-untracked
+ adding content1_missing_missing-tracked
+ adding content1_missing_missing-untracked
+ adding missing_missing_content3-tracked
+ adding missing_missing_content3-untracked
+ adding missing_missing_missing-tracked
+ adding missing_missing_missing-untracked
+ $ hg forget *_*_*-untracked
+ $ rm *_*_missing-*
+
+Status compared to parent of the working copy, i.e. the dirstate status
-Status compared to one revision back
+ $ hg status -A --rev 1 'glob:missing_content2_content3-tracked'
+ M missing_content2_content3-tracked
+ $ hg status -A --rev 1 'glob:missing_content2_content2-tracked'
+ C missing_content2_content2-tracked
+ $ hg status -A --rev 1 'glob:missing_missing_content3-tracked'
+ A missing_missing_content3-tracked
+ $ hg status -A --rev 1 'glob:missing_missing_content3-untracked'
+ ? missing_missing_content3-untracked
+ $ hg status -A --rev 1 'glob:missing_content2_*-untracked'
+ R missing_content2_content2-untracked
+ R missing_content2_content3-untracked
+ R missing_content2_missing-untracked
+ $ hg status -A --rev 1 'glob:missing_*_missing-tracked'
+ ! missing_content2_missing-tracked
+ ! missing_missing_missing-tracked
+#if windows
+ $ hg status -A --rev 1 'glob:missing_missing_missing-untracked'
+ missing_missing_missing-untracked: The system cannot find the file specified
+#else
+ $ hg status -A --rev 1 'glob:missing_missing_missing-untracked'
+ missing_missing_missing-untracked: No such file or directory
+#endif
+
+Status between first and second commit. Should ignore dirstate status.
- $ hg status -A --rev 1 acc
- C acc
-BROKEN: file appears twice; should be '!'
- $ hg status -A --rev 1 acd
- ! acd
- C acd
- $ hg status -A --rev 1 acf
- R acf
- $ hg status -A --rev 1 acm
- M acm
- $ hg status -A --rev 1 acr
- R acr
- $ hg status -A --rev 1 amc
- M amc
-BROKEN: file appears twice; should be '!'
- $ hg status -A --rev 1 amd
- ! amd
- C amd
- $ hg status -A --rev 1 amf
- R amf
- $ hg status -A --rev 1 amm
- M amm
- $ hg status -A --rev 1 amr
- R amr
- $ hg status -A --rev 1 ara
- M ara
-BROKEN: file appears twice; should be '!'
- $ hg status -A --rev 1 arq
- R arq
- ! arq
- $ hg status -A --rev 1 aru
- R aru
- $ hg status -A --rev 1 xad
- ! xad
- $ hg status -A --rev 1 xaf
- $ hg status -A --rev 1 xam
- A xam
- $ hg status -A --rev 1 xar
- $ hg status -A --rev 1 xxu
- ? xxu
+ $ hg status -A --rev 0:1 'glob:content1_content2_*'
+ M content1_content2_content1-tracked
+ M content1_content2_content1-untracked
+ M content1_content2_content2-tracked
+ M content1_content2_content2-untracked
+ M content1_content2_content3-tracked
+ M content1_content2_content3-untracked
+ M content1_content2_missing-tracked
+ M content1_content2_missing-untracked
+ $ hg status -A --rev 0:1 'glob:content1_content1_*'
+ C content1_content1_content1-tracked
+ C content1_content1_content1-untracked
+ C content1_content1_content3-tracked
+ C content1_content1_content3-untracked
+ C content1_content1_missing-tracked
+ C content1_content1_missing-untracked
+ $ hg status -A --rev 0:1 'glob:missing_content2_*'
+ A missing_content2_content2-tracked
+ A missing_content2_content2-untracked
+ A missing_content2_content3-tracked
+ A missing_content2_content3-untracked
+ A missing_content2_missing-tracked
+ A missing_content2_missing-untracked
+ $ hg status -A --rev 0:1 'glob:content1_missing_*'
+ R content1_missing_content1-tracked
+ R content1_missing_content1-untracked
+ R content1_missing_content3-tracked
+ R content1_missing_content3-untracked
+ R content1_missing_missing-tracked
+ R content1_missing_missing-untracked
+ $ hg status -A --rev 0:1 'glob:missing_missing_*'
+
+Status compared to one revision back, checking that the dirstate status
+is correctly combined with the inter-revision status
-Status compared to two revisions back
-
- $ hg status -A --rev 0 acc
- A acc
- $ hg status -A --rev 0 acd
- ! acd
-BROKEN: file exists, so should be listed (as '?')
- $ hg status -A --rev 0 acf
- $ hg status -A --rev 0 acm
- A acm
- $ hg status -A --rev 0 acr
- $ hg status -A --rev 0 amc
- A amc
- $ hg status -A --rev 0 amd
- ! amd
-BROKEN: file exists, so should be listed (as '?')
- $ hg status -A --rev 0 amf
- $ hg status -A --rev 0 amm
- A amm
- $ hg status -A --rev 0 amr
- $ hg status -A --rev 0 ara
- A ara
- $ hg status -A --rev 0 arq
- ! arq
- $ hg status -A --rev 0 aru
- ? aru
- $ hg status -A --rev 0 xad
- ! xad
-BROKEN: file exists, so should be listed (as '?')
- $ hg status -A --rev 0 xaf
- $ hg status -A --rev 0 xam
- A xam
- $ hg status -A --rev 0 xar
+ $ hg status -A --rev 0 'glob:content1_*_content[23]-tracked'
+ M content1_content1_content3-tracked
+ M content1_content2_content2-tracked
+ M content1_content2_content3-tracked
+ M content1_missing_content3-tracked
+ $ hg status -A --rev 0 'glob:content1_*_content1-tracked'
+ C content1_content1_content1-tracked
+ C content1_content2_content1-tracked
+ C content1_missing_content1-tracked
+ $ hg status -A --rev 0 'glob:missing_*_content?-tracked'
+ A missing_content2_content2-tracked
+ A missing_content2_content3-tracked
+ A missing_missing_content3-tracked
+BROKEN: missing_content2_content[23]-untracked exist, so should be listed
+ $ hg status -A --rev 0 'glob:missing_*_content?-untracked'
+ ? missing_missing_content3-untracked
+ $ hg status -A --rev 0 'glob:content1_*_*-untracked'
+ R content1_content1_content1-untracked
+ R content1_content1_content3-untracked
+ R content1_content1_missing-untracked
+ R content1_content2_content1-untracked
+ R content1_content2_content2-untracked
+ R content1_content2_content3-untracked
+ R content1_content2_missing-untracked
+ R content1_missing_content1-untracked
+ R content1_missing_content3-untracked
+ R content1_missing_missing-untracked
+BROKEN: content1_*_missing-tracked appear twice; should just be '!'
+ $ hg status -A --rev 0 'glob:*_*_missing-tracked'
+ R content1_missing_missing-tracked
+ ! content1_content1_missing-tracked
+ ! content1_content2_missing-tracked
+ ! content1_missing_missing-tracked
+ ! missing_content2_missing-tracked
+ ! missing_missing_missing-tracked
+ C content1_content1_missing-tracked
+ C content1_content2_missing-tracked
+ $ hg status -A --rev 0 'glob:missing_*_missing-untracked'
--- a/tests/test-subrepo-deep-nested-change.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-subrepo-deep-nested-change.t Tue Dec 09 13:32:19 2014 -0600
@@ -110,6 +110,25 @@
$ hg ci -Sm "add test.txt"
committing subrepository sub1
committing subrepository sub1/sub2 (glob)
+
+.. but first take a detour through some deep removal testing
+
+ $ hg remove -S -I 're:.*.txt' .
+ removing sub1/sub2/folder/test.txt (glob)
+ removing sub1/sub2/test.txt (glob)
+ $ hg status -S
+ R sub1/sub2/folder/test.txt
+ R sub1/sub2/test.txt
+ $ hg update -Cq
+ $ hg remove -I 're:.*.txt' sub1
+ $ hg status -S
+ $ hg remove sub1/sub2/folder/test.txt
+ $ hg remove sub1/.hgsubstate
+ $ hg status -S
+ R sub1/.hgsubstate
+ R sub1/sub2/folder/test.txt
+ $ hg update -Cq
+
$ hg --config extensions.largefiles=! archive -S ../archive_all
$ find ../archive_all | sort
../archive_all
--- a/tests/test-subrepo-git.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-subrepo-git.t Tue Dec 09 13:32:19 2014 -0600
@@ -119,7 +119,10 @@
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd ../tb/s
+ $ hg status --subrepos
$ echo f > f
+ $ hg status --subrepos
+ ? s/f
$ git add f
$ cd ..
@@ -422,6 +425,7 @@
$ hg status -S
M s/g
A s/f1
+ ? s/f2
$ ls s
f
f1
@@ -430,6 +434,8 @@
$ hg update --clean
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg status -S
+ ? s/f1
+ ? s/f2
$ ls s
f
f1
--- a/tests/test-subrepo.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-subrepo.t Tue Dec 09 13:32:19 2014 -0600
@@ -1324,7 +1324,7 @@
$ echo phasecheck4 >> t/t
$ hg commit -S -m phasecheck4
committing subrepository s
- committing subrepository s/ss
+ committing subrepository s/ss (glob)
warning: changes are committed in secret phase from subrepository ss
committing subrepository t
warning: changes are committed in secret phase from subrepository s
--- a/tests/test-symlinks.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-symlinks.t Tue Dec 09 13:32:19 2014 -0600
@@ -3,12 +3,18 @@
== tests added in 0.7 ==
$ hg init test-symlinks-0.7; cd test-symlinks-0.7;
- $ touch foo; ln -s foo bar;
+ $ touch foo; ln -s foo bar; ln -s nonexistent baz
+
+import with add and addremove -- symlink walking should _not_ screwup.
-import with addremove -- symlink walking should _not_ screwup.
-
+ $ hg add
+ adding bar
+ adding baz
+ adding foo
+ $ hg forget bar baz foo
$ hg addremove
adding bar
+ adding baz
adding foo
commit -- the symlink should _not_ appear added to dir state
--- a/tests/test-tag.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-tag.t Tue Dec 09 13:32:19 2014 -0600
@@ -479,7 +479,7 @@
4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
79505d5360b07e3e79d1052e347e73c02b8afa5b t3
-check that the merge tried to minimize the diff witht he first merge parent
+check that the merge tried to minimize the diff with the first merge parent
$ hg diff --git -r 'p1()' .hgtags
diff --git a/.hgtags b/.hgtags
--- a/tests/test-transplant.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-transplant.t Tue Dec 09 13:32:19 2014 -0600
@@ -230,7 +230,8 @@
(transplanted from e234d668f844e1b1a765f01db83a32c0c7bfa170)
1 r2
0 r1
-remote transplant
+remote transplant, and also test that transplant doesn't break with
+format-breaking diffopts
$ hg clone -r 1 ../t ../remote
adding changesets
@@ -240,7 +241,7 @@
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd ../remote
- $ hg transplant --log -s ../t 2 4
+ $ hg --config diff.noprefix=True transplant --log -s ../t 2 4
searching for changes
applying 37a1297eb21b
37a1297eb21b transplanted to c19cf0ccb069
--- a/tests/test-up-local-change.t Mon Dec 08 15:41:54 2014 -0800
+++ b/tests/test-up-local-change.t Tue Dec 09 13:32:19 2014 -0600
@@ -83,10 +83,6 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: 1
- $ hg --debug merge
- abort: nothing to merge
- (use 'hg update' instead)
- [255]
$ hg parents
changeset: 0:c19d34741b0a
user: test
@@ -170,52 +166,6 @@
abort: uncommitted changes
(commit and merge, or update --clean to discard changes)
[255]
- $ hg --debug merge
- abort: uncommitted changes
- (use 'hg status' to list changes)
- [255]
- $ hg --debug merge -f
- searching for copies back to rev 1
- unmatched files new in both:
- b
- resolving manifests
- branchmerge: True, force: True, partial: False
- ancestor: c19d34741b0a, local: 1e71731e6fbb+, remote: 83c51d0caff4
- preserving a for resolve of a
- preserving b for resolve of b
- a: versions differ -> m
- updating: a 1/2 files (50.00%)
- picked tool 'true' for a (binary False symlink False)
- merging a
- my a@1e71731e6fbb+ other a@83c51d0caff4 ancestor a@c19d34741b0a
- b: versions differ -> m
- updating: b 2/2 files (100.00%)
- picked tool 'true' for b (binary False symlink False)
- merging b
- my b@1e71731e6fbb+ other b@83c51d0caff4 ancestor b@000000000000
- 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
- (branch merge, don't forget to commit)
- $ hg parents
- changeset: 1:1e71731e6fbb
- user: test
- date: Thu Jan 01 00:00:00 1970 +0000
- summary: 2
-
- changeset: 2:83c51d0caff4
- tag: tip
- parent: 0:c19d34741b0a
- user: test
- date: Thu Jan 01 00:00:00 1970 +0000
- summary: 3
-
- $ hg diff --nodates
- diff -r 1e71731e6fbb a
- --- a/a
- +++ b/a
- @@ -1,1 +1,1 @@
- -a2
- +abc
-
test conflicting untracked files