changeset 31119:13bbcd56c57a

merge with stable
author Martin von Zweigbergk <martinvonz@google.com>
date Tue, 28 Feb 2017 11:13:25 -0800
parents 6483e49204ee (diff) a91c62752d08 (current diff)
children c4e8fa2b1c40
files mercurial/ui.py mercurial/worker.py
diffstat 196 files changed, 10885 insertions(+), 5126 deletions(-)
--- a/Makefile	Sat Feb 25 12:48:50 2017 +0900
+++ b/Makefile	Tue Feb 28 11:13:25 2017 -0800
@@ -262,5 +262,9 @@
 .PHONY: help all local build doc cleanbutpackages clean install install-bin \
 	install-doc install-home install-home-bin install-home-doc \
 	dist dist-notests check tests check-code update-pot \
-	osx fedora20 docker-fedora20 fedora21 docker-fedora21 \
+	osx deb ppa docker-debian-jessie \
+	docker-ubuntu-trusty docker-ubuntu-trusty-ppa \
+	docker-ubuntu-xenial docker-ubuntu-xenial-ppa \
+	docker-ubuntu-yakkety docker-ubuntu-yakkety-ppa \
+	fedora20 docker-fedora20 fedora21 docker-fedora21 \
 	centos5 docker-centos5 centos6 docker-centos6 centos7 docker-centos7
--- a/contrib/check-code.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/check-code.py	Tue Feb 28 11:13:25 2017 -0800
@@ -237,7 +237,7 @@
     (r'lambda\s*\(.*,.*\)',
      "tuple parameter unpacking not available in Python 3+"),
     (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
-    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
+    (r'(?<!\.)\breduce\s*\(.*', "reduce is not available in Python 3+"),
     (r'\bdict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
      'dict-from-generator'),
     (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
--- a/contrib/chg/chg.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/chg/chg.c	Tue Feb 28 11:13:25 2017 -0800
@@ -128,6 +128,24 @@
 		abortmsg("insecure sockdir %s", sockdir);
 }
 
+/*
+ * Check that a socket directory exists and is owned only by the current user.
+ * Return 1 if so, 0 if not. This is used to decide whether XDG_RUNTIME_DIR
+ * can be used. According to the specification [1], XDG_RUNTIME_DIR should be
+ * ignored unless the directory is owned by the user and has mode 0700.
+ * [1]: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+ */
+static int checkruntimedir(const char *sockdir)
+{
+	struct stat st;
+	int r = lstat(sockdir, &st);
+	if (r < 0) /* e.g. does not exist */
+		return 0;
+	if (!S_ISDIR(st.st_mode)) /* e.g. a file, not a directory */
+		return 0;
+	return st.st_uid == geteuid() && (st.st_mode & 0777) == 0700;
+}
+
 static void getdefaultsockdir(char sockdir[], size_t size)
 {
 	/* by default, put socket file in secure directory
@@ -135,7 +153,7 @@
 	 * (permission of socket file may be ignored on some Unices) */
 	const char *runtimedir = getenv("XDG_RUNTIME_DIR");
 	int r;
-	if (runtimedir) {
+	if (runtimedir && checkruntimedir(runtimedir)) {
 		r = snprintf(sockdir, size, "%s/chg", runtimedir);
 	} else {
 		const char *tmpdir = getenv("TMPDIR");
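
The rule checkruntimedir() enforces, restated as a minimal Python sketch for
illustration (usable_runtime_dir is a hypothetical name, not part of the
patch): the path must exist, be a directory, be owned by the current user,
and have mode 0700, or it is ignored::

    import os
    import stat

    def usable_runtime_dir(path):
        try:
            st = os.lstat(path)           # lstat: do not follow a symlink
        except OSError:                   # e.g. does not exist
            return False
        if not stat.S_ISDIR(st.st_mode):  # e.g. a file, not a directory
            return False
        # owned by the effective user and exactly mode 0700
        return st.st_uid == os.geteuid() and stat.S_IMODE(st.st_mode) == 0o700
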
--- a/contrib/hgperf	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/hgperf	Tue Feb 28 11:13:25 2017 -0800
@@ -55,17 +55,15 @@
 import mercurial.util
 import mercurial.dispatch
 
-import time
-
 def timer(func, title=None):
     results = []
-    begin = time.time()
+    begin = mercurial.util.timer()
     count = 0
     while True:
         ostart = os.times()
-        cstart = time.time()
+        cstart = mercurial.util.timer()
         r = func()
-        cstop = time.time()
+        cstop = mercurial.util.timer()
         ostop = os.times()
         count += 1
         a, b = ostart, ostop
--- a/contrib/perf.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/perf.py	Tue Feb 28 11:13:25 2017 -0800
@@ -190,13 +190,13 @@
 
 def _timer(fm, func, title=None):
     results = []
-    begin = time.time()
+    begin = util.timer()
     count = 0
     while True:
         ostart = os.times()
-        cstart = time.time()
+        cstart = util.timer()
         r = func()
-        cstop = time.time()
+        cstop = util.timer()
         ostop = os.times()
         count += 1
         a, b = ostart, ostop
@@ -993,6 +993,26 @@
     node = r.lookup(rev)
     rev = r.rev(node)
 
+    def getrawchunks(data, chain):
+        start = r.start
+        length = r.length
+        inline = r._inline
+        iosize = r._io.size
+        buffer = util.buffer
+        offset = start(chain[0])
+
+        chunks = []
+        ladd = chunks.append
+
+        for rev in chain:
+            chunkstart = start(rev)
+            if inline:
+                chunkstart += (rev + 1) * iosize
+            chunklength = length(rev)
+            ladd(buffer(data, chunkstart - offset, chunklength))
+
+        return chunks
+
     def dodeltachain(rev):
         if not cache:
             r.clearcaches()
@@ -1003,24 +1023,15 @@
             r.clearcaches()
         r._chunkraw(chain[0], chain[-1])
 
-    def dodecompress(data, chain):
+    def dorawchunks(data, chain):
         if not cache:
             r.clearcaches()
-
-        start = r.start
-        length = r.length
-        inline = r._inline
-        iosize = r._io.size
-        buffer = util.buffer
-        offset = start(chain[0])
+        getrawchunks(data, chain)
 
-        for rev in chain:
-            chunkstart = start(rev)
-            if inline:
-                chunkstart += (rev + 1) * iosize
-            chunklength = length(rev)
-            b = buffer(data, chunkstart - offset, chunklength)
-            r.decompress(b)
+    def dodecompress(chunks):
+        decomp = r.decompress
+        for chunk in chunks:
+            decomp(chunk)
 
     def dopatch(text, bins):
         if not cache:
@@ -1039,6 +1050,7 @@
 
     chain = r._deltachain(rev)[0]
     data = r._chunkraw(chain[0], chain[-1])[1]
+    rawchunks = getrawchunks(data, chain)
     bins = r._chunks(chain)
     text = str(bins[0])
     bins = bins[1:]
@@ -1048,7 +1060,8 @@
         (lambda: dorevision(), 'full'),
         (lambda: dodeltachain(rev), 'deltachain'),
         (lambda: doread(chain), 'read'),
-        (lambda: dodecompress(data, chain), 'decompress'),
+        (lambda: dorawchunks(data, chain), 'rawchunks'),
+        (lambda: dodecompress(rawchunks), 'decompress'),
         (lambda: dopatch(text, bins), 'patch'),
         (lambda: dohash(text), 'hash'),
     ]
@@ -1256,6 +1269,17 @@
         timer(fn, title=title)
         fm.end()
 
+@command('perfwrite', formatteropts)
+def perfwrite(ui, repo, **opts):
+    """microbenchmark ui.write
+    """
+    timer, fm = gettimer(ui, opts)
+    def write():
+        for i in range(100000):
+            ui.write(('Testing write performance\n'))
+    timer(write)
+    fm.end()
+
 def uisetup(ui):
     if (util.safehasattr(cmdutil, 'openrevlog') and
         not util.safehasattr(commands, 'debugrevlogopts')):
--- a/contrib/python-zstandard/NEWS.rst	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/NEWS.rst	Tue Feb 28 11:13:25 2017 -0800
@@ -1,6 +1,33 @@
 Version History
 ===============
 
+0.7.0 (released 2017-02-07)
+---------------------------
+
+* Added zstd.get_frame_parameters() to obtain info about a zstd frame.
+* Added ZstdDecompressor.decompress_content_dict_chain() for efficient
+  decompression of *content-only dictionary chains*.
+* CFFI module fully implemented; all tests run against both C extension and
+  CFFI implementation.
+* Vendored version of zstd updated to 1.1.3.
+* ZstdDecompressor.decompress() now uses ZSTD_createDDict_byReference()
+  to avoid extra memory allocation of dict data.
+* Add function names to error messages (by using ":name" in PyArg_Parse*
+  functions).
+* Reuse decompression context across operations. Previously, we created a
+  new ZSTD_DCtx for each decompress(). This was measured to slow down
+  decompression by 40-200MB/s. The API guarantees say ZstdDecompressor
+  is not thread safe. So we reuse the ZSTD_DCtx across operations and make
+  things faster in the process.
+* ZstdCompressor.write_to()'s compress() and flush() methods now return number
+  of bytes written.
+* ZstdDecompressor.write_to()'s write() method now returns the number of bytes
+  written to the underlying output object.
+* CompressionParameters instances now expose their values as attributes.
+* CompressionParameters instances are no longer subscriptable and no longer
+  behave as tuples (backwards incompatible). Use attributes to obtain values.
+* DictParameters instances now expose their values as attributes.
+
 0.6.0 (released 2017-01-14)
 ---------------------------
 
--- a/contrib/python-zstandard/README.rst	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/README.rst	Tue Feb 28 11:13:25 2017 -0800
@@ -4,10 +4,11 @@
 
 This project provides Python bindings for interfacing with the
 `Zstandard <http://www.zstd.net>`_ compression library. A C extension
-and CFFI interface is provided.
+and CFFI interface are provided.
 
-The primary goal of the extension is to provide a Pythonic interface to
-the underlying C API. This means exposing most of the features and flexibility
+The primary goal of the project is to provide a rich, Pythonic interface to
+the underlying C API without sacrificing performance. This means exposing most
+of the features and flexibility
 of the C API while not sacrificing usability or safety that Python provides.
 
 The canonical home for this project is
@@ -23,6 +24,9 @@
 may be some backwards incompatible changes before 1.0. Though the author
 does not intend to make any major changes to the Python API.
 
+This project is vendored and distributed with Mercurial 4.1, where it is
+used in a production capacity.
+
 There is continuous integration for Python versions 2.6, 2.7, and 3.3+
 on Linux x86_x64 and Windows x86 and x86_64. The author is reasonably
 confident the extension is stable and works as advertised on these
@@ -48,14 +52,15 @@
 support compression without the framing headers. But the author doesn't
 believe it a high priority at this time.
 
-The CFFI bindings are half-baked and need to be finished.
+The CFFI bindings are feature complete and all tests run against both
+the C extension and CFFI bindings to ensure behavior parity.
 
 Requirements
 ============
 
-This extension is designed to run with Python 2.6, 2.7, 3.3, 3.4, and 3.5
-on common platforms (Linux, Windows, and OS X). Only x86_64 is currently
-well-tested as an architecture.
+This extension is designed to run with Python 2.6, 2.7, 3.3, 3.4, 3.5, and
+3.6 on common platforms (Linux, Windows, and OS X). Only x86_64 is
+currently well-tested as an architecture.
 
 Installing
 ==========
@@ -106,15 +111,11 @@
 Comparison to Other Python Bindings
 ===================================
 
-https://pypi.python.org/pypi/zstd is an alternative Python binding to
+https://pypi.python.org/pypi/zstd is an alternate Python binding to
 Zstandard. At the time this was written, the latest release of that
-package (1.0.0.2) had the following significant differences from this package:
-
-* It only exposes the simple API for compression and decompression operations.
-  This extension exposes the streaming API, dictionary training, and more.
-* It adds a custom framing header to compressed data and there is no way to
-  disable it. This means that data produced with that module cannot be used by
-  other Zstandard implementations.
+package (1.1.2) only exposed the simple APIs for compression and decompression.
+This package exposes much more of the zstd API, including streaming and
+dictionary compression, and it also offers CFFI support.
 
 Bundling of Zstandard Source Code
 =================================
@@ -260,6 +261,10 @@
 compressor's internal state into the output object. This may result in 0 or
 more ``write()`` calls to the output object.
 
+Both ``write()`` and ``flush()`` return the number of bytes written to the
+object's ``write()``. In many cases, small inputs do not accumulate enough
+data to cause a write and ``write()`` will return ``0``.
+
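
For illustration, a minimal sketch of the new return values (assumes the
0.7.0 API described in this section; ``io.BytesIO`` stands in for any object
with a ``write()`` method)::

    import io
    import zstd

    cctx = zstd.ZstdCompressor()
    buf = io.BytesIO()
    with cctx.write_to(buf) as compressor:
        n = compressor.write(b'data to compress')  # may be 0 if buffered
        n += compressor.flush()                    # reports deferred writes
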
 If the size of the data being fed to this streaming compressor is known,
 you can declare it before compression begins::
 
@@ -476,6 +481,10 @@
 the decompressor by calling ``write(data)`` and decompressed output is written
 to the output object by calling its ``write(data)`` method.
 
+Calls to ``write()`` will return the number of bytes written to the output
+object. Not all inputs will result in bytes being written, so return values
+of ``0`` are possible.
+
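
An illustrative sketch, assuming ``frame`` holds a complete zstd frame::

    import io
    import zstd

    dctx = zstd.ZstdDecompressor()
    out = io.BytesIO()
    with dctx.write_to(out) as decompressor:
        written = decompressor.write(frame)  # bytes written to ``out``
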
 The size of chunks being ``write()`` to the destination can be specified::
 
     dctx = zstd.ZstdDecompressor()
@@ -576,6 +585,53 @@
    data = dobj.decompress(compressed_chunk_0)
    data = dobj.decompress(compressed_chunk_1)
 
+Content-Only Dictionary Chain Decompression
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``decompress_content_dict_chain(frames)`` performs decompression of a list of
+zstd frames produced using chained *content-only* dictionary compression. Such
+a list of frames is produced by compressing discrete inputs where each
+non-initial input is compressed with a *content-only* dictionary consisting
+of the content of the previous input.
+
+For example, say you have the following inputs::
+
+   inputs = [b'input 1', b'input 2', b'input 3']
+
+The zstd frame chain consists of:
+
+1. ``b'input 1'`` compressed in standalone/discrete mode
+2. ``b'input 2'`` compressed using ``b'input 1'`` as a *content-only* dictionary
+3. ``b'input 3'`` compressed using ``b'input 2'`` as a *content-only* dictionary
+
+Each zstd frame **must** have the content size written.
+
+The following Python code can be used to produce a *content-only dictionary
+chain*::
+
+   def make_chain(inputs):
+       frames = []
+
+       # First frame is compressed in standalone/discrete mode.
+       zctx = zstd.ZstdCompressor(write_content_size=True)
+       frames.append(zctx.compress(inputs[0]))
+
+       # Subsequent frames use the previous fulltext as a content-only dictionary.
+       for i, raw in enumerate(inputs[1:]):
+           dict_data = zstd.ZstdCompressionDict(inputs[i])
+           zctx = zstd.ZstdCompressor(write_content_size=True, dict_data=dict_data)
+           frames.append(zctx.compress(raw))
+
+       return frames
+
+``decompress_content_dict_chain()`` returns the uncompressed data of the last
+element in the input chain.
+
+It is possible to implement *content-only dictionary chain* decompression
+on top of other Python APIs. However, this function will likely be significantly
+faster, especially for long input chains, as it avoids the overhead of
+instantiating and passing around intermediate objects between C and Python.
+
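
Putting the two halves together, an illustrative round trip using the
``make_chain()`` helper above::

    frames = make_chain([b'input 1', b'input 2', b'input 3'])
    dctx = zstd.ZstdDecompressor()
    assert dctx.decompress_content_dict_chain(frames) == b'input 3'
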
 Choosing an API
 ---------------
 
@@ -634,6 +690,13 @@
 
    dict_data = zstd.ZstdCompressionDict(data)
 
+It is possible to construct a dictionary from *any* data. Unless the
+data begins with a magic header, the dictionary will be treated as
+*content-only*. *Content-only* dictionaries allow compression operations
+that follow to reference raw data within the content. For one use of
+*content-only* dictionaries, see
+``ZstdDecompressor.decompress_content_dict_chain()``.
+
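
An illustrative sketch (any bytes lacking the dictionary magic header are
treated this way)::

    dict_data = zstd.ZstdCompressionDict(b'fulltext of previous revision')
    cctx = zstd.ZstdCompressor(dict_data=dict_data)
    frame = cctx.compress(b'fulltext of next revision')
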
 More interestingly, instances can be created by *training* on sample data::
 
    dict_data = zstd.train_dictionary(size, samples)
@@ -700,19 +763,57 @@
 
     cctx = zstd.ZstdCompressor(compression_params=params)
 
-The members of the ``CompressionParameters`` tuple are as follows::
+The members/attributes of ``CompressionParameters`` instances are as follows::
 
-* 0 - Window log
-* 1 - Chain log
-* 2 - Hash log
-* 3 - Search log
-* 4 - Search length
-* 5 - Target length
-* 6 - Strategy (one of the ``zstd.STRATEGY_`` constants)
+* window_log
+* chain_log
+* hash_log
+* search_log
+* search_length
+* target_length
+* strategy
+
+This is the order in which arguments are passed to the constructor if named
+arguments are not used.
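
An illustrative sketch (the values are arbitrary but within the legal
ranges)::

    params = zstd.CompressionParameters(window_log=18, chain_log=17,
                                        hash_log=17, search_log=1,
                                        search_length=5, target_length=16,
                                        strategy=zstd.STRATEGY_DFAST)
    assert params.window_log == 18
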
 
 You'll need to read the Zstandard documentation for what these parameters
 do.
 
+Frame Inspection
+----------------
+
+Data emitted from zstd compression is encapsulated in a *frame*. This frame
+begins with a 4 byte *magic number* header followed by 2 to 14 bytes describing
+the frame in more detail. For more info, see
+https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md.
+
+``zstd.get_frame_parameters(data)`` parses a zstd *frame* header from a bytes
+instance and returns a ``FrameParameters`` object describing the frame.
+
+Depending on which fields are present in the frame and their values, the
+length of the frame parameters varies. If insufficient bytes are passed
+in to fully parse the frame parameters, ``ZstdError`` is raised. To ensure
+frame parameters can be parsed, pass in at least 18 bytes.
+
+``FrameParameters`` instances have the following attributes:
+
+content_size
+   Integer size of original, uncompressed content. This will be ``0`` if the
+   original content size isn't written to the frame (controlled with the
+   ``write_content_size`` argument to ``ZstdCompressor``) or if the input
+   content size was ``0``.
+
+window_size
+   Integer size of maximum back-reference distance in compressed data.
+
+dict_id
+   Integer of dictionary ID used for compression. ``0`` if no dictionary
+   ID was used or if the dictionary ID was ``0``.
+
+has_checksum
+   Bool indicating whether a 4 byte content checksum is stored at the end
+   of the frame.
+
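
An illustrative sketch tying the attributes to compressor options::

    cctx = zstd.ZstdCompressor(write_content_size=True, write_checksum=True)
    frame = cctx.compress(b'data')
    params = zstd.get_frame_parameters(frame)
    params.content_size   # 4
    params.dict_id        # 0 (no dictionary was used)
    params.has_checksum   # True
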
 Misc Functionality
 ------------------
 
@@ -776,19 +877,32 @@
 TARGETLENGTH_MAX
     Maximum value for compression parameter
 STRATEGY_FAST
-    Compression strategory
+    Compression strategy
 STRATEGY_DFAST
-    Compression strategory
+    Compression strategy
 STRATEGY_GREEDY
-    Compression strategory
+    Compression strategy
 STRATEGY_LAZY
-    Compression strategory
+    Compression strategy
 STRATEGY_LAZY2
-    Compression strategory
+    Compression strategy
 STRATEGY_BTLAZY2
-    Compression strategory
+    Compression strategy
 STRATEGY_BTOPT
-    Compression strategory
+    Compression strategy
+
+Performance Considerations
+--------------------------
+
+The ``ZstdCompressor`` and ``ZstdDecompressor`` types maintain a persistent
+compression or decompression *context*. Reusing a ``ZstdCompressor``
+or ``ZstdDecompressor`` instance for multiple operations is faster than
+instantiating a new ``ZstdCompressor`` or ``ZstdDecompressor`` for each
+operation. The differences are magnified as the size of data decreases. For
+example, the difference between *context* reuse and non-reuse for 100,000
+100 byte inputs will be significant (possibly over 10x faster to reuse contexts)
+whereas 10 1,000,000 byte inputs will be more similar in speed (because the
+time spent doing compression dwarfs time spent creating new *contexts*).
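
A sketch of the two patterns (``chunks`` is a placeholder for many small
inputs)::

    cctx = zstd.ZstdCompressor()
    out = [cctx.compress(c) for c in chunks]    # one context, reused

    out = [zstd.ZstdCompressor().compress(c)    # context setup per call
           for c in chunks]
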
 
 Note on Zstandard's *Experimental* API
 ======================================
--- a/contrib/python-zstandard/c-ext/compressiondict.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/compressiondict.c	Tue Feb 28 11:13:25 2017 -0800
@@ -28,7 +28,8 @@
 	void* dict;
 	ZstdCompressionDict* result;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|O!", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|O!:train_dictionary",
+		kwlist,
 		&capacity,
 		&PyList_Type, &samples,
 		(PyObject*)&DictParametersType, &parameters)) {
@@ -57,7 +58,6 @@
 		sampleItem = PyList_GetItem(samples, sampleIndex);
 		if (!PyBytes_Check(sampleItem)) {
 			PyErr_SetString(PyExc_ValueError, "samples must be bytes");
-			/* TODO probably need to perform DECREF here */
 			return NULL;
 		}
 		samplesSize += PyBytes_GET_SIZE(sampleItem);
@@ -133,10 +133,11 @@
 	self->dictSize = 0;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:ZstdCompressionDict",
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:ZstdCompressionDict",
 #endif
+		&source, &sourceSize)) {
 		return -1;
 	}
 
--- a/contrib/python-zstandard/c-ext/compressionparams.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/compressionparams.c	Tue Feb 28 11:13:25 2017 -0800
@@ -25,7 +25,8 @@
 	ZSTD_compressionParameters params;
 	CompressionParametersObject* result;
 
-	if (!PyArg_ParseTuple(args, "i|Kn", &compressionLevel, &sourceSize, &dictSize)) {
+	if (!PyArg_ParseTuple(args, "i|Kn:get_compression_parameters",
+		&compressionLevel, &sourceSize, &dictSize)) {
 		return NULL;
 	}
 
@@ -47,12 +48,85 @@
 	return result;
 }
 
+static int CompressionParameters_init(CompressionParametersObject* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"window_log",
+		"chain_log",
+		"hash_log",
+		"search_log",
+		"search_length",
+		"target_length",
+		"strategy",
+		NULL
+	};
+
+	unsigned windowLog;
+	unsigned chainLog;
+	unsigned hashLog;
+	unsigned searchLog;
+	unsigned searchLength;
+	unsigned targetLength;
+	unsigned strategy;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "IIIIIII:CompressionParameters",
+		kwlist, &windowLog, &chainLog, &hashLog, &searchLog, &searchLength,
+		&targetLength, &strategy)) {
+		return -1;
+	}
+
+	if (windowLog < ZSTD_WINDOWLOG_MIN || windowLog > ZSTD_WINDOWLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid window log value");
+		return -1;
+	}
+
+	if (chainLog < ZSTD_CHAINLOG_MIN || chainLog > ZSTD_CHAINLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid chain log value");
+		return -1;
+	}
+
+	if (hashLog < ZSTD_HASHLOG_MIN || hashLog > ZSTD_HASHLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid hash log value");
+		return -1;
+	}
+
+	if (searchLog < ZSTD_SEARCHLOG_MIN || searchLog > ZSTD_SEARCHLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid search log value");
+		return -1;
+	}
+
+	if (searchLength < ZSTD_SEARCHLENGTH_MIN || searchLength > ZSTD_SEARCHLENGTH_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid search length value");
+		return -1;
+	}
+
+	if (targetLength < ZSTD_TARGETLENGTH_MIN || targetLength > ZSTD_TARGETLENGTH_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid target length value");
+		return -1;
+	}
+
+	if (strategy < ZSTD_fast || strategy > ZSTD_btopt) {
+		PyErr_SetString(PyExc_ValueError, "invalid strategy value");
+		return -1;
+	}
+
+	self->windowLog = windowLog;
+	self->chainLog = chainLog;
+	self->hashLog = hashLog;
+	self->searchLog = searchLog;
+	self->searchLength = searchLength;
+	self->targetLength = targetLength;
+	self->strategy = strategy;
+
+	return 0;
+}
+
 PyObject* estimate_compression_context_size(PyObject* self, PyObject* args) {
 	CompressionParametersObject* params;
 	ZSTD_compressionParameters zparams;
 	PyObject* result;
 
-	if (!PyArg_ParseTuple(args, "O!", &CompressionParametersType, &params)) {
+	if (!PyArg_ParseTuple(args, "O!:estimate_compression_context_size",
+		&CompressionParametersType, &params)) {
 		return NULL;
 	}
 
@@ -64,113 +138,33 @@
 PyDoc_STRVAR(CompressionParameters__doc__,
 "CompressionParameters: low-level control over zstd compression");
 
-static PyObject* CompressionParameters_new(PyTypeObject* subtype, PyObject* args, PyObject* kwargs) {
-	CompressionParametersObject* self;
-	unsigned windowLog;
-	unsigned chainLog;
-	unsigned hashLog;
-	unsigned searchLog;
-	unsigned searchLength;
-	unsigned targetLength;
-	unsigned strategy;
-
-	if (!PyArg_ParseTuple(args, "IIIIIII", &windowLog, &chainLog, &hashLog, &searchLog,
-		&searchLength, &targetLength, &strategy)) {
-		return NULL;
-	}
-
-	if (windowLog < ZSTD_WINDOWLOG_MIN || windowLog > ZSTD_WINDOWLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid window log value");
-		return NULL;
-	}
-
-	if (chainLog < ZSTD_CHAINLOG_MIN || chainLog > ZSTD_CHAINLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid chain log value");
-		return NULL;
-	}
-
-	if (hashLog < ZSTD_HASHLOG_MIN || hashLog > ZSTD_HASHLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid hash log value");
-		return NULL;
-	}
-
-	if (searchLog < ZSTD_SEARCHLOG_MIN || searchLog > ZSTD_SEARCHLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid search log value");
-		return NULL;
-	}
-
-	if (searchLength < ZSTD_SEARCHLENGTH_MIN || searchLength > ZSTD_SEARCHLENGTH_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid search length value");
-		return NULL;
-	}
-
-	if (targetLength < ZSTD_TARGETLENGTH_MIN || targetLength > ZSTD_TARGETLENGTH_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid target length value");
-		return NULL;
-	}
-
-	if (strategy < ZSTD_fast || strategy > ZSTD_btopt) {
-		PyErr_SetString(PyExc_ValueError, "invalid strategy value");
-		return NULL;
-	}
-
-	self = (CompressionParametersObject*)subtype->tp_alloc(subtype, 1);
-	if (!self) {
-		return NULL;
-	}
-
-	self->windowLog = windowLog;
-	self->chainLog = chainLog;
-	self->hashLog = hashLog;
-	self->searchLog = searchLog;
-	self->searchLength = searchLength;
-	self->targetLength = targetLength;
-	self->strategy = strategy;
-
-	return (PyObject*)self;
-}
-
 static void CompressionParameters_dealloc(PyObject* self) {
 	PyObject_Del(self);
 }
 
-static Py_ssize_t CompressionParameters_length(PyObject* self) {
-	return 7;
-}
-
-static PyObject* CompressionParameters_item(PyObject* o, Py_ssize_t i) {
-	CompressionParametersObject* self = (CompressionParametersObject*)o;
-
-	switch (i) {
-	case 0:
-		return PyLong_FromLong(self->windowLog);
-	case 1:
-		return PyLong_FromLong(self->chainLog);
-	case 2:
-		return PyLong_FromLong(self->hashLog);
-	case 3:
-		return PyLong_FromLong(self->searchLog);
-	case 4:
-		return PyLong_FromLong(self->searchLength);
-	case 5:
-		return PyLong_FromLong(self->targetLength);
-	case 6:
-		return PyLong_FromLong(self->strategy);
-	default:
-		PyErr_SetString(PyExc_IndexError, "index out of range");
-		return NULL;
-	}
-}
-
-static PySequenceMethods CompressionParameters_sq = {
-	CompressionParameters_length, /* sq_length */
-	0,							  /* sq_concat */
-	0,                            /* sq_repeat */
-	CompressionParameters_item,   /* sq_item */
-	0,                            /* sq_ass_item */
-	0,                            /* sq_contains */
-	0,                            /* sq_inplace_concat */
-	0                             /* sq_inplace_repeat */
+static PyMemberDef CompressionParameters_members[] = {
+	{ "window_log", T_UINT,
+	  offsetof(CompressionParametersObject, windowLog), READONLY,
+	  "window log" },
+	{ "chain_log", T_UINT,
+	  offsetof(CompressionParametersObject, chainLog), READONLY,
+	  "chain log" },
+	{ "hash_log", T_UINT,
+	  offsetof(CompressionParametersObject, hashLog), READONLY,
+	  "hash log" },
+	{ "search_log", T_UINT,
+	  offsetof(CompressionParametersObject, searchLog), READONLY,
+	  "search log" },
+	{ "search_length", T_UINT,
+	  offsetof(CompressionParametersObject, searchLength), READONLY,
+	  "search length" },
+	{ "target_length", T_UINT,
+	  offsetof(CompressionParametersObject, targetLength), READONLY,
+	  "target length" },
+	{ "strategy", T_INT,
+	  offsetof(CompressionParametersObject, strategy), READONLY,
+	  "strategy" },
+	{ NULL }
 };
 
 PyTypeObject CompressionParametersType = {
@@ -185,7 +179,7 @@
 	0,                         /* tp_compare */
 	0,                         /* tp_repr */
 	0,                         /* tp_as_number */
-	&CompressionParameters_sq, /* tp_as_sequence */
+	0,                         /* tp_as_sequence */
 	0,                         /* tp_as_mapping */
 	0,                         /* tp_hash  */
 	0,                         /* tp_call */
@@ -193,7 +187,7 @@
 	0,                         /* tp_getattro */
 	0,                         /* tp_setattro */
 	0,                         /* tp_as_buffer */
-	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
 	CompressionParameters__doc__, /* tp_doc */
 	0,                         /* tp_traverse */
 	0,                         /* tp_clear */
@@ -202,16 +196,16 @@
 	0,                         /* tp_iter */
 	0,                         /* tp_iternext */
 	0,                         /* tp_methods */
-	0,                         /* tp_members */
+	CompressionParameters_members, /* tp_members */
 	0,                         /* tp_getset */
 	0,                         /* tp_base */
 	0,                         /* tp_dict */
 	0,                         /* tp_descr_get */
 	0,                         /* tp_descr_set */
 	0,                         /* tp_dictoffset */
-	0,                         /* tp_init */
+	(initproc)CompressionParameters_init, /* tp_init */
 	0,                         /* tp_alloc */
-	CompressionParameters_new, /* tp_new */
+	PyType_GenericNew,         /* tp_new */
 };
 
 void compressionparams_module_init(PyObject* mod) {
--- a/contrib/python-zstandard/c-ext/compressionwriter.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c	Tue Feb 28 11:13:25 2017 -0800
@@ -52,7 +52,7 @@
 	ZSTD_outBuffer output;
 	PyObject* res;
 
-	if (!PyArg_ParseTuple(args, "OOO", &exc_type, &exc_value, &exc_tb)) {
+	if (!PyArg_ParseTuple(args, "OOO:__exit__", &exc_type, &exc_value, &exc_tb)) {
 		return NULL;
 	}
 
@@ -119,11 +119,12 @@
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	PyObject* res;
+	Py_ssize_t totalWrite = 0;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:write", &source, &sourceSize)) {
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:write", &source, &sourceSize)) {
 #endif
 		return NULL;
 	}
@@ -164,20 +165,21 @@
 #endif
 				output.dst, output.pos);
 			Py_XDECREF(res);
+			totalWrite += output.pos;
 		}
 		output.pos = 0;
 	}
 
 	PyMem_Free(output.dst);
 
-	/* TODO return bytes written */
-	Py_RETURN_NONE;
+	return PyLong_FromSsize_t(totalWrite);
 }
 
 static PyObject* ZstdCompressionWriter_flush(ZstdCompressionWriter* self, PyObject* args) {
 	size_t zresult;
 	ZSTD_outBuffer output;
 	PyObject* res;
+	Py_ssize_t totalWrite = 0;
 
 	if (!self->entered) {
 		PyErr_SetString(ZstdError, "flush must be called from an active context manager");
@@ -215,14 +217,14 @@
 #endif
 				output.dst, output.pos);
 			Py_XDECREF(res);
+			totalWrite += output.pos;
 		}
 		output.pos = 0;
 	}
 
 	PyMem_Free(output.dst);
 
-	/* TODO return bytes written */
-	Py_RETURN_NONE;
+	return PyLong_FromSsize_t(totalWrite);
 }
 
 static PyMethodDef ZstdCompressionWriter_methods[] = {
--- a/contrib/python-zstandard/c-ext/compressobj.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/compressobj.c	Tue Feb 28 11:13:25 2017 -0800
@@ -42,9 +42,9 @@
 	}
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:compress", &source, &sourceSize)) {
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:compress", &source, &sourceSize)) {
 #endif
 		return NULL;
 	}
@@ -98,7 +98,7 @@
 	PyObject* result = NULL;
 	Py_ssize_t resultSize = 0;
 
-	if (!PyArg_ParseTuple(args, "|i", &flushMode)) {
+	if (!PyArg_ParseTuple(args, "|i:flush", &flushMode)) {
 		return NULL;
 	}
 
--- a/contrib/python-zstandard/c-ext/compressor.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/compressor.c	Tue Feb 28 11:13:25 2017 -0800
@@ -16,7 +16,7 @@
 	Py_BEGIN_ALLOW_THREADS
 	memset(&zmem, 0, sizeof(zmem));
 	compressor->cdict = ZSTD_createCDict_advanced(compressor->dict->dictData,
-		compressor->dict->dictSize, *zparams, zmem);
+		compressor->dict->dictSize, 1, *zparams, zmem);
 	Py_END_ALLOW_THREADS
 
 	if (!compressor->cdict) {
@@ -128,8 +128,8 @@
 	self->cparams = NULL;
 	self->cdict = NULL;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO!O!OOO", kwlist,
-		&level, &ZstdCompressionDictType, &dict,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO!O!OOO:ZstdCompressor",
+		kwlist,	&level, &ZstdCompressionDictType, &dict,
 		&CompressionParametersType, &params,
 		&writeChecksum, &writeContentSize, &writeDictID)) {
 		return -1;
@@ -243,8 +243,8 @@
 	PyObject* totalReadPy;
 	PyObject* totalWritePy;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|nkk", kwlist, &source, &dest, &sourceSize,
-		&inSize, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|nkk:copy_stream", kwlist,
+		&source, &dest, &sourceSize, &inSize, &outSize)) {
 		return NULL;
 	}
 
@@ -402,9 +402,9 @@
 	ZSTD_parameters zparams;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|O",
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|O:compress",
 #else
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|O",
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|O:compress",
 #endif
 		kwlist, &source, &sourceSize, &allowEmpty)) {
 		return NULL;
@@ -512,7 +512,7 @@
 		return NULL;
 	}
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|n", kwlist, &inSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|n:compressobj", kwlist, &inSize)) {
 		return NULL;
 	}
 
@@ -574,8 +574,8 @@
 	size_t outSize = ZSTD_CStreamOutSize();
 	ZstdCompressorIterator* result;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nkk", kwlist, &reader, &sourceSize,
-		&inSize, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nkk:read_from", kwlist,
+		&reader, &sourceSize, &inSize, &outSize)) {
 		return NULL;
 	}
 
@@ -693,8 +693,8 @@
 	Py_ssize_t sourceSize = 0;
 	size_t outSize = ZSTD_CStreamOutSize();
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nk", kwlist, &writer, &sourceSize,
-		&outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nk:write_to", kwlist,
+		&writer, &sourceSize, &outSize)) {
 		return NULL;
 	}
 
--- a/contrib/python-zstandard/c-ext/decompressionwriter.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/decompressionwriter.c	Tue Feb 28 11:13:25 2017 -0800
@@ -71,11 +71,12 @@
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	PyObject* res;
+	Py_ssize_t totalWrite = 0;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:write", &source, &sourceSize)) {
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:write", &source, &sourceSize)) {
 #endif
 		return NULL;
 	}
@@ -116,15 +117,15 @@
 #endif
 				output.dst, output.pos);
 			Py_XDECREF(res);
+			totalWrite += output.pos;
 			output.pos = 0;
 		}
 	}
 
 	PyMem_Free(output.dst);
 
-	/* TODO return bytes written */
-	Py_RETURN_NONE;
-	}
+	return PyLong_FromSsize_t(totalWrite);
+}
 
 static PyMethodDef ZstdDecompressionWriter_methods[] = {
 	{ "__enter__", (PyCFunction)ZstdDecompressionWriter_enter, METH_NOARGS,
--- a/contrib/python-zstandard/c-ext/decompressobj.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/decompressobj.c	Tue Feb 28 11:13:25 2017 -0800
@@ -41,9 +41,9 @@
 	}
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#",
+	if (!PyArg_ParseTuple(args, "y#:decompress",
 #else
-	if (!PyArg_ParseTuple(args, "s#",
+	if (!PyArg_ParseTuple(args, "s#:decompress",
 #endif
 		&source, &sourceSize)) {
 		return NULL;
--- a/contrib/python-zstandard/c-ext/decompressor.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/decompressor.c	Tue Feb 28 11:13:25 2017 -0800
@@ -59,23 +59,19 @@
 
 	ZstdCompressionDict* dict = NULL;
 
-	self->refdctx = NULL;
+	self->dctx = NULL;
 	self->dict = NULL;
 	self->ddict = NULL;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!:ZstdDecompressor", kwlist,
 		&ZstdCompressionDictType, &dict)) {
 		return -1;
 	}
 
-	/* Instead of creating a ZSTD_DCtx for every decompression operation,
-	   we create an instance at object creation time and recycle it via
-	   ZSTD_copyDCTx() on each use. This means each use is a malloc+memcpy
-	   instead of a malloc+init. */
 	/* TODO lazily initialize the reference ZSTD_DCtx on first use since
 	   not all instances of ZstdDecompressor will use a ZSTD_DCtx. */
-	self->refdctx = ZSTD_createDCtx();
-	if (!self->refdctx) {
+	self->dctx = ZSTD_createDCtx();
+	if (!self->dctx) {
 		PyErr_NoMemory();
 		goto except;
 	}
@@ -88,17 +84,17 @@
 	return 0;
 
 except:
-	if (self->refdctx) {
-		ZSTD_freeDCtx(self->refdctx);
-		self->refdctx = NULL;
+	if (self->dctx) {
+		ZSTD_freeDCtx(self->dctx);
+		self->dctx = NULL;
 	}
 
 	return -1;
 }
 
 static void Decompressor_dealloc(ZstdDecompressor* self) {
-	if (self->refdctx) {
-		ZSTD_freeDCtx(self->refdctx);
+	if (self->dctx) {
+		ZSTD_freeDCtx(self->dctx);
 	}
 
 	Py_XDECREF(self->dict);
@@ -150,8 +146,8 @@
 	PyObject* totalReadPy;
 	PyObject* totalWritePy;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk", kwlist, &source,
-		&dest, &inSize, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk:copy_stream", kwlist,
+		&source, &dest, &inSize, &outSize)) {
 		return NULL;
 	}
 
@@ -243,7 +239,7 @@
 	Py_DecRef(totalReadPy);
 	Py_DecRef(totalWritePy);
 
-	finally:
+finally:
 	if (output.dst) {
 		PyMem_Free(output.dst);
 	}
@@ -291,28 +287,19 @@
 	unsigned long long decompressedSize;
 	size_t destCapacity;
 	PyObject* result = NULL;
-	ZSTD_DCtx* dctx = NULL;
 	void* dictData = NULL;
 	size_t dictSize = 0;
 	size_t zresult;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n:decompress",
 #else
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n:decompress",
 #endif
-		&source, &sourceSize, &maxOutputSize)) {
+		kwlist, &source, &sourceSize, &maxOutputSize)) {
 		return NULL;
 	}
 
-	dctx = PyMem_Malloc(ZSTD_sizeof_DCtx(self->refdctx));
-	if (!dctx) {
-		PyErr_NoMemory();
-		return NULL;
-	}
-
-	ZSTD_copyDCtx(dctx, self->refdctx);
-
 	if (self->dict) {
 		dictData = self->dict->dictData;
 		dictSize = self->dict->dictSize;
@@ -320,12 +307,12 @@
 
 	if (dictData && !self->ddict) {
 		Py_BEGIN_ALLOW_THREADS
-		self->ddict = ZSTD_createDDict(dictData, dictSize);
+		self->ddict = ZSTD_createDDict_byReference(dictData, dictSize);
 		Py_END_ALLOW_THREADS
 
 		if (!self->ddict) {
 			PyErr_SetString(ZstdError, "could not create decompression dict");
-			goto except;
+			return NULL;
 		}
 	}
 
@@ -335,7 +322,7 @@
 		if (0 == maxOutputSize) {
 			PyErr_SetString(ZstdError, "input data invalid or missing content size "
 				"in frame header");
-			goto except;
+			return NULL;
 		}
 		else {
 			result = PyBytes_FromStringAndSize(NULL, maxOutputSize);
@@ -348,45 +335,39 @@
 	}
 
 	if (!result) {
-		goto except;
+		return NULL;
 	}
 
 	Py_BEGIN_ALLOW_THREADS
 	if (self->ddict) {
-		zresult = ZSTD_decompress_usingDDict(dctx, PyBytes_AsString(result), destCapacity,
+		zresult = ZSTD_decompress_usingDDict(self->dctx,
+			PyBytes_AsString(result), destCapacity,
 			source, sourceSize, self->ddict);
 	}
 	else {
-		zresult = ZSTD_decompressDCtx(dctx, PyBytes_AsString(result), destCapacity, source, sourceSize);
+		zresult = ZSTD_decompressDCtx(self->dctx,
+			PyBytes_AsString(result), destCapacity, source, sourceSize);
 	}
 	Py_END_ALLOW_THREADS
 
 	if (ZSTD_isError(zresult)) {
 		PyErr_Format(ZstdError, "decompression error: %s", ZSTD_getErrorName(zresult));
-		goto except;
+		Py_DecRef(result);
+		return NULL;
 	}
 	else if (decompressedSize && zresult != decompressedSize) {
 		PyErr_Format(ZstdError, "decompression error: decompressed %zu bytes; expected %llu",
 			zresult, decompressedSize);
-		goto except;
+		Py_DecRef(result);
+		return NULL;
 	}
 	else if (zresult < destCapacity) {
 		if (_PyBytes_Resize(&result, zresult)) {
-			goto except;
+			Py_DecRef(result);
+			return NULL;
 		}
 	}
 
-	goto finally;
-
-except:
-	Py_DecRef(result);
-	result = NULL;
-
-finally:
-	if (dctx) {
-		PyMem_FREE(dctx);
-	}
-
 	return result;
 }
 
@@ -455,8 +436,8 @@
 	ZstdDecompressorIterator* result;
 	size_t skipBytes = 0;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk", kwlist, &reader,
-		&inSize, &outSize, &skipBytes)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk:read_from", kwlist,
+		&reader, &inSize, &outSize, &skipBytes)) {
 		return NULL;
 	}
 
@@ -534,19 +515,14 @@
 	goto finally;
 
 except:
-	if (result->reader) {
-		Py_DECREF(result->reader);
-		result->reader = NULL;
-	}
+	Py_CLEAR(result->reader);
 
 	if (result->buffer) {
 		PyBuffer_Release(result->buffer);
-		Py_DECREF(result->buffer);
-		result->buffer = NULL;
+		Py_CLEAR(result->buffer);
 	}
 
-	Py_DECREF(result);
-	result = NULL;
+	Py_CLEAR(result);
 
 finally:
 
@@ -577,7 +553,8 @@
 	size_t outSize = ZSTD_DStreamOutSize();
 	ZstdDecompressionWriter* result;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k", kwlist, &writer, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k:write_to", kwlist,
+		&writer, &outSize)) {
 		return NULL;
 	}
 
@@ -605,6 +582,200 @@
 	return result;
 }
 
+PyDoc_STRVAR(Decompressor_decompress_content_dict_chain__doc__,
+"Decompress a series of chunks using the content dictionary chaining technique\n"
+);
+
+static PyObject* Decompressor_decompress_content_dict_chain(PyObject* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"frames",
+		NULL
+	};
+
+	PyObject* chunks;
+	Py_ssize_t chunksLen;
+	Py_ssize_t chunkIndex;
+	char parity = 0;
+	PyObject* chunk;
+	char* chunkData;
+	Py_ssize_t chunkSize;
+	ZSTD_DCtx* dctx = NULL;
+	size_t zresult;
+	ZSTD_frameParams frameParams;
+	void* buffer1 = NULL;
+	size_t buffer1Size = 0;
+	size_t buffer1ContentSize = 0;
+	void* buffer2 = NULL;
+	size_t buffer2Size = 0;
+	size_t buffer2ContentSize = 0;
+	void* destBuffer = NULL;
+	PyObject* result = NULL;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!:decompress_content_dict_chain",
+		kwlist, &PyList_Type, &chunks)) {
+		return NULL;
+	}
+
+	chunksLen = PyList_Size(chunks);
+	if (!chunksLen) {
+		PyErr_SetString(PyExc_ValueError, "empty input chain");
+		return NULL;
+	}
+
+	/* The first chunk should not be using a dictionary. We handle it specially. */
+	chunk = PyList_GetItem(chunks, 0);
+	if (!PyBytes_Check(chunk)) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 must be bytes");
+		return NULL;
+	}
+
+	/* We require that all chunks be zstd frames and that they have content size set. */
+	PyBytes_AsStringAndSize(chunk, &chunkData, &chunkSize);
+	zresult = ZSTD_getFrameParams(&frameParams, (void*)chunkData, chunkSize);
+	if (ZSTD_isError(zresult)) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 is not a valid zstd frame");
+		return NULL;
+	}
+	else if (zresult) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 is too small to contain a zstd frame");
+		return NULL;
+	}
+
+	if (0 == frameParams.frameContentSize) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 missing content size in frame");
+		return NULL;
+	}
+
+	dctx = ZSTD_createDCtx();
+	if (!dctx) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	buffer1Size = frameParams.frameContentSize;
+	buffer1 = PyMem_Malloc(buffer1Size);
+	if (!buffer1) {
+		goto finally;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	zresult = ZSTD_decompressDCtx(dctx, buffer1, buffer1Size, chunkData, chunkSize);
+	Py_END_ALLOW_THREADS
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "could not decompress chunk 0: %s", ZSTD_getErrorName(zresult));
+		goto finally;
+	}
+
+	buffer1ContentSize = zresult;
+
+	/* Special case of a simple chain. */
+	if (1 == chunksLen) {
+		result = PyBytes_FromStringAndSize(buffer1, buffer1Size);
+		goto finally;
+	}
+
+	/* This should ideally look at the next chunk. But this is slightly simpler. */
+	buffer2Size = frameParams.frameContentSize;
+	buffer2 = PyMem_Malloc(buffer2Size);
+	if (!buffer2) {
+		goto finally;
+	}
+
+	/* For each subsequent chunk, use the previous fulltext as a content dictionary.
+	   Our strategy is to have 2 buffers. One holds the previous fulltext (to be
+	   used as a content dictionary) and the other holds the new fulltext. The
+	   buffers grow when needed but never decrease in size. This limits the
+	   memory allocator overhead.
+	*/
+	for (chunkIndex = 1; chunkIndex < chunksLen; chunkIndex++) {
+		chunk = PyList_GetItem(chunks, chunkIndex);
+		if (!PyBytes_Check(chunk)) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd must be bytes", chunkIndex);
+			goto finally;
+		}
+
+		PyBytes_AsStringAndSize(chunk, &chunkData, &chunkSize);
+		zresult = ZSTD_getFrameParams(&frameParams, (void*)chunkData, chunkSize);
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd is not a valid zstd frame", chunkIndex);
+			goto finally;
+		}
+		else if (zresult) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd is too small to contain a zstd frame", chunkIndex);
+			goto finally;
+		}
+
+		if (0 == frameParams.frameContentSize) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd missing content size in frame", chunkIndex);
+			goto finally;
+		}
+
+		parity = chunkIndex % 2;
+
+		/* This could definitely be abstracted to reduce code duplication. */
+		if (parity) {
+			/* Resize destination buffer to hold larger content. */
+			if (buffer2Size < frameParams.frameContentSize) {
+				buffer2Size = frameParams.frameContentSize;
+				destBuffer = PyMem_Realloc(buffer2, buffer2Size);
+				if (!destBuffer) {
+					goto finally;
+				}
+				buffer2 = destBuffer;
+			}
+
+			Py_BEGIN_ALLOW_THREADS
+			zresult = ZSTD_decompress_usingDict(dctx, buffer2, buffer2Size,
+				chunkData, chunkSize, buffer1, buffer1ContentSize);
+			Py_END_ALLOW_THREADS
+			if (ZSTD_isError(zresult)) {
+				PyErr_Format(ZstdError, "could not decompress chunk %zd: %s",
+					chunkIndex, ZSTD_getErrorName(zresult));
+				goto finally;
+			}
+			buffer2ContentSize = zresult;
+		}
+		else {
+			if (buffer1Size < frameParams.frameContentSize) {
+				buffer1Size = frameParams.frameContentSize;
+				destBuffer = PyMem_Realloc(buffer1, buffer1Size);
+				if (!destBuffer) {
+					goto finally;
+				}
+				buffer1 = destBuffer;
+			}
+
+			Py_BEGIN_ALLOW_THREADS
+			zresult = ZSTD_decompress_usingDict(dctx, buffer1, buffer1Size,
+				chunkData, chunkSize, buffer2, buffer2ContentSize);
+			Py_END_ALLOW_THREADS
+			if (ZSTD_isError(zresult)) {
+				PyErr_Format(ZstdError, "could not decompress chunk %zd: %s",
+					chunkIndex, ZSTD_getErrorName(zresult));
+				goto finally;
+			}
+			buffer1ContentSize = zresult;
+		}
+	}
+
+	result = PyBytes_FromStringAndSize(parity ? buffer2 : buffer1,
+		parity ? buffer2ContentSize : buffer1ContentSize);
+
+finally:
+	if (buffer2) {
+		PyMem_Free(buffer2);
+	}
+	if (buffer1) {
+		PyMem_Free(buffer1);
+	}
+
+	if (dctx) {
+		ZSTD_freeDCtx(dctx);
+	}
+
+	return result;
+}
+
 static PyMethodDef Decompressor_methods[] = {
 	{ "copy_stream", (PyCFunction)Decompressor_copy_stream, METH_VARARGS | METH_KEYWORDS,
 	Decompressor_copy_stream__doc__ },
@@ -616,6 +787,8 @@
 	Decompressor_read_from__doc__ },
 	{ "write_to", (PyCFunction)Decompressor_write_to, METH_VARARGS | METH_KEYWORDS,
 	Decompressor_write_to__doc__ },
+	{ "decompress_content_dict_chain", (PyCFunction)Decompressor_decompress_content_dict_chain,
+	  METH_VARARGS | METH_KEYWORDS, Decompressor_decompress_content_dict_chain__doc__ },
 	{ NULL, NULL }
 };
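
For reference, an illustrative pure-Python equivalent of
decompress_content_dict_chain() built on the public API; the C version above
avoids these intermediate objects and ping-pongs between two reusable
buffers::

    def chain_decompress(frames):
        last = zstd.ZstdDecompressor().decompress(frames[0])
        for frame in frames[1:]:
            dict_data = zstd.ZstdCompressionDict(last)
            last = zstd.ZstdDecompressor(dict_data=dict_data).decompress(frame)
        return last
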
 
--- a/contrib/python-zstandard/c-ext/dictparams.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/dictparams.c	Tue Feb 28 11:13:25 2017 -0800
@@ -18,8 +18,8 @@
 	unsigned notificationLevel;
 	unsigned dictID;
 
-	if (!PyArg_ParseTuple(args, "IiII", &selectivityLevel, &compressionLevel,
-		&notificationLevel, &dictID)) {
+	if (!PyArg_ParseTuple(args, "IiII:DictParameters",
+		&selectivityLevel, &compressionLevel, &notificationLevel, &dictID)) {
 		return NULL;
 	}
 
@@ -40,6 +40,22 @@
 	PyObject_Del(self);
 }
 
+static PyMemberDef DictParameters_members[] = {
+	{ "selectivity_level", T_UINT,
+	  offsetof(DictParametersObject, selectivityLevel), READONLY,
+	  "selectivity level" },
+	{ "compression_level", T_INT,
+	  offsetof(DictParametersObject, compressionLevel), READONLY,
+	  "compression level" },
+	{ "notification_level", T_UINT,
+	  offsetof(DictParametersObject, notificationLevel), READONLY,
+	  "notification level" },
+	{ "dict_id", T_UINT,
+	  offsetof(DictParametersObject, dictID), READONLY,
+	  "dictionary ID" },
+	{ NULL }
+};
+
 static Py_ssize_t DictParameters_length(PyObject* self) {
 	return 4;
 }
@@ -102,7 +118,7 @@
 	0,                         /* tp_iter */
 	0,                         /* tp_iternext */
 	0,                         /* tp_methods */
-	0,                         /* tp_members */
+	DictParameters_members,    /* tp_members */
 	0,                         /* tp_getset */
 	0,                         /* tp_base */
 	0,                         /* tp_dict */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/frameparams.c	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,132 @@
+/**
+* Copyright (c) 2017-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(FrameParameters__doc__,
+	"FrameParameters: information about a zstd frame");
+
+FrameParametersObject* get_frame_parameters(PyObject* self, PyObject* args) {
+	const char* source;
+	Py_ssize_t sourceSize;
+	ZSTD_frameParams params;
+	FrameParametersObject* result = NULL;
+	size_t zresult;
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTuple(args, "y#:get_frame_parameters",
+#else
+	if (!PyArg_ParseTuple(args, "s#:get_frame_parameters",
+#endif
+		&source, &sourceSize)) {
+		return NULL;
+	}
+
+	/* Needed for Python 2 to reject unicode */
+	if (!PyBytes_Check(PyTuple_GET_ITEM(args, 0))) {
+		PyErr_SetString(PyExc_TypeError, "argument must be bytes");
+		return NULL;
+	}
+
+	zresult = ZSTD_getFrameParams(&params, (void*)source, sourceSize);
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "cannot get frame parameters: %s", ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	if (zresult) {
+		PyErr_Format(ZstdError, "not enough data for frame parameters; need %zu bytes", zresult);
+		return NULL;
+	}
+
+	result = PyObject_New(FrameParametersObject, &FrameParametersType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->frameContentSize = params.frameContentSize;
+	result->windowSize = params.windowSize;
+	result->dictID = params.dictID;
+	result->checksumFlag = params.checksumFlag ? 1 : 0;
+
+	return result;
+}
+
+static void FrameParameters_dealloc(PyObject* self) {
+	PyObject_Del(self);
+}
+
+static PyMemberDef FrameParameters_members[] = {
+	{ "content_size", T_ULONGLONG,
+	  offsetof(FrameParametersObject, frameContentSize), READONLY,
+	  "frame content size" },
+	{ "window_size", T_UINT,
+	  offsetof(FrameParametersObject, windowSize), READONLY,
+	  "window size" },
+	{ "dict_id", T_UINT,
+	  offsetof(FrameParametersObject, dictID), READONLY,
+	  "dictionary ID" },
+	{ "has_checksum", T_BOOL,
+	  offsetof(FrameParametersObject, checksumFlag), READONLY,
+	  "checksum flag" },
+	{ NULL }
+};
+
+PyTypeObject FrameParametersType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"FrameParameters",          /* tp_name */
+	sizeof(FrameParametersObject), /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)FrameParameters_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	0,                         /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	FrameParameters__doc__,    /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	0,                         /* tp_methods */
+	FrameParameters_members,   /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	0,                         /* tp_init */
+	0,                         /* tp_alloc */
+	0,                         /* tp_new */
+};
+
+void frameparams_module_init(PyObject* mod) {
+	Py_TYPE(&FrameParametersType) = &PyType_Type;
+	if (PyType_Ready(&FrameParametersType) < 0) {
+		return;
+	}
+
+	Py_IncRef((PyObject*)&FrameParametersType);
+	PyModule_AddObject(mod, "FrameParameters", (PyObject*)&FrameParametersType);
+}
--- a/contrib/python-zstandard/c-ext/python-zstandard.h	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Tue Feb 28 11:13:25 2017 -0800
@@ -8,6 +8,7 @@
 
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
+#include "structmember.h"
 
 #define ZSTD_STATIC_LINKING_ONLY
 #define ZDICT_STATIC_LINKING_ONLY
@@ -15,7 +16,7 @@
 #include "zstd.h"
 #include "zdict.h"
 
-#define PYTHON_ZSTANDARD_VERSION "0.6.0"
+#define PYTHON_ZSTANDARD_VERSION "0.7.0"
 
 typedef enum {
 	compressorobj_flush_finish,
@@ -37,6 +38,16 @@
 
 typedef struct {
 	PyObject_HEAD
+	unsigned long long frameContentSize;
+	unsigned windowSize;
+	unsigned dictID;
+	char checksumFlag;
+} FrameParametersObject;
+
+extern PyTypeObject FrameParametersType;
+
+typedef struct {
+	PyObject_HEAD
 	unsigned selectivityLevel;
 	int compressionLevel;
 	unsigned notificationLevel;
@@ -115,7 +126,7 @@
 typedef struct {
 	PyObject_HEAD
 
-	ZSTD_DCtx* refdctx;
+	ZSTD_DCtx* dctx;
 
 	ZstdCompressionDict* dict;
 	ZSTD_DDict* ddict;
@@ -172,6 +183,7 @@
 
 void ztopy_compression_parameters(CompressionParametersObject* params, ZSTD_compressionParameters* zparams);
 CompressionParametersObject* get_compression_parameters(PyObject* self, PyObject* args);
+FrameParametersObject* get_frame_parameters(PyObject* self, PyObject* args);
 PyObject* estimate_compression_context_size(PyObject* self, PyObject* args);
 ZSTD_CStream* CStream_from_ZstdCompressor(ZstdCompressor* compressor, Py_ssize_t sourceSize);
 ZSTD_DStream* DStream_from_ZstdDecompressor(ZstdDecompressor* decompressor);
--- a/contrib/python-zstandard/make_cffi.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/make_cffi.py	Tue Feb 28 11:13:25 2017 -0800
@@ -9,6 +9,7 @@
 import cffi
 import distutils.ccompiler
 import os
+import re
 import subprocess
 import tempfile
 
@@ -19,6 +20,8 @@
     'common/entropy_common.c',
     'common/error_private.c',
     'common/fse_decompress.c',
+    'common/pool.c',
+    'common/threading.c',
     'common/xxhash.c',
     'common/zstd_common.c',
     'compress/fse_compress.c',
@@ -26,10 +29,17 @@
     'compress/zstd_compress.c',
     'decompress/huf_decompress.c',
     'decompress/zstd_decompress.c',
+    'dictBuilder/cover.c',
     'dictBuilder/divsufsort.c',
     'dictBuilder/zdict.c',
 )]
 
+HEADERS = [os.path.join(HERE, 'zstd', *p) for p in (
+    ('zstd.h',),
+    ('common', 'pool.h'),
+    ('dictBuilder', 'zdict.h'),
+)]
+
 INCLUDE_DIRS = [os.path.join(HERE, d) for d in (
     'zstd',
     'zstd/common',
@@ -53,56 +63,92 @@
     args.extend([
         '-E',
         '-DZSTD_STATIC_LINKING_ONLY',
+        '-DZDICT_STATIC_LINKING_ONLY',
     ])
 elif compiler.compiler_type == 'msvc':
     args = [compiler.cc]
     args.extend([
         '/EP',
         '/DZSTD_STATIC_LINKING_ONLY',
+        '/DZDICT_STATIC_LINKING_ONLY',
     ])
 else:
     raise Exception('unsupported compiler type: %s' % compiler.compiler_type)
 
-# zstd.h includes <stddef.h>, which is also included by cffi's boilerplate.
-# This can lead to duplicate declarations. So we strip this include from the
-# preprocessor invocation.
+def preprocess(path):
+    # zstd.h includes <stddef.h>, which is also included by cffi's boilerplate.
+    # This can lead to duplicate declarations. So we strip this include from the
+    # preprocessor invocation.
+    with open(path, 'rb') as fh:
+        lines = [l for l in fh if not l.startswith(b'#include <stddef.h>')]
 
-with open(os.path.join(HERE, 'zstd', 'zstd.h'), 'rb') as fh:
-    lines = [l for l in fh if not l.startswith(b'#include <stddef.h>')]
-
-fd, input_file = tempfile.mkstemp(suffix='.h')
-os.write(fd, b''.join(lines))
-os.close(fd)
+    fd, input_file = tempfile.mkstemp(suffix='.h')
+    os.write(fd, b''.join(lines))
+    os.close(fd)
 
-args.append(input_file)
+    try:
+        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE)
+        output = process.communicate()[0]
+        ret = process.poll()
+        if ret:
+            raise Exception('preprocessor exited with error')
 
-try:
-    process = subprocess.Popen(args, stdout=subprocess.PIPE)
-    output = process.communicate()[0]
-    ret = process.poll()
-    if ret:
-        raise Exception('preprocessor exited with error')
-finally:
-    os.unlink(input_file)
+        return output
+    finally:
+        os.unlink(input_file)
 
-def normalize_output():
+
+def normalize_output(output):
     lines = []
     for line in output.splitlines():
         # CFFI's parser doesn't like __attribute__ on UNIX compilers.
         if line.startswith(b'__attribute__ ((visibility ("default"))) '):
             line = line[len(b'__attribute__ ((visibility ("default"))) '):]
 
+        if line.startswith(b'__attribute__((deprecated('):
+            continue
+        elif b'__declspec(deprecated(' in line:
+            continue
+
         lines.append(line)
 
     return b'\n'.join(lines)
 
+
 ffi = cffi.FFI()
 ffi.set_source('_zstd_cffi', '''
+#include "mem.h"
 #define ZSTD_STATIC_LINKING_ONLY
 #include "zstd.h"
+#define ZDICT_STATIC_LINKING_ONLY
+#include "pool.h"
+#include "zdict.h"
 ''', sources=SOURCES, include_dirs=INCLUDE_DIRS)
 
-ffi.cdef(normalize_output().decode('latin1'))
+DEFINE = re.compile(b'^\\#define ([a-zA-Z0-9_]+) ')
+
+sources = []
+
+for header in HEADERS:
+    preprocessed = preprocess(header)
+    sources.append(normalize_output(preprocessed))
+
+    # Do another pass over source and find constants that were preprocessed
+    # away.
+    with open(header, 'rb') as fh:
+        for line in fh:
+            line = line.strip()
+            m = DEFINE.match(line)
+            if not m:
+                continue
+
+            # The parser doesn't like some constants with complex values.
+            if m.group(1) in (b'ZSTD_LIB_VERSION', b'ZSTD_VERSION_STRING'):
+                continue
+
+            sources.append(m.group(0) + b' ...')
+
+ffi.cdef(u'\n'.join(s.decode('latin1') for s in sources))
 
 if __name__ == '__main__':
     ffi.compile()
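
The constant-harvesting pass relies on CFFI's dot-dot-dot form: a cdef line of the shape `#define NAME ...` tells CFFI to resolve the integer value from the real headers at build time, so the values never need to be duplicated in the cdef. A small self-contained illustration of that mechanism (the module and constant names are invented for the example):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef('#define EXAMPLE_CONSTANT ...')   # value resolved at build time
    ffi.set_source('_example', '#define EXAMPLE_CONSTANT 42')

    # After ffi.compile(), the built module exposes lib.EXAMPLE_CONSTANT == 42.
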
--- a/contrib/python-zstandard/setup.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/setup.py	Tue Feb 28 11:13:25 2017 -0800
@@ -62,6 +62,7 @@
         'Programming Language :: Python :: 3.3',
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
     ],
     keywords='zstandard zstd compression',
     ext_modules=extensions,
--- a/contrib/python-zstandard/setup_zstd.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/setup_zstd.py	Tue Feb 28 11:13:25 2017 -0800
@@ -12,6 +12,8 @@
     'common/entropy_common.c',
     'common/error_private.c',
     'common/fse_decompress.c',
+    'common/pool.c',
+    'common/threading.c',
     'common/xxhash.c',
     'common/zstd_common.c',
     'compress/fse_compress.c',
@@ -19,11 +21,13 @@
     'compress/zstd_compress.c',
     'decompress/huf_decompress.c',
     'decompress/zstd_decompress.c',
+    'dictBuilder/cover.c',
     'dictBuilder/divsufsort.c',
     'dictBuilder/zdict.c',
 )]
 
 zstd_sources_legacy = ['zstd/%s' % p for p in (
+    'deprecated/zbuff_common.c',
     'deprecated/zbuff_compress.c',
     'deprecated/zbuff_decompress.c',
     'legacy/zstd_v01.c',
@@ -63,6 +67,7 @@
     'c-ext/decompressoriterator.c',
     'c-ext/decompressionwriter.c',
     'c-ext/dictparams.c',
+    'c-ext/frameparams.c',
 ]
 
 zstd_depends = [
--- a/contrib/python-zstandard/tests/common.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/common.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1,4 +1,50 @@
+import inspect
 import io
+import types
+
+
+def make_cffi(cls):
+    """Decorator to add CFFI versions of each test method."""
+
+    try:
+        import zstd_cffi
+    except ImportError:
+        return cls
+
+    # If CFFI version is available, dynamically construct test methods
+    # that use it.
+
+    for attr in dir(cls):
+        fn = getattr(cls, attr)
+        if not inspect.ismethod(fn) and not inspect.isfunction(fn):
+            continue
+
+        if not fn.__name__.startswith('test_'):
+            continue
+
+        name = '%s_cffi' % fn.__name__
+
+        # Replace the "zstd" symbol with the CFFI module instance. Then copy
+        # the function object and install it in a new attribute.
+        if isinstance(fn, types.FunctionType):
+            globs = dict(fn.__globals__)
+            globs['zstd'] = zstd_cffi
+            new_fn = types.FunctionType(fn.__code__, globs, name,
+                                        fn.__defaults__, fn.__closure__)
+            new_method = new_fn
+        else:
+            globs = dict(fn.__func__.func_globals)
+            globs['zstd'] = zstd_cffi
+            new_fn = types.FunctionType(fn.__func__.func_code, globs, name,
+                                        fn.__func__.func_defaults,
+                                        fn.__func__.func_closure)
+            new_method = types.UnboundMethodType(new_fn, fn.im_self,
+                                                 fn.im_class)
+
+        setattr(cls, name, new_method)
+
+    return cls
+
 
 class OpCountingBytesIO(io.BytesIO):
     def __init__(self, *args, **kwargs):
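
In practice make_cffi is applied to each test class below: for every test_* method it installs a sibling test_*_cffi method whose module-global zstd is rebound to zstd_cffi, so the same assertions run against both backends. A hedged sketch of the intended usage (the class and test body are illustrative, not part of this change):

    import unittest

    import zstd

    from .common import make_cffi

    @make_cffi
    class TestExample(unittest.TestCase):
        def test_roundtrip(self):
            # In the generated test_roundtrip_cffi, `zstd` refers to zstd_cffi.
            cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
            frame = cctx.compress(b'foo' * 64)
            dctx = zstd.ZstdDecompressor()
            self.assertEqual(dctx.decompress(frame), b'foo' * 64)
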
--- a/contrib/python-zstandard/tests/test_cffi.py	Sat Feb 25 12:48:50 2017 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-import io
-
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
-
-import zstd
-
-try:
-    import zstd_cffi
-except ImportError:
-    raise unittest.SkipTest('cffi version of zstd not available')
-
-
-class TestCFFIWriteToToCDecompressor(unittest.TestCase):
-    def test_simple(self):
-        orig = io.BytesIO()
-        orig.write(b'foo')
-        orig.write(b'bar')
-        orig.write(b'foobar' * 16384)
-
-        dest = io.BytesIO()
-        cctx = zstd_cffi.ZstdCompressor()
-        with cctx.write_to(dest) as compressor:
-            compressor.write(orig.getvalue())
-
-        uncompressed = io.BytesIO()
-        dctx = zstd.ZstdDecompressor()
-        with dctx.write_to(uncompressed) as decompressor:
-            decompressor.write(dest.getvalue())
-
-        self.assertEqual(uncompressed.getvalue(), orig.getvalue())
-
-
--- a/contrib/python-zstandard/tests/test_compressor.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/test_compressor.py	Tue Feb 28 11:13:25 2017 -0800
@@ -10,7 +10,10 @@
 
 import zstd
 
-from .common import OpCountingBytesIO
+from .common import (
+    make_cffi,
+    OpCountingBytesIO,
+)
 
 
 if sys.version_info[0] >= 3:
@@ -19,6 +22,7 @@
     next = lambda it: it.next()
 
 
+@make_cffi
 class TestCompressor(unittest.TestCase):
     def test_level_bounds(self):
         with self.assertRaises(ValueError):
@@ -28,18 +32,17 @@
             zstd.ZstdCompressor(level=23)
 
 
+@make_cffi
 class TestCompressor_compress(unittest.TestCase):
     def test_compress_empty(self):
         cctx = zstd.ZstdCompressor(level=1)
-        cctx.compress(b'')
-
-        cctx = zstd.ZstdCompressor(level=22)
-        cctx.compress(b'')
-
-    def test_compress_empty(self):
-        cctx = zstd.ZstdCompressor(level=1)
-        self.assertEqual(cctx.compress(b''),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        result = cctx.compress(b'')
+        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 524288)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
 
         # TODO should be temporary until https://github.com/facebook/zstd/issues/506
         # is fixed.
@@ -59,6 +62,13 @@
         self.assertEqual(len(result), 999)
         self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
 
+        # This matches the test for read_from() below.
+        cctx = zstd.ZstdCompressor(level=1)
+        result = cctx.compress(b'f' * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b'o')
+        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00'
+                                 b'\x10\x66\x66\x01\x00\xfb\xff\x39\xc0'
+                                 b'\x02\x09\x00\x00\x6f')
+
     def test_write_checksum(self):
         cctx = zstd.ZstdCompressor(level=1)
         no_checksum = cctx.compress(b'foobar')
@@ -67,6 +77,12 @@
 
         self.assertEqual(len(with_checksum), len(no_checksum) + 4)
 
+        no_params = zstd.get_frame_parameters(no_checksum)
+        with_params = zstd.get_frame_parameters(with_checksum)
+
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
+
     def test_write_content_size(self):
         cctx = zstd.ZstdCompressor(level=1)
         no_size = cctx.compress(b'foobar' * 256)
@@ -75,6 +91,11 @@
 
         self.assertEqual(len(with_size), len(no_size) + 1)
 
+        no_params = zstd.get_frame_parameters(no_size)
+        with_params = zstd.get_frame_parameters(with_size)
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+
     def test_no_dict_id(self):
         samples = []
         for i in range(128):
@@ -92,6 +113,11 @@
 
         self.assertEqual(len(with_dict_id), len(no_dict_id) + 4)
 
+        no_params = zstd.get_frame_parameters(no_dict_id)
+        with_params = zstd.get_frame_parameters(with_dict_id)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 1584102229)
+
     def test_compress_dict_multiple(self):
         samples = []
         for i in range(128):
@@ -107,6 +133,7 @@
             cctx.compress(b'foo bar foobar foo bar foobar')
 
 
+@make_cffi
 class TestCompressor_compressobj(unittest.TestCase):
     def test_compressobj_empty(self):
         cctx = zstd.ZstdCompressor(level=1)
@@ -127,6 +154,12 @@
         self.assertEqual(len(result), 999)
         self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
 
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1048576)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
     def test_write_checksum(self):
         cctx = zstd.ZstdCompressor(level=1)
         cobj = cctx.compressobj()
@@ -135,6 +168,15 @@
         cobj = cctx.compressobj()
         with_checksum = cobj.compress(b'foobar') + cobj.flush()
 
+        no_params = zstd.get_frame_parameters(no_checksum)
+        with_params = zstd.get_frame_parameters(with_checksum)
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
+
         self.assertEqual(len(with_checksum), len(no_checksum) + 4)
 
     def test_write_content_size(self):
@@ -145,6 +187,15 @@
         cobj = cctx.compressobj(size=len(b'foobar' * 256))
         with_size = cobj.compress(b'foobar' * 256) + cobj.flush()
 
+        no_params = zstd.get_frame_parameters(no_size)
+        with_params = zstd.get_frame_parameters(with_size)
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
+
         self.assertEqual(len(with_size), len(no_size) + 1)
 
     def test_compress_after_finished(self):
@@ -187,6 +238,7 @@
         self.assertEqual(header, b'\x01\x00\x00')
 
 
+@make_cffi
 class TestCompressor_copy_stream(unittest.TestCase):
     def test_no_read(self):
         source = object()
@@ -229,6 +281,12 @@
         self.assertEqual(r, 255 * 16384)
         self.assertEqual(w, 999)
 
+        params = zstd.get_frame_parameters(dest.getvalue())
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1048576)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
     def test_write_checksum(self):
         source = io.BytesIO(b'foobar')
         no_checksum = io.BytesIO()
@@ -244,6 +302,15 @@
         self.assertEqual(len(with_checksum.getvalue()),
                          len(no_checksum.getvalue()) + 4)
 
+        no_params = zstd.get_frame_parameters(no_checksum.getvalue())
+        with_params = zstd.get_frame_parameters(with_checksum.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
+
     def test_write_content_size(self):
         source = io.BytesIO(b'foobar' * 256)
         no_size = io.BytesIO()
@@ -268,6 +335,15 @@
         self.assertEqual(len(with_size.getvalue()),
                          len(no_size.getvalue()) + 1)
 
+        no_params = zstd.get_frame_parameters(no_size.getvalue())
+        with_params = zstd.get_frame_parameters(with_size.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
+
     def test_read_write_size(self):
         source = OpCountingBytesIO(b'foobarfoobar')
         dest = OpCountingBytesIO()
@@ -288,18 +364,25 @@
     return buffer.getvalue()
 
 
+@make_cffi
 class TestCompressor_write_to(unittest.TestCase):
     def test_empty(self):
-        self.assertEqual(compress(b'', 1),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        result = compress(b'', 1)
+        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 524288)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
 
     def test_multiple_compress(self):
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=5)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'x' * 8192)
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'x' * 8192), 0)
 
         result = buffer.getvalue()
         self.assertEqual(result,
@@ -318,11 +401,23 @@
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=9, dict_data=d)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'foo' * 16384)
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'foo' * 16384), 634)
 
         compressed = buffer.getvalue()
+
+        params = zstd.get_frame_parameters(compressed)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, d.dict_id())
+        self.assertFalse(params.has_checksum)
+
+        self.assertEqual(compressed[0:32],
+                         b'\x28\xb5\x2f\xfd\x03\x00\x55\x7b\x6b\x5e\x54\x00'
+                         b'\x00\x00\x02\xfc\xf4\xa5\xba\x23\x3f\x85\xb3\x54'
+                         b'\x00\x00\x18\x6f\x6f\x66\x01\x00')
+
         h = hashlib.sha1(compressed).hexdigest()
         self.assertEqual(h, '1c5bcd25181bcd8c1a73ea8773323e0056129f92')
 
@@ -332,11 +427,18 @@
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(compression_params=params)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'foobar' * 16384)
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'foobar' * 16384), 0)
 
         compressed = buffer.getvalue()
+
+        params = zstd.get_frame_parameters(compressed)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1048576)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
         h = hashlib.sha1(compressed).hexdigest()
         self.assertEqual(h, '1ae31f270ed7de14235221a604b31ecd517ebd99')
 
@@ -344,12 +446,21 @@
         no_checksum = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1)
         with cctx.write_to(no_checksum) as compressor:
-            compressor.write(b'foobar')
+            self.assertEqual(compressor.write(b'foobar'), 0)
 
         with_checksum = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         with cctx.write_to(with_checksum) as compressor:
-            compressor.write(b'foobar')
+            self.assertEqual(compressor.write(b'foobar'), 0)
+
+        no_params = zstd.get_frame_parameters(no_checksum.getvalue())
+        with_params = zstd.get_frame_parameters(with_checksum.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
 
         self.assertEqual(len(with_checksum.getvalue()),
                          len(no_checksum.getvalue()) + 4)
@@ -358,12 +469,12 @@
         no_size = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1)
         with cctx.write_to(no_size) as compressor:
-            compressor.write(b'foobar' * 256)
+            self.assertEqual(compressor.write(b'foobar' * 256), 0)
 
         with_size = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
         with cctx.write_to(with_size) as compressor:
-            compressor.write(b'foobar' * 256)
+            self.assertEqual(compressor.write(b'foobar' * 256), 0)
 
         # Source size is not known in streaming mode, so header not
         # written.
@@ -373,7 +484,16 @@
         # Declaring size will write the header.
         with_size = io.BytesIO()
         with cctx.write_to(with_size, size=len(b'foobar' * 256)) as compressor:
-            compressor.write(b'foobar' * 256)
+            self.assertEqual(compressor.write(b'foobar' * 256), 0)
+
+        no_params = zstd.get_frame_parameters(no_size.getvalue())
+        with_params = zstd.get_frame_parameters(with_size.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
 
         self.assertEqual(len(with_size.getvalue()),
                          len(no_size.getvalue()) + 1)
@@ -390,12 +510,21 @@
         with_dict_id = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
         with cctx.write_to(with_dict_id) as compressor:
-            compressor.write(b'foobarfoobar')
+            self.assertEqual(compressor.write(b'foobarfoobar'), 0)
 
         cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
         no_dict_id = io.BytesIO()
         with cctx.write_to(no_dict_id) as compressor:
-            compressor.write(b'foobarfoobar')
+            self.assertEqual(compressor.write(b'foobarfoobar'), 0)
+
+        no_params = zstd.get_frame_parameters(no_dict_id.getvalue())
+        with_params = zstd.get_frame_parameters(with_dict_id.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, d.dict_id())
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
 
         self.assertEqual(len(with_dict_id.getvalue()),
                          len(no_dict_id.getvalue()) + 4)
@@ -412,9 +541,9 @@
         cctx = zstd.ZstdCompressor(level=3)
         dest = OpCountingBytesIO()
         with cctx.write_to(dest, write_size=1) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'foobar')
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'foobar'), 0)
 
         self.assertEqual(len(dest.getvalue()), dest._write_count)
 
@@ -422,15 +551,15 @@
         cctx = zstd.ZstdCompressor(level=3)
         dest = OpCountingBytesIO()
         with cctx.write_to(dest) as compressor:
-            compressor.write(b'foo')
+            self.assertEqual(compressor.write(b'foo'), 0)
             self.assertEqual(dest._write_count, 0)
-            compressor.flush()
+            self.assertEqual(compressor.flush(), 12)
             self.assertEqual(dest._write_count, 1)
-            compressor.write(b'bar')
+            self.assertEqual(compressor.write(b'bar'), 0)
             self.assertEqual(dest._write_count, 1)
-            compressor.flush()
+            self.assertEqual(compressor.flush(), 6)
             self.assertEqual(dest._write_count, 2)
-            compressor.write(b'baz')
+            self.assertEqual(compressor.write(b'baz'), 0)
 
         self.assertEqual(dest._write_count, 3)
 
@@ -438,10 +567,10 @@
         cctx = zstd.ZstdCompressor(level=3, write_checksum=True)
         dest = OpCountingBytesIO()
         with cctx.write_to(dest) as compressor:
-            compressor.write(b'foobar' * 8192)
+            self.assertEqual(compressor.write(b'foobar' * 8192), 0)
             count = dest._write_count
             offset = dest.tell()
-            compressor.flush()
+            self.assertEqual(compressor.flush(), 23)
             self.assertGreater(dest._write_count, count)
             self.assertGreater(dest.tell(), offset)
             offset = dest.tell()
@@ -456,18 +585,22 @@
         self.assertEqual(header, b'\x01\x00\x00')
 
 
+@make_cffi
 class TestCompressor_read_from(unittest.TestCase):
     def test_type_validation(self):
         cctx = zstd.ZstdCompressor()
 
         # Object with read() works.
-        cctx.read_from(io.BytesIO())
+        for chunk in cctx.read_from(io.BytesIO()):
+            pass
 
         # Buffer protocol works.
-        cctx.read_from(b'foobar')
+        for chunk in cctx.read_from(b'foobar'):
+            pass
 
         with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
-            cctx.read_from(True)
+            for chunk in cctx.read_from(True):
+                pass
 
     def test_read_empty(self):
         cctx = zstd.ZstdCompressor(level=1)
@@ -521,6 +654,12 @@
         # We should get the same output as the one-shot compression mechanism.
         self.assertEqual(b''.join(chunks), cctx.compress(source.getvalue()))
 
+        params = zstd.get_frame_parameters(b''.join(chunks))
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 262144)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
         # Now check the buffer protocol.
         it = cctx.read_from(source.getvalue())
         chunks = list(it)
--- a/contrib/python-zstandard/tests/test_data_structures.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Tue Feb 28 11:13:25 2017 -0800
@@ -13,6 +13,12 @@
 
 import zstd
 
+from . common import (
+    make_cffi,
+)
+
+
+@make_cffi
 class TestCompressionParameters(unittest.TestCase):
     def test_init_bad_arg_type(self):
         with self.assertRaises(TypeError):
@@ -42,7 +48,81 @@
         p = zstd.get_compression_parameters(1)
         self.assertIsInstance(p, zstd.CompressionParameters)
 
-        self.assertEqual(p[0], 19)
+        self.assertEqual(p.window_log, 19)
+
+    def test_members(self):
+        p = zstd.CompressionParameters(10, 6, 7, 4, 5, 8, 1)
+        self.assertEqual(p.window_log, 10)
+        self.assertEqual(p.chain_log, 6)
+        self.assertEqual(p.hash_log, 7)
+        self.assertEqual(p.search_log, 4)
+        self.assertEqual(p.search_length, 5)
+        self.assertEqual(p.target_length, 8)
+        self.assertEqual(p.strategy, 1)
+
+
+@make_cffi
+class TestFrameParameters(unittest.TestCase):
+    def test_invalid_type(self):
+        with self.assertRaises(TypeError):
+            zstd.get_frame_parameters(None)
+
+        with self.assertRaises(TypeError):
+            zstd.get_frame_parameters(u'foobarbaz')
+
+    def test_invalid_input_sizes(self):
+        with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'):
+            zstd.get_frame_parameters(b'')
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'):
+            zstd.get_frame_parameters(zstd.FRAME_HEADER)
+
+    def test_invalid_frame(self):
+        with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'):
+            zstd.get_frame_parameters(b'foobarbaz')
+
+    def test_attributes(self):
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x00')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
+        # Lowest 2 bits indicate whether a dict ID is present and its size; here it is 1 byte.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x01\x00\xff')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 255)
+        self.assertFalse(params.has_checksum)
+
+        # The 3rd lowest bit indicates whether a content checksum is present.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x04\x00')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 0)
+        self.assertTrue(params.has_checksum)
+
+        # Upper 2 bits indicate content size.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x40\x00\xff\x00')
+        self.assertEqual(params.content_size, 511)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
+        # Window descriptor is 2nd byte after frame header.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x40')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 262144)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
+        # Set multiple things.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x45\x40\x0f\x10\x00')
+        self.assertEqual(params.content_size, 272)
+        self.assertEqual(params.window_size, 262144)
+        self.assertEqual(params.dict_id, 15)
+        self.assertTrue(params.has_checksum)
+
 
 if hypothesis:
     s_windowlog = strategies.integers(min_value=zstd.WINDOWLOG_MIN,
@@ -65,6 +145,8 @@
                                           zstd.STRATEGY_BTLAZY2,
                                           zstd.STRATEGY_BTOPT))
 
+
+    @make_cffi
     class TestCompressionParametersHypothesis(unittest.TestCase):
         @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
                           s_searchlength, s_targetlength, s_strategy)
@@ -73,9 +155,6 @@
             p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
                                            searchlog, searchlength,
                                            targetlength, strategy)
-            self.assertEqual(tuple(p),
-                             (windowlog, chainlog, hashlog, searchlog,
-                              searchlength, targetlength, strategy))
 
             # Verify we can instantiate a compressor with the supplied values.
             # ZSTD_checkCParams moves the goal posts on us from what's advertised
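
The attribute tests above poke bits directly into the frame header descriptor, the first byte after the 4-byte magic: bits 0-1 give the dictionary ID field size, bit 2 the checksum flag, and bits 6-7 the content size field size. A rough Python sketch of that bit layout, following the zstd frame format rather than any API in this change:

    def describe_frame_descriptor(descriptor):
        dict_id_flag = descriptor & 0x03         # dict ID field: 0, 1, 2 or 4 bytes
        checksum_flag = bool(descriptor & 0x04)  # content checksum present
        single_segment = bool(descriptor & 0x20) # no separate window descriptor
        fcs_flag = (descriptor >> 6) & 0x03      # content size field size code
        return dict_id_flag, checksum_flag, single_segment, fcs_flag

    # 0x45 from the combined test above: 1-byte dict ID, checksum set,
    # 2-byte content size field (encoded value + 256 gives 272).
    print(describe_frame_descriptor(0x45))  # (1, True, False, 1)
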
--- a/contrib/python-zstandard/tests/test_decompressor.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Tue Feb 28 11:13:25 2017 -0800
@@ -10,7 +10,10 @@
 
 import zstd
 
-from .common import OpCountingBytesIO
+from .common import (
+    make_cffi,
+    OpCountingBytesIO,
+)
 
 
 if sys.version_info[0] >= 3:
@@ -19,6 +22,7 @@
     next = lambda it: it.next()
 
 
+@make_cffi
 class TestDecompressor_decompress(unittest.TestCase):
     def test_empty_input(self):
         dctx = zstd.ZstdDecompressor()
@@ -119,6 +123,7 @@
             self.assertEqual(decompressed, sources[i])
 
 
+@make_cffi
 class TestDecompressor_copy_stream(unittest.TestCase):
     def test_no_read(self):
         source = object()
@@ -180,6 +185,7 @@
         self.assertEqual(dest._write_count, len(dest.getvalue()))
 
 
+@make_cffi
 class TestDecompressor_decompressobj(unittest.TestCase):
     def test_simple(self):
         data = zstd.ZstdCompressor(level=1).compress(b'foobar')
@@ -207,6 +213,7 @@
     return buffer.getvalue()
 
 
+@make_cffi
 class TestDecompressor_write_to(unittest.TestCase):
     def test_empty_roundtrip(self):
         cctx = zstd.ZstdCompressor()
@@ -256,14 +263,14 @@
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(dict_data=d)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(orig)
+            self.assertEqual(compressor.write(orig), 1544)
 
         compressed = buffer.getvalue()
         buffer = io.BytesIO()
 
         dctx = zstd.ZstdDecompressor(dict_data=d)
         with dctx.write_to(buffer) as decompressor:
-            decompressor.write(compressed)
+            self.assertEqual(decompressor.write(compressed), len(orig))
 
         self.assertEqual(buffer.getvalue(), orig)
 
@@ -291,6 +298,7 @@
         self.assertEqual(dest._write_count, len(dest.getvalue()))
 
 
+@make_cffi
 class TestDecompressor_read_from(unittest.TestCase):
     def test_type_validation(self):
         dctx = zstd.ZstdDecompressor()
@@ -302,7 +310,7 @@
         dctx.read_from(b'foobar')
 
         with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
-            dctx.read_from(True)
+            b''.join(dctx.read_from(True))
 
     def test_empty_input(self):
         dctx = zstd.ZstdDecompressor()
@@ -351,7 +359,7 @@
         dctx = zstd.ZstdDecompressor()
 
         with self.assertRaisesRegexp(ValueError, 'skip_bytes must be smaller than read_size'):
-            dctx.read_from(b'', skip_bytes=1, read_size=1)
+            b''.join(dctx.read_from(b'', skip_bytes=1, read_size=1))
 
         with self.assertRaisesRegexp(ValueError, 'skip_bytes larger than first input chunk'):
             b''.join(dctx.read_from(b'foobar', skip_bytes=10))
@@ -476,3 +484,94 @@
             self.assertEqual(len(chunk), 1)
 
         self.assertEqual(source._read_count, len(source.getvalue()))
+
+
+@make_cffi
+class TestDecompressor_content_dict_chain(unittest.TestCase):
+    def test_bad_inputs_simple(self):
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaises(TypeError):
+            dctx.decompress_content_dict_chain(b'foo')
+
+        with self.assertRaises(TypeError):
+            dctx.decompress_content_dict_chain((b'foo', b'bar'))
+
+        with self.assertRaisesRegexp(ValueError, 'empty input chain'):
+            dctx.decompress_content_dict_chain([])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'):
+            dctx.decompress_content_dict_chain([u'foo'])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'):
+            dctx.decompress_content_dict_chain([True])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 is too small to contain a zstd frame'):
+            dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 is not a valid zstd frame'):
+            dctx.decompress_content_dict_chain([b'foo' * 8])
+
+        no_size = zstd.ZstdCompressor().compress(b'foo' * 64)
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 missing content size in frame'):
+            dctx.decompress_content_dict_chain([no_size])
+
+        # Corrupt first frame.
+        frame = zstd.ZstdCompressor(write_content_size=True).compress(b'foo' * 64)
+        frame = frame[0:12] + frame[15:]
+        with self.assertRaisesRegexp(zstd.ZstdError, 'could not decompress chunk 0'):
+            dctx.decompress_content_dict_chain([frame])
+
+    def test_bad_subsequent_input(self):
+        initial = zstd.ZstdCompressor(write_content_size=True).compress(b'foo' * 64)
+
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'):
+            dctx.decompress_content_dict_chain([initial, u'foo'])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'):
+            dctx.decompress_content_dict_chain([initial, None])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 is too small to contain a zstd frame'):
+            dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 is not a valid zstd frame'):
+            dctx.decompress_content_dict_chain([initial, b'foo' * 8])
+
+        no_size = zstd.ZstdCompressor().compress(b'foo' * 64)
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 missing content size in frame'):
+            dctx.decompress_content_dict_chain([initial, no_size])
+
+        # Corrupt second frame.
+        cctx = zstd.ZstdCompressor(write_content_size=True, dict_data=zstd.ZstdCompressionDict(b'foo' * 64))
+        frame = cctx.compress(b'bar' * 64)
+        frame = frame[0:12] + frame[15:]
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'could not decompress chunk 1'):
+            dctx.decompress_content_dict_chain([initial, frame])
+
+    def test_simple(self):
+        original = [
+            b'foo' * 64,
+            b'foobar' * 64,
+            b'baz' * 64,
+            b'foobaz' * 64,
+            b'foobarbaz' * 64,
+        ]
+
+        chunks = []
+        chunks.append(zstd.ZstdCompressor(write_content_size=True).compress(original[0]))
+        for i, chunk in enumerate(original[1:]):
+            d = zstd.ZstdCompressionDict(original[i])
+            cctx = zstd.ZstdCompressor(dict_data=d, write_content_size=True)
+            chunks.append(cctx.compress(chunk))
+
+        for i in range(1, len(original)):
+            chain = chunks[0:i]
+            expected = original[i - 1]
+            dctx = zstd.ZstdDecompressor()
+            decompressed = dctx.decompress_content_dict_chain(chain)
+            self.assertEqual(decompressed, expected)
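
decompress_content_dict_chain walks the chain front to back, using each decompressed chunk as a content-only dictionary for the next frame and returning the final chunk's plaintext. A hedged pure-Python equivalent of what test_simple expects, built only from API already used in these tests:

    import zstd

    def decompress_chain(chunks):
        # Every frame must embed its content size; the first frame has no dict.
        last = zstd.ZstdDecompressor().decompress(chunks[0])
        for chunk in chunks[1:]:
            d = zstd.ZstdCompressionDict(last)
            last = zstd.ZstdDecompressor(dict_data=d).decompress(chunk)
        return last
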
--- a/contrib/python-zstandard/tests/test_estimate_sizes.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/test_estimate_sizes.py	Tue Feb 28 11:13:25 2017 -0800
@@ -5,7 +5,12 @@
 
 import zstd
 
+from . common import (
+    make_cffi,
+)
 
+
+@make_cffi
 class TestSizes(unittest.TestCase):
     def test_decompression_size(self):
         size = zstd.estimate_decompression_context_size()
--- a/contrib/python-zstandard/tests/test_module_attributes.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/test_module_attributes.py	Tue Feb 28 11:13:25 2017 -0800
@@ -7,9 +7,15 @@
 
 import zstd
 
+from . common import (
+    make_cffi,
+)
+
+
+@make_cffi
 class TestModuleAttributes(unittest.TestCase):
     def test_version(self):
-        self.assertEqual(zstd.ZSTD_VERSION, (1, 1, 2))
+        self.assertEqual(zstd.ZSTD_VERSION, (1, 1, 3))
 
     def test_constants(self):
         self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22)
@@ -45,4 +51,4 @@
         )
 
         for a in attrs:
-            self.assertTrue(hasattr(zstd, a))
+            self.assertTrue(hasattr(zstd, a), a)
--- a/contrib/python-zstandard/tests/test_roundtrip.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/test_roundtrip.py	Tue Feb 28 11:13:25 2017 -0800
@@ -13,10 +13,14 @@
 
 import zstd
 
+from .common import (
+    make_cffi,
+)
 
 compression_levels = strategies.integers(min_value=1, max_value=22)
 
 
+@make_cffi
 class TestRoundTrip(unittest.TestCase):
     @hypothesis.given(strategies.binary(), compression_levels)
     def test_compress_write_to(self, data, level):
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Tue Feb 28 11:13:25 2017 -0800
@@ -7,6 +7,9 @@
 
 import zstd
 
+from . common import (
+    make_cffi,
+)
 
 if sys.version_info[0] >= 3:
     int_type = int
@@ -14,6 +17,7 @@
     int_type = long
 
 
+@make_cffi
 class TestTrainDictionary(unittest.TestCase):
     def test_no_args(self):
         with self.assertRaises(TypeError):
--- a/contrib/python-zstandard/zstd.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd.c	Tue Feb 28 11:13:25 2017 -0800
@@ -34,6 +34,11 @@
 "Obtains a ``CompressionParameters`` instance from a compression level and\n"
 "optional input size and dictionary size");
 
+PyDoc_STRVAR(get_frame_parameters__doc__,
+"get_frame_parameters(data)\n"
+"\n"
+"Obtains a ``FrameParameters`` instance by parsing data.\n");
+
 PyDoc_STRVAR(train_dictionary__doc__,
 "train_dictionary(dict_size, samples)\n"
 "\n"
@@ -53,6 +58,8 @@
 	METH_NOARGS, estimate_decompression_context_size__doc__ },
 	{ "get_compression_parameters", (PyCFunction)get_compression_parameters,
 	METH_VARARGS, get_compression_parameters__doc__ },
+	{ "get_frame_parameters", (PyCFunction)get_frame_parameters,
+	METH_VARARGS, get_frame_parameters__doc__ },
 	{ "train_dictionary", (PyCFunction)train_dictionary,
 	METH_VARARGS | METH_KEYWORDS, train_dictionary__doc__ },
 	{ NULL, NULL }
@@ -70,6 +77,7 @@
 void decompressobj_module_init(PyObject* mod);
 void decompressionwriter_module_init(PyObject* mod);
 void decompressoriterator_module_init(PyObject* mod);
+void frameparams_module_init(PyObject* mod);
 
 void zstd_module_init(PyObject* m) {
 	/* python-zstandard relies on unstable zstd C API features. This means
@@ -87,7 +95,7 @@
 	   We detect this mismatch here and refuse to load the module if this
 	   scenario is detected.
 	*/
-	if (ZSTD_VERSION_NUMBER != 10102 || ZSTD_versionNumber() != 10102) {
+	if (ZSTD_VERSION_NUMBER != 10103 || ZSTD_versionNumber() != 10103) {
 		PyErr_SetString(PyExc_ImportError, "zstd C API mismatch; Python bindings not compiled against expected zstd version");
 		return;
 	}
@@ -104,6 +112,7 @@
 	decompressobj_module_init(m);
 	decompressionwriter_module_init(m);
 	decompressoriterator_module_init(m);
+	frameparams_module_init(m);
 }
 
 #if PY_MAJOR_VERSION >= 3
--- a/contrib/python-zstandard/zstd/common/mem.h	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/common/mem.h	Tue Feb 28 11:13:25 2017 -0800
@@ -39,7 +39,7 @@
 #endif
 
 /* code only tested on 32 and 64 bits systems */
-#define MEM_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }
+#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
 MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/pool.c	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,194 @@
+/**
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/* ======   Dependencies   ======= */
+#include <stddef.h>  /* size_t */
+#include <stdlib.h>  /* malloc, calloc, free */
+#include "pool.h"
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+#ifdef ZSTD_MULTITHREAD
+
+#include "threading.h"   /* pthread adaptation */
+
+/* A job is a function and an opaque argument */
+typedef struct POOL_job_s {
+  POOL_function function;
+  void *opaque;
+} POOL_job;
+
+struct POOL_ctx_s {
+    /* Keep track of the threads */
+    pthread_t *threads;
+    size_t numThreads;
+
+    /* The queue is a circular buffer */
+    POOL_job *queue;
+    size_t queueHead;
+    size_t queueTail;
+    size_t queueSize;
+    /* The mutex protects the queue */
+    pthread_mutex_t queueMutex;
+    /* Condition variable for pushers to wait on when the queue is full */
+    pthread_cond_t queuePushCond;
+    /* Condition variables for poppers to wait on when the queue is empty */
+    pthread_cond_t queuePopCond;
+    /* Indicates if the queue is shutting down */
+    int shutdown;
+};
+
+/* POOL_thread() :
+   Worker thread for the thread pool.
+   Waits for jobs and executes them.
+   @returns : NULL on failure else non-null.
+*/
+static void* POOL_thread(void* opaque) {
+    POOL_ctx* const ctx = (POOL_ctx*)opaque;
+    if (!ctx) { return NULL; }
+    for (;;) {
+        /* Lock the mutex and wait for a non-empty queue or until shutdown */
+        pthread_mutex_lock(&ctx->queueMutex);
+        while (ctx->queueHead == ctx->queueTail && !ctx->shutdown) {
+            pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
+        }
+        /* empty => shutting down: so stop */
+        if (ctx->queueHead == ctx->queueTail) {
+            pthread_mutex_unlock(&ctx->queueMutex);
+            return opaque;
+        }
+        /* Pop a job off the queue */
+        {   POOL_job const job = ctx->queue[ctx->queueHead];
+            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
+            /* Unlock the mutex, signal a pusher, and run the job */
+            pthread_mutex_unlock(&ctx->queueMutex);
+            pthread_cond_signal(&ctx->queuePushCond);
+            job.function(job.opaque);
+        }
+    }
+    /* Unreachable */
+}
+
+POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
+    POOL_ctx *ctx;
+    /* Check the parameters */
+    if (!numThreads || !queueSize) { return NULL; }
+    /* Allocate the context and zero initialize */
+    ctx = (POOL_ctx *)calloc(1, sizeof(POOL_ctx));
+    if (!ctx) { return NULL; }
+    /* Initialize the job queue.
+     * It needs one extra space since one space is wasted to differentiate empty
+     * and full queues.
+     */
+    ctx->queueSize = queueSize + 1;
+    ctx->queue = (POOL_job *)malloc(ctx->queueSize * sizeof(POOL_job));
+    ctx->queueHead = 0;
+    ctx->queueTail = 0;
+    pthread_mutex_init(&ctx->queueMutex, NULL);
+    pthread_cond_init(&ctx->queuePushCond, NULL);
+    pthread_cond_init(&ctx->queuePopCond, NULL);
+    ctx->shutdown = 0;
+    /* Allocate space for the thread handles */
+    ctx->threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t));
+    ctx->numThreads = 0;
+    /* Check for errors */
+    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
+    /* Initialize the threads */
+    {   size_t i;
+        for (i = 0; i < numThreads; ++i) {
+            if (pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
+                ctx->numThreads = i;
+                POOL_free(ctx);
+                return NULL;
+        }   }
+        ctx->numThreads = numThreads;
+    }
+    return ctx;
+}
+
+/*! POOL_join() :
+    Shutdown the queue, wake any sleeping threads, and join all of the threads.
+*/
+static void POOL_join(POOL_ctx *ctx) {
+    /* Shut down the queue */
+    pthread_mutex_lock(&ctx->queueMutex);
+    ctx->shutdown = 1;
+    pthread_mutex_unlock(&ctx->queueMutex);
+    /* Wake up sleeping threads */
+    pthread_cond_broadcast(&ctx->queuePushCond);
+    pthread_cond_broadcast(&ctx->queuePopCond);
+    /* Join all of the threads */
+    {   size_t i;
+        for (i = 0; i < ctx->numThreads; ++i) {
+            pthread_join(ctx->threads[i], NULL);
+    }   }
+}
+
+void POOL_free(POOL_ctx *ctx) {
+    if (!ctx) { return; }
+    POOL_join(ctx);
+    pthread_mutex_destroy(&ctx->queueMutex);
+    pthread_cond_destroy(&ctx->queuePushCond);
+    pthread_cond_destroy(&ctx->queuePopCond);
+    if (ctx->queue) free(ctx->queue);
+    if (ctx->threads) free(ctx->threads);
+    free(ctx);
+}
+
+void POOL_add(void *ctxVoid, POOL_function function, void *opaque) {
+    POOL_ctx *ctx = (POOL_ctx *)ctxVoid;
+    if (!ctx) { return; }
+
+    pthread_mutex_lock(&ctx->queueMutex);
+    {   POOL_job const job = {function, opaque};
+        /* Wait until there is space in the queue for the new job */
+        size_t newTail = (ctx->queueTail + 1) % ctx->queueSize;
+        while (ctx->queueHead == newTail && !ctx->shutdown) {
+          pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+          newTail = (ctx->queueTail + 1) % ctx->queueSize;
+        }
+        /* The queue is still going => there is space */
+        if (!ctx->shutdown) {
+            ctx->queue[ctx->queueTail] = job;
+            ctx->queueTail = newTail;
+        }
+    }
+    pthread_mutex_unlock(&ctx->queueMutex);
+    pthread_cond_signal(&ctx->queuePopCond);
+}
+
+#else  /* ZSTD_MULTITHREAD  not defined */
+/* No multi-threading support */
+
+/* We don't need any data, but if it is empty malloc() might return NULL. */
+struct POOL_ctx_s {
+  int data;
+};
+
+POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
+  (void)numThreads;
+  (void)queueSize;
+  return (POOL_ctx *)malloc(sizeof(POOL_ctx));
+}
+
+void POOL_free(POOL_ctx *ctx) {
+  if (ctx) free(ctx);
+}
+
+void POOL_add(void *ctx, POOL_function function, void *opaque) {
+  (void)ctx;
+  function(opaque);
+}
+
+#endif  /* ZSTD_MULTITHREAD */
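
The queue above allocates queueSize + 1 slots and deliberately wastes one: head == tail then always means empty, and (tail + 1) % size == head always means full, so no separate element count is needed under the mutex. A quick single-threaded Python model of that invariant (a sketch of the data structure only, not of the locking):

    class RingQueue(object):
        def __init__(self, capacity):
            self.size = capacity + 1        # one wasted slot disambiguates states
            self.slots = [None] * self.size
            self.head = 0                   # next slot to pop
            self.tail = 0                   # next slot to fill

        def empty(self):
            return self.head == self.tail

        def full(self):
            return (self.tail + 1) % self.size == self.head

        def push(self, job):
            assert not self.full()
            self.slots[self.tail] = job
            self.tail = (self.tail + 1) % self.size

        def pop(self):
            assert not self.empty()
            job = self.slots[self.head]
            self.head = (self.head + 1) % self.size
            return job
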
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/pool.h	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,56 @@
+/**
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+#ifndef POOL_H
+#define POOL_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include <stddef.h>   /* size_t */
+
+typedef struct POOL_ctx_s POOL_ctx;
+
+/*! POOL_create() :
+    Create a thread pool with at most `numThreads` threads.
+    `numThreads` must be at least 1.
+    The maximum number of queued jobs before blocking is `queueSize`.
+    `queueSize` must be at least 1.
+    @return : The POOL_ctx pointer on success else NULL.
+*/
+POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
+
+/*! POOL_free() :
+    Free a thread pool returned by POOL_create().
+*/
+void POOL_free(POOL_ctx *ctx);
+
+/*! POOL_function :
+    The function type that can be added to a thread pool.
+*/
+typedef void (*POOL_function)(void *);
+/*! POOL_add_function :
+    The function type for a generic thread pool add function.
+*/
+typedef void (*POOL_add_function)(void *, POOL_function, void *);
+
+/*! POOL_add() :
+    Add the job `function(opaque)` to the thread pool.
+    Possibly blocks until there is room in the queue.
+    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
+*/
+void POOL_add(void *ctx, POOL_function function, void *opaque);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/threading.c	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,79 @@
+
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+/**
+ * This file holds wrappers for systems which do not support pthreads
+ */
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4206)        /* disable: C4206: translation unit is empty (when ZSTD_MULTITHREAD is not defined) */
+#endif
+
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+
+
+/* ===  Dependencies  === */
+#include <process.h>
+#include <errno.h>
+#include "threading.h"
+
+
+/* ===  Implementation  === */
+
+static unsigned __stdcall worker(void *arg)
+{
+    pthread_t* const thread = (pthread_t*) arg;
+    thread->arg = thread->start_routine(thread->arg);
+    return 0;
+}
+
+int pthread_create(pthread_t* thread, const void* unused,
+            void* (*start_routine) (void*), void* arg)
+{
+    (void)unused;
+    thread->arg = arg;
+    thread->start_routine = start_routine;
+    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
+
+    if (!thread->handle)
+        return errno;
+    else
+        return 0;
+}
+
+int _pthread_join(pthread_t * thread, void **value_ptr)
+{
+    DWORD result;
+
+    if (!thread->handle) return 0;
+
+    result = WaitForSingleObject(thread->handle, INFINITE);
+    switch (result) {
+    case WAIT_OBJECT_0:
+        if (value_ptr) *value_ptr = thread->arg;
+        return 0;
+    case WAIT_ABANDONED:
+        return EINVAL;
+    default:
+        return GetLastError();
+    }
+}
+
+#endif   /* ZSTD_MULTITHREAD */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/threading.h	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,104 @@
+
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+#ifndef THREADING_H_938743
+#define THREADING_H_938743
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+#ifdef WINVER
+#  undef WINVER
+#endif
+#define WINVER       0x0600
+
+#ifdef _WIN32_WINNT
+#  undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT 0x0600
+
+#ifndef WIN32_LEAN_AND_MEAN
+#  define WIN32_LEAN_AND_MEAN
+#endif
+
+#include <windows.h>
+
+/* mutex */
+#define pthread_mutex_t           CRITICAL_SECTION
+#define pthread_mutex_init(a,b)   InitializeCriticalSection((a))
+#define pthread_mutex_destroy(a)  DeleteCriticalSection((a))
+#define pthread_mutex_lock(a)     EnterCriticalSection((a))
+#define pthread_mutex_unlock(a)   LeaveCriticalSection((a))
+
+/* condition variable */
+#define pthread_cond_t             CONDITION_VARIABLE
+#define pthread_cond_init(a, b)    InitializeConditionVariable((a))
+#define pthread_cond_destroy(a)    /* No delete */
+#define pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
+#define pthread_cond_signal(a)     WakeConditionVariable((a))
+#define pthread_cond_broadcast(a)  WakeAllConditionVariable((a))
+
+/* pthread_create() and pthread_join() */
+typedef struct {
+    HANDLE handle;
+    void* (*start_routine)(void*);
+    void* arg;
+} pthread_t;
+
+int pthread_create(pthread_t* thread, const void* unused,
+                   void* (*start_routine) (void*), void* arg);
+
+#define pthread_join(a, b) _pthread_join(&(a), (b))
+int _pthread_join(pthread_t* thread, void** value_ptr);
+
+/**
+ * add here more wrappers as required
+ */
+
+
+#elif defined(ZSTD_MULTITHREAD)   /* POSIX assumed; need a better detection method */
+/* ===   POSIX Systems   === */
+#  include <pthread.h>
+
+#else  /* ZSTD_MULTITHREAD not defined */
+/* No multithreading support */
+
+#define pthread_mutex_t int   /* #define rather than typedef, as sometimes pthread support is implicit, resulting in duplicated symbols */
+#define pthread_mutex_init(a,b)
+#define pthread_mutex_destroy(a)
+#define pthread_mutex_lock(a)
+#define pthread_mutex_unlock(a)
+
+#define pthread_cond_t int
+#define pthread_cond_init(a,b)
+#define pthread_cond_destroy(a)
+#define pthread_cond_wait(a,b)
+#define pthread_cond_signal(a)
+#define pthread_cond_broadcast(a)
+
+/* do not use pthread_t */
+
+#endif /* ZSTD_MULTITHREAD */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* THREADING_H_938743 */
--- a/contrib/python-zstandard/zstd/common/zstd_common.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/common/zstd_common.c	Tue Feb 28 11:13:25 2017 -0800
@@ -43,10 +43,6 @@
 *   provides error code string from enum */
 const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorName(code); }
 
-/* ---   ZBUFF Error Management  (deprecated)   --- */
-unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
-const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
-
 
 /*=**************************************************************
 *  Custom allocator
--- a/contrib/python-zstandard/zstd/common/zstd_errors.h	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/common/zstd_errors.h	Tue Feb 28 11:13:25 2017 -0800
@@ -18,6 +18,20 @@
 #include <stddef.h>   /* size_t */
 
 
+/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#  define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#else
+#  define ZSTDERRORLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#endif
+
 /*-****************************************
 *  error codes list
 ******************************************/
@@ -49,8 +63,8 @@
 /*! ZSTD_getErrorCode() :
     convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
     which can be used to compare directly with enum list published into "error_public.h" */
-ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
-const char* ZSTD_getErrorString(ZSTD_ErrorCode code);
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);
 
 
 #if defined (__cplusplus)
--- a/contrib/python-zstandard/zstd/common/zstd_internal.h	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/common/zstd_internal.h	Tue Feb 28 11:13:25 2017 -0800
@@ -267,4 +267,13 @@
 }
 
 
+/* hidden functions */
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);
+
+
 #endif   /* ZSTD_CCOMMON_H_MODULE */
--- a/contrib/python-zstandard/zstd/compress/zstd_compress.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress.c	Tue Feb 28 11:13:25 2017 -0800
@@ -51,8 +51,7 @@
 /*-*************************************
 *  Context memory management
 ***************************************/
-struct ZSTD_CCtx_s
-{
+struct ZSTD_CCtx_s {
     const BYTE* nextSrc;    /* next block here to continue on current prefix */
     const BYTE* base;       /* All regular indexes relative to this position */
     const BYTE* dictBase;   /* extDict indexes relative to this position */
@@ -61,10 +60,11 @@
     U32   nextToUpdate;     /* index from which to continue dictionary update */
     U32   nextToUpdate3;    /* index from which to continue dictionary update */
     U32   hashLog3;         /* dispatch table : larger == faster, more memory */
-    U32   loadedDictEnd;
+    U32   loadedDictEnd;    /* index of end of dictionary */
+    U32   forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
     ZSTD_compressionStage_e stage;
     U32   rep[ZSTD_REP_NUM];
-    U32   savedRep[ZSTD_REP_NUM];
+    U32   repToConfirm[ZSTD_REP_NUM];
     U32   dictID;
     ZSTD_parameters params;
     void* workSpace;
@@ -101,7 +101,7 @@
     cctx = (ZSTD_CCtx*) ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
     if (!cctx) return NULL;
     memset(cctx, 0, sizeof(ZSTD_CCtx));
-    memcpy(&(cctx->customMem), &customMem, sizeof(customMem));
+    cctx->customMem = customMem;
     return cctx;
 }
 
@@ -119,6 +119,15 @@
     return sizeof(*cctx) + cctx->workSpaceSize;
 }
 
+size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value)
+{
+    switch(param)
+    {
+    case ZSTD_p_forceWindow : cctx->forceWindow = value>0; cctx->loadedDictEnd = 0; return 0;
+    default: return ERROR(parameter_unknown);
+    }
+}
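+/* usage note : the multi-threaded compressor (zstdmt_compress.c) enables this
+ * parameter on each worker context, so that a section never references data
+ * beyond its own window :
+ *     ZSTD_setCCtxParameter(cctx, ZSTD_p_forceWindow, 1);
+ */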
+
 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx)   /* hidden interface */
 {
     return &(ctx->seqStore);
@@ -318,6 +327,14 @@
     }
 }
 
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+    int i;
+    for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = 0;
+}
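+/* usage note : zstdmt_compress.c calls this right after flushing the frame
+ * header of a non-first chunk :
+ *     ZSTD_compressContinue(cctx, dst, dstCapacity, src, 0);   (header only)
+ *     ZSTD_invalidateRepCodes(cctx);
+ * so the first real block starts with neutral repcodes instead of inheriting
+ * them from earlier state */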
 
 /*! ZSTD_copyCCtx() :
 *   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
@@ -735,12 +752,19 @@
       if ((size_t)(op-ostart) >= maxCSize) return 0; }
 
     /* confirm repcodes */
-    { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->savedRep[i]; }
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->repToConfirm[i]; }
 
     return op - ostart;
 }
 
 
+#if 0 /* for debug */
+#  define STORESEQ_DEBUG
+#include <stdio.h>   /* fprintf */
+U32 g_startDebug = 0;
+const BYTE* g_start = NULL;
+#endif
+
 /*! ZSTD_storeSeq() :
     Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
     `offsetCode` : distance to match, or 0 == repCode.
@@ -748,13 +772,14 @@
 */
 MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
 {
-#if 0  /* for debug */
-    static const BYTE* g_start = NULL;
-    const U32 pos = (U32)((const BYTE*)literals - g_start);
-    if (g_start==NULL) g_start = (const BYTE*)literals;
-    //if ((pos > 1) && (pos < 50000))
-        printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
-               pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
+#ifdef STORESEQ_DEBUG
+    if (g_startDebug) {
+        if (g_start==NULL) g_start = (const BYTE*)literals;   /* init on first call, before any pointer arithmetic */
+        {   const U32 pos = (U32)((const BYTE*)literals - g_start);
+            if ((pos > 1895000) && (pos < 1895300))
+                fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
+                       pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
+    }   }
 #endif
     /* copy Literals */
     ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
@@ -1004,8 +1029,8 @@
     }   }   }
 
     /* save reps for next block */
-    cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
-    cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
+    cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+    cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1119,7 +1144,7 @@
     }   }   }
 
     /* save reps for next block */
-    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+    ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1273,8 +1298,8 @@
     }   }   }
 
     /* save reps for next block */
-    cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
-    cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
+    cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+    cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1423,7 +1448,7 @@
     }   }   }
 
     /* save reps for next block */
-    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+    ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1955,8 +1980,8 @@
     }   }
 
     /* Save reps for next block */
-    ctx->savedRep[0] = offset_1 ? offset_1 : savedOffset;
-    ctx->savedRep[1] = offset_2 ? offset_2 : savedOffset;
+    ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
+    ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -2150,7 +2175,7 @@
     }   }
 
     /* Save reps for next block */
-    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+    ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -2409,12 +2434,14 @@
 
     cctx->nextSrc = ip + srcSize;
 
-    {   size_t const cSize = frame ?
+    if (srcSize) {
+        size_t const cSize = frame ?
                              ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                              ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
         if (ZSTD_isError(cSize)) return cSize;
         return cSize + fhSize;
-    }
+    } else
+        return fhSize;
 }
 
 
@@ -2450,7 +2477,7 @@
     zc->dictBase = zc->base;
     zc->base += ip - zc->nextSrc;
     zc->nextToUpdate = zc->dictLimit;
-    zc->loadedDictEnd = (U32)(iend - zc->base);
+    zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
 
     zc->nextSrc = iend;
     if (srcSize <= HASH_READ_SIZE) return 0;
@@ -2557,9 +2584,9 @@
     }
 
     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
-    cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
-    cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
-    cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] == 0 || cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] == 0 || cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] == 0 || cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
     dictPtr += 12;
 
     {   U32 offcodeMax = MaxOff;
@@ -2594,7 +2621,6 @@
     }
 }
 
-
 /*! ZSTD_compressBegin_internal() :
 *   @return : 0, or an error code */
 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
@@ -2626,9 +2652,9 @@
 }
 
 
-size_t ZSTD_compressBegin(ZSTD_CCtx* zc, int compressionLevel)
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
 {
-    return ZSTD_compressBegin_usingDict(zc, NULL, 0, compressionLevel);
+    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
 }
 
 
@@ -2733,7 +2759,8 @@
 /* =====  Dictionary API  ===== */
 
 struct ZSTD_CDict_s {
-    void* dictContent;
+    void* dictBuffer;
+    const void* dictContent;
     size_t dictContentSize;
     ZSTD_CCtx* refContext;
 };  /* typedef'd to ZSTD_CDict within "zstd.h" */
@@ -2741,39 +2768,45 @@
 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
 {
     if (cdict==NULL) return 0;   /* support sizeof on NULL */
-    return ZSTD_sizeof_CCtx(cdict->refContext) + cdict->dictContentSize;
+    return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
 }
 
-ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_parameters params, ZSTD_customMem customMem)
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, unsigned byReference,
+                                      ZSTD_parameters params, ZSTD_customMem customMem)
 {
     if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
     if (!customMem.customAlloc || !customMem.customFree) return NULL;
 
     {   ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
-        void* const dictContent = ZSTD_malloc(dictSize, customMem);
         ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
 
-        if (!dictContent || !cdict || !cctx) {
-            ZSTD_free(dictContent, customMem);
+        if (!cdict || !cctx) {
             ZSTD_free(cdict, customMem);
             ZSTD_free(cctx, customMem);
             return NULL;
         }
 
-        if (dictSize) {
-            memcpy(dictContent, dict, dictSize);
+        if ((byReference) || (!dictBuffer) || (!dictSize)) {
+            cdict->dictBuffer = NULL;
+            cdict->dictContent = dictBuffer;
+        } else {
+            void* const internalBuffer = ZSTD_malloc(dictSize, customMem);
+            if (!internalBuffer) { ZSTD_free(cctx, customMem); ZSTD_free(cdict, customMem); return NULL; }
+            memcpy(internalBuffer, dictBuffer, dictSize);
+            cdict->dictBuffer = internalBuffer;
+            cdict->dictContent = internalBuffer;
         }
-        {   size_t const errorCode = ZSTD_compressBegin_advanced(cctx, dictContent, dictSize, params, 0);
+
+        {   size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
             if (ZSTD_isError(errorCode)) {
-                ZSTD_free(dictContent, customMem);
+                ZSTD_free(cdict->dictBuffer, customMem);
+                ZSTD_free(cctx, customMem);
                 ZSTD_free(cdict, customMem);
-                ZSTD_free(cctx, customMem);
                 return NULL;
         }   }
 
-        cdict->dictContent = dictContent;
+        cdict->refContext = cctx;
         cdict->dictContentSize = dictSize;
-        cdict->refContext = cctx;
         return cdict;
     }
 }
@@ -2783,7 +2816,15 @@
     ZSTD_customMem const allocator = { NULL, NULL, NULL };
     ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
     params.fParams.contentSizeFlag = 1;
-    return ZSTD_createCDict_advanced(dict, dictSize, params, allocator);
+    return ZSTD_createCDict_advanced(dict, dictSize, 0, params, allocator);
+}
+
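+/*! ZSTD_createCDict_byReference() :
+ *  Same as ZSTD_createCDict(), but the dictionary content is only referenced,
+ *  not copied : dict must outlive the CDict and remain read-accessible for its
+ *  whole lifetime (mirroring ZSTD_createDDict_byReference() on the
+ *  decompression side), in exchange for saving one allocation and one memcpy.
+ *  usage sketch :
+ *      ZSTD_CDict* const cdict = ZSTD_createCDict_byReference(dictBuf, dictSize, level);
+ *      ... compress many frames reusing cdict, then ZSTD_freeCDict(cdict) ...
+ *      (dictBuf may only be freed after the CDict) */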
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
+    params.fParams.contentSizeFlag = 1;
+    return ZSTD_createCDict_advanced(dict, dictSize, 1, params, allocator);
 }
 
 size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
@@ -2791,7 +2832,7 @@
     if (cdict==NULL) return 0;   /* support free on NULL */
     {   ZSTD_customMem const cMem = cdict->refContext->customMem;
         ZSTD_freeCCtx(cdict->refContext);
-        ZSTD_free(cdict->dictContent, cMem);
+        ZSTD_free(cdict->dictBuffer, cMem);
         ZSTD_free(cdict, cMem);
         return 0;
     }
@@ -2801,7 +2842,7 @@
     return ZSTD_getParamsFromCCtx(cdict->refContext);
 }
 
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, U64 pledgedSrcSize)
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize)
 {
     if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
     else CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, cdict->refContext->params, pledgedSrcSize));
@@ -2900,7 +2941,7 @@
 
 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
 {
-    if (zcs->inBuffSize==0) return ERROR(stage_wrong);   /* zcs has not been init at least once */
+    if (zcs->inBuffSize==0) return ERROR(stage_wrong);   /* zcs has not been initialized at least once => can't reset */
 
     if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
     else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
@@ -2937,9 +2978,9 @@
         if (zcs->outBuff == NULL) return ERROR(memory_allocation);
     }
 
-    if (dict) {
+    if (dict && dictSize >= 8) {
         ZSTD_freeCDict(zcs->cdictLocal);
-        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, params, zcs->customMem);
+        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
         if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
         zcs->cdict = zcs->cdictLocal;
     } else zcs->cdict = NULL;
@@ -2956,6 +2997,7 @@
     ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
     size_t const initError =  ZSTD_initCStream_advanced(zcs, NULL, 0, params, 0);
     zcs->cdict = cdict;
+    zcs->cctx->dictID = params.fParams.noDictIDFlag ? 0 : cdict->refContext->dictID;
     return initError;
 }
 
@@ -2967,7 +3009,8 @@
 
 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
 {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+    if (pledgedSrcSize) params.fParams.contentSizeFlag = 1;
     return ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
 }
 
--- a/contrib/python-zstandard/zstd/compress/zstd_opt.h	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/compress/zstd_opt.h	Tue Feb 28 11:13:25 2017 -0800
@@ -38,7 +38,7 @@
 
     ssPtr->cachedLiterals = NULL;
     ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
-    ssPtr->staticPrices = 0; 
+    ssPtr->staticPrices = 0;
 
     if (ssPtr->litLengthSum == 0) {
         if (srcSize <= 1024) ssPtr->staticPrices = 1;
@@ -56,7 +56,7 @@
 
         for (u=0; u<=MaxLit; u++) {
             ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
-            ssPtr->litSum += ssPtr->litFreq[u]; 
+            ssPtr->litSum += ssPtr->litFreq[u];
         }
         for (u=0; u<=MaxLL; u++)
             ssPtr->litLengthFreq[u] = 1;
@@ -634,7 +634,7 @@
     }    }   /* for (cur=0; cur < last_pos; ) */
 
     /* Save reps for next block */
-    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; }
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -825,7 +825,7 @@
 
             match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
 
-            if (match_num > 0 && matches[match_num-1].len > sufficient_len) {
+            if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
                 best_mlen = matches[match_num-1].len;
                 best_off = matches[match_num-1].off;
                 last_pos = cur + 1;
@@ -835,7 +835,7 @@
             /* set prices using matches at position = cur */
             for (u = 0; u < match_num; u++) {
                 mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
-                best_mlen = (cur + matches[u].len < ZSTD_OPT_NUM) ? matches[u].len : ZSTD_OPT_NUM - cur;
+                best_mlen = matches[u].len;
 
                 while (mlen <= best_mlen) {
                     if (opt[cur].mlen == 1) {
@@ -907,7 +907,7 @@
     }    }   /* for (cur=0; cur < last_pos; ) */
 
     /* Save reps for next block */
-    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; }
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
 
     /* Last Literals */
     {   size_t lastLLSize = iend - anchor;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,740 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/* ======   Tuning parameters   ====== */
+#define ZSTDMT_NBTHREADS_MAX 128
+
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+/* ======   Dependencies   ====== */
+#include <stdlib.h>   /* malloc */
+#include <string.h>   /* memcpy */
+#include "pool.h"     /* threadpool */
+#include "threading.h"  /* mutex */
+#include "zstd_internal.h"   /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
+#include "zstdmt_compress.h"
+#define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
+#include "xxhash.h"
+
+
+/* ======   Debug   ====== */
+#if 0
+
+#  include <stdio.h>
+#  include <unistd.h>
+#  include <sys/times.h>
+   static unsigned g_debugLevel = 3;
+#  define DEBUGLOGRAW(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __VA_ARGS__); }
+#  define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); }
+
+#  define DEBUG_PRINTHEX(l,p,n) { \
+    unsigned debug_u;                   \
+    for (debug_u=0; debug_u<(n); debug_u++)           \
+        DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
+    DEBUGLOGRAW(l, " \n");       \
+}
+
+static unsigned long long GetCurrentClockTimeMicroseconds()
+{
+   static clock_t _ticksPerSecond = 0;
+   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
+
+   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
+   return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
+}
+
+#define MUTEX_WAIT_TIME_DLEVEL 5
+#define PTHREAD_MUTEX_LOCK(mutex) \
+if (g_debugLevel>=MUTEX_WAIT_TIME_DLEVEL) { \
+   unsigned long long beforeTime = GetCurrentClockTimeMicroseconds(); \
+   pthread_mutex_lock(mutex); \
+   unsigned long long afterTime = GetCurrentClockTimeMicroseconds(); \
+   unsigned long long elapsedTime = (afterTime-beforeTime); \
+   if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
+      DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
+               elapsedTime, #mutex); \
+  } \
+} else pthread_mutex_lock(mutex);
+
+#else
+
+#  define DEBUGLOG(l, ...)      {}    /* disabled */
+#  define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m)
+#  define DEBUG_PRINTHEX(l,p,n) {}
+
+#endif
+
+
+/* =====   Buffer Pool   ===== */
+
+typedef struct buffer_s {
+    void* start;
+    size_t size;
+} buffer_t;
+
+static const buffer_t g_nullBuffer = { NULL, 0 };
+
+typedef struct ZSTDMT_bufferPool_s {
+    unsigned totalBuffers;
+    unsigned nbBuffers;
+    buffer_t bTable[1];   /* variable size */
+} ZSTDMT_bufferPool;
+
+static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads)
+{
+    unsigned const maxNbBuffers = 2*nbThreads + 2;
+    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)calloc(1, sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t));
+    if (bufPool==NULL) return NULL;
+    bufPool->totalBuffers = maxNbBuffers;
+    bufPool->nbBuffers = 0;
+    return bufPool;
+}
+
+static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
+{
+    unsigned u;
+    if (!bufPool) return;   /* compatibility with free on NULL */
+    for (u=0; u<bufPool->totalBuffers; u++)
+        free(bufPool->bTable[u].start);
+    free(bufPool);
+}
+
+/* assumption : invocation from main thread only ! */
+static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* pool, size_t bSize)
+{
+    if (pool->nbBuffers) {   /* try to use an existing buffer */
+        buffer_t const buf = pool->bTable[--(pool->nbBuffers)];
+        size_t const availBufferSize = buf.size;
+        if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize))   /* large enough, but not too much */
+            return buf;
+        free(buf.start);   /* size conditions not respected : scratch this buffer and create a new one */
+    }
+    /* create new buffer */
+    {   buffer_t buffer;
+        void* const start = malloc(bSize);
+        if (start==NULL) bSize = 0;
+        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
+        buffer.size = bSize;
+        return buffer;
+    }
+}
+
+/* store buffer for later re-use, up to pool capacity */
+static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf)
+{
+    if (buf.start == NULL) return;   /* release on NULL */
+    if (pool->nbBuffers < pool->totalBuffers) {
+        pool->bTable[pool->nbBuffers++] = buf;   /* store for later re-use */
+        return;
+    }
+    /* Reached bufferPool capacity (should not happen) */
+    free(buf.start);
+}
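+/* note on the re-use policy above : a pooled buffer is handed out only when
+ * bSize <= buf.size <= 10*bSize. For example, a pooled 1 MB buffer serves any
+ * request from ~100 KB up to 1 MB ; a 10 KB request frees it and allocates a
+ * tighter buffer instead, so the pool never pins oversized memory */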
+
+
+/* =====   CCtx Pool   ===== */
+
+typedef struct {
+    unsigned totalCCtx;
+    unsigned availCCtx;
+    ZSTD_CCtx* cctx[1];   /* variable size */
+} ZSTDMT_CCtxPool;
+
+/* assumption : CCtxPool invocation only from main thread */
+
+/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
+static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
+{
+    unsigned u;
+    for (u=0; u<pool->totalCCtx; u++)
+        ZSTD_freeCCtx(pool->cctx[u]);  /* note : compatible with free on NULL */
+    free(pool);
+}
+
+/* ZSTDMT_createCCtxPool() :
+ * implies nbThreads >= 1 , checked by caller ZSTDMT_createCCtx() */
+static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads)
+{
+    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) calloc(1, sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*));
+    if (!cctxPool) return NULL;
+    cctxPool->totalCCtx = nbThreads;
+    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
+    cctxPool->cctx[0] = ZSTD_createCCtx();
+    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
+    DEBUGLOG(1, "cctxPool created, with %u threads", nbThreads);
+    return cctxPool;
+}
+
+static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* pool)
+{
+    if (pool->availCCtx) {
+        pool->availCCtx--;
+        return pool->cctx[pool->availCCtx];
+    }
+    return ZSTD_createCCtx();   /* note : can be NULL, when creation fails ! */
+}
+
+static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return;   /* compatibility with release on NULL */
+    if (pool->availCCtx < pool->totalCCtx)
+        pool->cctx[pool->availCCtx++] = cctx;
+    else
+        /* pool overflow : should not happen, since totalCCtx==nbThreads */
+        ZSTD_freeCCtx(cctx);
+}
+
+
+/* =====   Thread worker   ===== */
+
+typedef struct {
+    buffer_t buffer;
+    size_t filled;
+} inBuff_t;
+
+typedef struct {
+    ZSTD_CCtx* cctx;
+    buffer_t src;
+    const void* srcStart;
+    size_t   srcSize;
+    size_t   dictSize;
+    buffer_t dstBuff;
+    size_t   cSize;
+    size_t   dstFlushed;
+    unsigned firstChunk;
+    unsigned lastChunk;
+    unsigned jobCompleted;
+    unsigned jobScanned;
+    pthread_mutex_t* jobCompleted_mutex;
+    pthread_cond_t* jobCompleted_cond;
+    ZSTD_parameters params;
+    ZSTD_CDict* cdict;
+    unsigned long long fullFrameSize;
+} ZSTDMT_jobDescription;
+
+/* ZSTDMT_compressChunk() : POOL_function type */
+void ZSTDMT_compressChunk(void* jobDescription)
+{
+    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
+    const void* const src = (const char*)job->srcStart + job->dictSize;
+    buffer_t const dstBuff = job->dstBuff;
+    DEBUGLOG(3, "job (first:%u) (last:%u) : dictSize %u, srcSize %u", job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize);
+    if (job->cdict) {
+        size_t const initError = ZSTD_compressBegin_usingCDict(job->cctx, job->cdict, job->fullFrameSize);
+        DEBUGLOG(3, "using CDict ");
+        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+    } else {
+        size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
+        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+        ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1);
+    }
+    if (!job->firstChunk) {  /* flush frame header */
+        size_t const hSize = ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, 0);
+        if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; }
+        ZSTD_invalidateRepCodes(job->cctx);
+    }
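+    /* note : the hSize header bytes just written at dstBuff.start are
+     * overwritten by the compressed data below : only the first chunk of the
+     * frame actually carries a frame header */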
+
+    DEBUGLOG(4, "Compressing : ");
+    DEBUG_PRINTHEX(4, job->srcStart, 12);
+    job->cSize = (job->lastChunk) ?   /* last chunk signal */
+                 ZSTD_compressEnd     (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
+                 ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
+    DEBUGLOG(3, "compressed %u bytes into %u bytes   (first:%u) (last:%u)", (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
+
+_endJob:
+    PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
+    job->jobCompleted = 1;
+    job->jobScanned = 0;
+    pthread_cond_signal(job->jobCompleted_cond);
+    pthread_mutex_unlock(job->jobCompleted_mutex);
+}
+
+
+/* ------------------------------------------ */
+/* =====   Multi-threaded compression   ===== */
+/* ------------------------------------------ */
+
+struct ZSTDMT_CCtx_s {
+    POOL_ctx* factory;
+    ZSTDMT_bufferPool* buffPool;
+    ZSTDMT_CCtxPool* cctxPool;
+    pthread_mutex_t jobCompleted_mutex;
+    pthread_cond_t jobCompleted_cond;
+    size_t targetSectionSize;
+    size_t marginSize;
+    size_t inBuffSize;
+    size_t dictSize;
+    size_t targetDictSize;
+    inBuff_t inBuff;
+    ZSTD_parameters params;
+    XXH64_state_t xxhState;
+    unsigned nbThreads;
+    unsigned jobIDMask;
+    unsigned doneJobID;
+    unsigned nextJobID;
+    unsigned frameEnded;
+    unsigned allJobsCompleted;
+    unsigned overlapRLog;
+    unsigned long long frameContentSize;
+    size_t sectionSize;
+    ZSTD_CDict* cdict;
+    ZSTD_CStream* cstream;
+    ZSTDMT_jobDescription jobs[1];   /* variable size (must lie at the end) */
+};
+
+ZSTDMT_CCtx *ZSTDMT_createCCtx(unsigned nbThreads)
+{
+    ZSTDMT_CCtx* cctx;
+    U32 const minNbJobs = nbThreads + 2;
+    U32 const nbJobsLog2 = ZSTD_highbit32(minNbJobs) + 1;
+    U32 const nbJobs = 1 << nbJobsLog2;
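+    /* e.g. nbThreads==4 : minNbJobs==6, nbJobsLog2==3, nbJobs==8 => ring of 8 job slots, jobIDMask==7 */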
+    DEBUGLOG(5, "nbThreads : %u  ; minNbJobs : %u ;  nbJobsLog2 : %u ;  nbJobs : %u  \n",
+            nbThreads, minNbJobs, nbJobsLog2, nbJobs);
+    if ((nbThreads < 1) | (nbThreads > ZSTDMT_NBTHREADS_MAX)) return NULL;
+    cctx = (ZSTDMT_CCtx*) calloc(1, sizeof(ZSTDMT_CCtx) + nbJobs*sizeof(ZSTDMT_jobDescription));
+    if (!cctx) return NULL;
+    cctx->nbThreads = nbThreads;
+    cctx->jobIDMask = nbJobs - 1;
+    cctx->allJobsCompleted = 1;
+    cctx->sectionSize = 0;
+    cctx->overlapRLog = 3;
+    cctx->factory = POOL_create(nbThreads, 1);
+    cctx->buffPool = ZSTDMT_createBufferPool(nbThreads);
+    cctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads);
+    if (!cctx->factory | !cctx->buffPool | !cctx->cctxPool) {  /* one object was not created */
+        ZSTDMT_freeCCtx(cctx);
+        return NULL;
+    }
+    if (nbThreads==1) {
+        cctx->cstream = ZSTD_createCStream();
+        if (!cctx->cstream) {
+            ZSTDMT_freeCCtx(cctx); return NULL;
+    }   }
+    pthread_mutex_init(&cctx->jobCompleted_mutex, NULL);   /* Todo : check init function return */
+    pthread_cond_init(&cctx->jobCompleted_cond, NULL);
+    DEBUGLOG(4, "mt_cctx created, for %u threads \n", nbThreads);
+    return cctx;
+}
+
+/* ZSTDMT_releaseAllJobResources() :
+ * Ensure all workers are killed first. */
+static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
+{
+    unsigned jobID;
+    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+        ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].dstBuff);
+        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+        ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].src);
+        mtctx->jobs[jobID].src = g_nullBuffer;
+        ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[jobID].cctx);
+        mtctx->jobs[jobID].cctx = NULL;
+    }
+    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
+    ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer);
+    mtctx->inBuff.buffer = g_nullBuffer;
+    mtctx->allJobsCompleted = 1;
+}
+
+size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
+{
+    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
+    POOL_free(mtctx->factory);
+    if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */
+    ZSTDMT_freeBufferPool(mtctx->buffPool);  /* release job resources into pools first */
+    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
+    ZSTD_freeCDict(mtctx->cdict);
+    ZSTD_freeCStream(mtctx->cstream);
+    pthread_mutex_destroy(&mtctx->jobCompleted_mutex);
+    pthread_cond_destroy(&mtctx->jobCompleted_cond);
+    free(mtctx);
+    return 0;
+}
+
+size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value)
+{
+    switch(parameter)
+    {
+    case ZSTDMT_p_sectionSize :
+        mtctx->sectionSize = value;
+        return 0;
+    case ZSTDMT_p_overlapSectionLog :
+    DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
+        mtctx->overlapRLog = (value >= 9) ? 0 : 9 - value;
+        return 0;
+    default :
+        return ERROR(compressionParameter_unsupported);
+    }
+}
+
+
+/* ------------------------------------------ */
+/* =====   Multi-threaded compression   ===== */
+/* ------------------------------------------ */
+
+size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+                           void* dst, size_t dstCapacity,
+                     const void* src, size_t srcSize,
+                           int compressionLevel)
+{
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
+    size_t const chunkTargetSize = (size_t)1 << (params.cParams.windowLog + 2);
+    unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + (srcSize < chunkTargetSize) /* min 1 */;
+    unsigned nbChunks = MIN(nbChunksMax, mtctx->nbThreads);
+    size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
+    size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0xFFFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize;   /* avoid too small last block */
+    size_t remainingSrcSize = srcSize;
+    const char* const srcStart = (const char*)src;
+    size_t frameStartPos = 0;
+
+    DEBUGLOG(3, "windowLog : %2u => chunkTargetSize : %u bytes  ", params.cParams.windowLog, (U32)chunkTargetSize);
+    DEBUGLOG(2, "nbChunks  : %2u   (chunkSize : %u bytes)   ", nbChunks, (U32)avgChunkSize);
+    params.fParams.contentSizeFlag = 1;
+
+    if (nbChunks==1) {   /* fallback to single-thread mode */
+        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
+        return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
+    }
+
+    {   unsigned u;
+        for (u=0; u<nbChunks; u++) {
+            size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize);
+            size_t const dstBufferCapacity = u ? ZSTD_compressBound(chunkSize) : dstCapacity;
+            buffer_t const dstAsBuffer = { dst, dstCapacity };
+            buffer_t const dstBuffer = u ? ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity) : dstAsBuffer;
+            ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool);
+
+            if ((cctx==NULL) || (dstBuffer.start==NULL)) {
+                mtctx->jobs[u].cSize = ERROR(memory_allocation);   /* job result */
+                mtctx->jobs[u].jobCompleted = 1;
+                nbChunks = u+1;
+                break;   /* let's wait for previous jobs to complete, but don't start new ones */
+            }
+
+            mtctx->jobs[u].srcStart = srcStart + frameStartPos;
+            mtctx->jobs[u].srcSize = chunkSize;
+            mtctx->jobs[u].fullFrameSize = srcSize;
+            mtctx->jobs[u].params = params;
+            mtctx->jobs[u].dstBuff = dstBuffer;
+            mtctx->jobs[u].cctx = cctx;
+            mtctx->jobs[u].firstChunk = (u==0);
+            mtctx->jobs[u].lastChunk = (u==nbChunks-1);
+            mtctx->jobs[u].jobCompleted = 0;
+            mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex;
+            mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond;
+
+            DEBUGLOG(3, "posting job %u   (%u bytes)", u, (U32)chunkSize);
+            DEBUG_PRINTHEX(3, mtctx->jobs[u].srcStart, 12);
+            POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);
+
+            frameStartPos += chunkSize;
+            remainingSrcSize -= chunkSize;
+    }   }
+    /* note : since nbChunks <= nbThreads, all jobs should be running immediately in parallel */
+
+    {   unsigned chunkID;
+        size_t error = 0, dstPos = 0;
+        for (chunkID=0; chunkID<nbChunks; chunkID++) {
+            DEBUGLOG(3, "waiting for chunk %u ", chunkID);
+            PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
+            while (mtctx->jobs[chunkID].jobCompleted==0) {
+                DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", chunkID);
+                pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex);
+            }
+            pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
+            DEBUGLOG(3, "ready to write chunk %u ", chunkID);
+
+            ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx);
+            mtctx->jobs[chunkID].cctx = NULL;
+            mtctx->jobs[chunkID].srcStart = NULL;
+            {   size_t const cSize = mtctx->jobs[chunkID].cSize;
+                if (ZSTD_isError(cSize)) error = cSize;
+                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
+                if (chunkID) {   /* note : chunk 0 is already written directly into dst */
+                    if (!error) memcpy((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize);
+                    ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
+                    mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
+                }
+                dstPos += cSize;
+            }
+        }
+        if (!error) DEBUGLOG(3, "compressed size : %u  ", (U32)dstPos);
+        return error ? error : dstPos;
+    }
+
+}
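+/* sizing example : windowLog 20 => chunkTargetSize 4 MB ; a 32 MB input on a
+ * 4-thread context gives nbChunksMax==8, capped to nbChunks==4, so each job
+ * compresses ~8 MB (avgChunkSize is bumped by 0xFFFF when
+ * (proposedChunkSize & 0x1FFFF) < 0xFFFF, avoiding a tiny last chunk) */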
+
+
+/* ====================================== */
+/* =======      Streaming API     ======= */
+/* ====================================== */
+
+static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) {
+    while (zcs->doneJobID < zcs->nextJobID) {
+        unsigned const jobID = zcs->doneJobID & zcs->jobIDMask;
+        PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
+        while (zcs->jobs[jobID].jobCompleted==0) {
+            DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID);   /* we want to block when waiting for data to flush */
+            pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex);
+        }
+        pthread_mutex_unlock(&zcs->jobCompleted_mutex);
+        zcs->doneJobID++;
+    }
+}
+
+
+static size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+                                    const void* dict, size_t dictSize, unsigned updateDict,
+                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    ZSTD_customMem const cmem = { NULL, NULL, NULL };
+    DEBUGLOG(3, "Started new compression, with windowLog : %u", params.cParams.windowLog);
+    if (zcs->nbThreads==1) return ZSTD_initCStream_advanced(zcs->cstream, dict, dictSize, params, pledgedSrcSize);
+    if (zcs->allJobsCompleted == 0) {   /* previous job not correctly finished */
+        ZSTDMT_waitForAllJobsCompleted(zcs);
+        ZSTDMT_releaseAllJobResources(zcs);
+        zcs->allJobsCompleted = 1;
+    }
+    zcs->params = params;
+    if (updateDict) {
+        ZSTD_freeCDict(zcs->cdict); zcs->cdict = NULL;
+        if (dict && dictSize) {
+            zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, 0, params, cmem);
+            if (zcs->cdict == NULL) return ERROR(memory_allocation);
+    }   }
+    zcs->frameContentSize = pledgedSrcSize;
+    zcs->targetDictSize = (zcs->overlapRLog>=9) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - zcs->overlapRLog);
+    DEBUGLOG(4, "overlapRLog : %u ", zcs->overlapRLog);
+    DEBUGLOG(3, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10));
+    zcs->targetSectionSize = zcs->sectionSize ? zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2);
+    zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize);
+    zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize);
+    DEBUGLOG(3, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10));
+    zcs->marginSize = zcs->targetSectionSize >> 2;
+    zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize + zcs->marginSize;
+    zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
+    if (zcs->inBuff.buffer.start == NULL) return ERROR(memory_allocation);
+    zcs->inBuff.filled = 0;
+    zcs->dictSize = 0;
+    zcs->doneJobID = 0;
+    zcs->nextJobID = 0;
+    zcs->frameEnded = 0;
+    zcs->allJobsCompleted = 0;
+    if (params.fParams.checksumFlag) XXH64_reset(&zcs->xxhState, 0);
+    return 0;
+}
+
+size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* zcs,
+                                const void* dict, size_t dictSize,
+                                ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    return ZSTDMT_initCStream_internal(zcs, dict, dictSize, 1, params, pledgedSrcSize);
+}
+
+/* ZSTDMT_resetCStream() :
+ * pledgedSrcSize is optional and can be zero == unknown */
+size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long pledgedSrcSize)
+{
+    if (zcs->nbThreads==1) return ZSTD_resetCStream(zcs->cstream, pledgedSrcSize);
+    return ZSTDMT_initCStream_internal(zcs, NULL, 0, 0, zcs->params, pledgedSrcSize);
+}
+
+size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) {
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
+    return ZSTDMT_initCStream_internal(zcs, NULL, 0, 1, params, 0);
+}
+
+
+static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsigned endFrame)
+{
+    size_t const dstBufferCapacity = ZSTD_compressBound(srcSize);
+    buffer_t const dstBuffer = ZSTDMT_getBuffer(zcs->buffPool, dstBufferCapacity);
+    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(zcs->cctxPool);
+    unsigned const jobID = zcs->nextJobID & zcs->jobIDMask;
+
+    if ((cctx==NULL) || (dstBuffer.start==NULL)) {
+        zcs->jobs[jobID].jobCompleted = 1;
+        zcs->nextJobID++;
+        ZSTDMT_waitForAllJobsCompleted(zcs);
+        ZSTDMT_releaseAllJobResources(zcs);
+        return ERROR(memory_allocation);
+    }
+
+    DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ", zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize);
+    zcs->jobs[jobID].src = zcs->inBuff.buffer;
+    zcs->jobs[jobID].srcStart = zcs->inBuff.buffer.start;
+    zcs->jobs[jobID].srcSize = srcSize;
+    zcs->jobs[jobID].dictSize = zcs->dictSize;   /* note : zcs->inBuff.filled is presumed >= srcSize + dictSize */
+    zcs->jobs[jobID].params = zcs->params;
+    if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0;  /* do not calculate checksum within sections, just keep it in header for first section */
+    zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? zcs->cdict : NULL;
+    zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize;
+    zcs->jobs[jobID].dstBuff = dstBuffer;
+    zcs->jobs[jobID].cctx = cctx;
+    zcs->jobs[jobID].firstChunk = (zcs->nextJobID==0);
+    zcs->jobs[jobID].lastChunk = endFrame;
+    zcs->jobs[jobID].jobCompleted = 0;
+    zcs->jobs[jobID].dstFlushed = 0;
+    zcs->jobs[jobID].jobCompleted_mutex = &zcs->jobCompleted_mutex;
+    zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond;
+
+    /* get a new buffer for next input */
+    if (!endFrame) {
+        size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize);
+        zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
+        if (zcs->inBuff.buffer.start == NULL) {   /* not enough memory to allocate next input buffer */
+            zcs->jobs[jobID].jobCompleted = 1;
+            zcs->nextJobID++;
+            ZSTDMT_waitForAllJobsCompleted(zcs);
+            ZSTDMT_releaseAllJobResources(zcs);
+            return ERROR(memory_allocation);
+        }
+        DEBUGLOG(5, "inBuff filled to %u", (U32)zcs->inBuff.filled);
+        zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize;
+        DEBUGLOG(5, "new job : filled to %u, with %u dict and %u src", (U32)zcs->inBuff.filled, (U32)newDictSize, (U32)(zcs->inBuff.filled - newDictSize));
+        memmove(zcs->inBuff.buffer.start, (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize, zcs->inBuff.filled);
+        DEBUGLOG(5, "new inBuff pre-filled");
+        zcs->dictSize = newDictSize;
+    } else {
+        zcs->inBuff.buffer = g_nullBuffer;
+        zcs->inBuff.filled = 0;
+        zcs->dictSize = 0;
+        zcs->frameEnded = 1;
+        if (zcs->nextJobID == 0)
+            zcs->params.fParams.checksumFlag = 0;   /* single chunk : checksum is calculated directly within worker thread */
+    }
+
+    DEBUGLOG(3, "posting job %u : %u bytes  (end:%u) (note : doneJob = %u=>%u)", zcs->nextJobID, (U32)zcs->jobs[jobID].srcSize, zcs->jobs[jobID].lastChunk, zcs->doneJobID, zcs->doneJobID & zcs->jobIDMask);
+    POOL_add(zcs->factory, ZSTDMT_compressChunk, &zcs->jobs[jobID]);   /* this call is blocking when thread worker pool is exhausted */
+    zcs->nextJobID++;
+    return 0;
+}
+
+
+/* ZSTDMT_flushNextJob() :
+ * output : will be updated with the amount of data flushed.
+ * blockToFlush : if >0, the function will block and wait when there is no data available to flush.
+ * @return : amount of data remaining within internal buffer, 1 if unknown but > 0, 0 if no more, or an error code */
+static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned blockToFlush)
+{
+    unsigned const wJobID = zcs->doneJobID & zcs->jobIDMask;
+    if (zcs->doneJobID == zcs->nextJobID) return 0;   /* all flushed ! */
+    PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
+    while (zcs->jobs[wJobID].jobCompleted==0) {
+        DEBUGLOG(5, "waiting for jobCompleted signal from job %u", zcs->doneJobID);
+        if (!blockToFlush) { pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; }  /* nothing ready to be flushed => skip */
+        pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex);  /* block when nothing available to flush */
+    }
+    pthread_mutex_unlock(&zcs->jobCompleted_mutex);
+    /* compression job completed : output can be flushed */
+    {   ZSTDMT_jobDescription job = zcs->jobs[wJobID];
+        if (!job.jobScanned) {
+            if (ZSTD_isError(job.cSize)) {
+                DEBUGLOG(5, "compression error detected ");
+                ZSTDMT_waitForAllJobsCompleted(zcs);
+                ZSTDMT_releaseAllJobResources(zcs);
+                return job.cSize;
+            }
+            ZSTDMT_releaseCCtx(zcs->cctxPool, job.cctx);
+            zcs->jobs[wJobID].cctx = NULL;
+            DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag);
+            if (zcs->params.fParams.checksumFlag) {
+                XXH64_update(&zcs->xxhState, (const char*)job.srcStart + job.dictSize, job.srcSize);
+                if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) {  /* write checksum at end of last section */
+                    U32 const checksum = (U32)XXH64_digest(&zcs->xxhState);
+                    DEBUGLOG(4, "writing checksum : %08X \n", checksum);
+                    MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum);
+                    job.cSize += 4;
+                    zcs->jobs[wJobID].cSize += 4;
+            }   }
+            ZSTDMT_releaseBuffer(zcs->buffPool, job.src);
+            zcs->jobs[wJobID].srcStart = NULL;
+            zcs->jobs[wJobID].src = g_nullBuffer;
+            zcs->jobs[wJobID].jobScanned = 1;
+        }
+        {   size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos);
+            DEBUGLOG(4, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID);
+            memcpy((char*)output->dst + output->pos, (const char*)job.dstBuff.start + job.dstFlushed, toWrite);
+            output->pos += toWrite;
+            job.dstFlushed += toWrite;
+        }
+        if (job.dstFlushed == job.cSize) {   /* output buffer fully flushed => move to next one */
+            ZSTDMT_releaseBuffer(zcs->buffPool, job.dstBuff);
+            zcs->jobs[wJobID].dstBuff = g_nullBuffer;
+            zcs->jobs[wJobID].jobCompleted = 0;
+            zcs->doneJobID++;
+        } else {
+            zcs->jobs[wJobID].dstFlushed = job.dstFlushed;
+        }
+        /* return value : how many bytes left in buffer ; fake it to 1 if unknown but >0 */
+        if (job.cSize > job.dstFlushed) return (job.cSize - job.dstFlushed);
+        if (zcs->doneJobID < zcs->nextJobID) return 1;   /* still some buffer to flush */
+        zcs->allJobsCompleted = zcs->frameEnded;   /* frame completed and entirely flushed */
+        return 0;   /* everything flushed */
+}   }
+
+
+size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    size_t const newJobThreshold = zcs->dictSize + zcs->targetSectionSize + zcs->marginSize;
+    if (zcs->frameEnded) return ERROR(stage_wrong);   /* current frame being ended. Only flush is allowed. Restart with init */
+    if (zcs->nbThreads==1) return ZSTD_compressStream(zcs->cstream, output, input);
+
+    /* fill input buffer */
+    {   size_t const toLoad = MIN(input->size - input->pos, zcs->inBuffSize - zcs->inBuff.filled);
+        memcpy((char*)zcs->inBuff.buffer.start + zcs->inBuff.filled, (const char*)input->src + input->pos, toLoad);   /* resume from input->pos on repeated partial loads */
+        input->pos += toLoad;
+        zcs->inBuff.filled += toLoad;
+    }
+
+    if ( (zcs->inBuff.filled >= newJobThreshold)  /* filled enough : let's compress */
+        && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) {   /* avoid overwriting job round buffer */
+        CHECK_F( ZSTDMT_createCompressionJob(zcs, zcs->targetSectionSize, 0) );
+    }
+
+    /* check for data to flush */
+    CHECK_F( ZSTDMT_flushNextJob(zcs, output, (zcs->inBuff.filled == zcs->inBuffSize)) ); /* block if it wasn't possible to create new job due to saturation */
+
+    /* recommended next input size : fill current input buffer */
+    return zcs->inBuffSize - zcs->inBuff.filled;   /* note : can be zero, when the input buffer is full and no new job can be created */
+}
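+/* note : newJobThreshold == dictSize + targetSectionSize + marginSize :
+ * a new job is only spawned once the buffered input covers the current
+ * overlap (dictSize), a full target section, and a safety margin that lets
+ * input keep accumulating while the job runs */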
+
+
+static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned endFrame)
+{
+    size_t const srcSize = zcs->inBuff.filled - zcs->dictSize;
+
+    if (srcSize) DEBUGLOG(4, "flushing : %u bytes left to compress", (U32)srcSize);
+    if ( ((srcSize > 0) || (endFrame && !zcs->frameEnded))
+       && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) {
+        CHECK_F( ZSTDMT_createCompressionJob(zcs, srcSize, endFrame) );
+    }
+
+    /* check if there is any data available to flush */
+    DEBUGLOG(5, "zcs->doneJobID : %u  ; zcs->nextJobID : %u ", zcs->doneJobID, zcs->nextJobID);
+    return ZSTDMT_flushNextJob(zcs, output, 1);
+}
+
+
+size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
+{
+    if (zcs->nbThreads==1) return ZSTD_flushStream(zcs->cstream, output);
+    return ZSTDMT_flushStream_internal(zcs, output, 0);
+}
+
+size_t ZSTDMT_endStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
+{
+    if (zcs->nbThreads==1) return ZSTD_endStream(zcs->cstream, output);
+    return ZSTDMT_flushStream_internal(zcs, output, 1);
+}
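+
+/* usage sketch for the streaming API above (error handling elided ;
+ * srcBuf/srcSize and dstBuf/dstCapacity stand for caller-owned buffers) :
+ *
+ *     ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);          (4 worker threads)
+ *     ZSTDMT_initCStream(mtctx, 3);                             (compression level 3)
+ *     {   ZSTD_inBuffer  in  = { srcBuf, srcSize, 0 };
+ *         ZSTD_outBuffer out = { dstBuf, dstCapacity, 0 };
+ *         while (in.pos < in.size)
+ *             ZSTDMT_compressStream(mtctx, &out, &in);
+ *         while (ZSTDMT_endStream(mtctx, &out) != 0) ;          (0 == fully flushed)
+ *     }
+ *     ZSTDMT_freeCCtx(mtctx);
+ */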
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.h	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,78 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#ifndef ZSTDMT_COMPRESS_H
+#define ZSTDMT_COMPRESS_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* Note : All prototypes defined in this file shall be considered experimental.
+ *        There is no guarantee of API continuity (yet) on any of these prototypes */
+
+/* ===   Dependencies   === */
+#include <stddef.h>   /* size_t */
+#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
+#include "zstd.h"     /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
+
+
+/* ===   Simple one-pass functions   === */
+
+typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
+ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads);
+ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* cctx);
+
+ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* cctx,
+                           void* dst, size_t dstCapacity,
+                     const void* src, size_t srcSize,
+                           int compressionLevel);
+
+
+/* ===   Streaming functions   === */
+
+ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);    /**< pledgedSrcSize is optional and can be zero == unknown */
+
+ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+
+
+/* ===   Advanced functions and parameters  === */
+
+#ifndef ZSTDMT_SECTION_SIZE_MIN
+#  define ZSTDMT_SECTION_SIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
+#endif
+
+ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize,  /**< dict can be released after init, a local copy is preserved within zcs */
+                                          ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be zero == unknown */
+
+/* ZSDTMT_parameter :
+ * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
+typedef enum {
+    ZSTDMT_p_sectionSize,        /* size of input "section". Each section is compressed in parallel. 0 means default, which is dynamically determined within compression functions */
+    ZSTDMT_p_overlapSectionLog   /* Log of overlapped section; 0 == no overlap, 6 (default) == use 1/8th of window, >=9 == use full window */
+} ZSDTMT_parameter;
+
+/* ZSTDMT_setMTCtxParameter() :
+ * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter.
+ * The function should typically be called right after ZSTD_createCCtx().
+ * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value);
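+/* example :
+ *     ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapSectionLog, 9);
+ * makes each section reload the full previous window as dictionary, which
+ * improves ratio at the cost of redundant work ; value 0 disables overlap */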
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* ZSTDMT_COMPRESS_H */
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Tue Feb 28 11:13:25 2017 -0800
@@ -1444,7 +1444,7 @@
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
     if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, dict, dictSize);
 #endif
-    ZSTD_decompressBegin_usingDict(dctx, dict, dictSize);
+    CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
     ZSTD_checkContinuity(dctx, dst);
     return ZSTD_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
 }
@@ -1671,9 +1671,9 @@
     }
 
     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
-    dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
-    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
-    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
     dictPtr += 12;
 
     dctx->litEntropy = dctx->fseEntropy = 1;
@@ -1713,39 +1713,44 @@
 /* ======   ZSTD_DDict   ====== */
 
 struct ZSTD_DDict_s {
-    void* dict;
+    void* dictBuffer;
+    const void* dictContent;
     size_t dictSize;
     ZSTD_DCtx* refContext;
 };  /* typedef'd to ZSTD_DDict within "zstd.h" */
 
-ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_customMem customMem)
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
 {
     if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
     if (!customMem.customAlloc || !customMem.customFree) return NULL;
 
     {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
-        void* const dictContent = ZSTD_malloc(dictSize, customMem);
         ZSTD_DCtx* const dctx = ZSTD_createDCtx_advanced(customMem);
 
-        if (!dictContent || !ddict || !dctx) {
-            ZSTD_free(dictContent, customMem);
+        if (!ddict || !dctx) {
             ZSTD_free(ddict, customMem);
             ZSTD_free(dctx, customMem);
             return NULL;
         }
 
-        if (dictSize) {
-            memcpy(dictContent, dict, dictSize);
+        if ((byReference) || (!dict) || (!dictSize)) {
+            ddict->dictBuffer = NULL;
+            ddict->dictContent = dict;
+        } else {
+            void* const internalBuffer = ZSTD_malloc(dictSize, customMem);
+            if (!internalBuffer) { ZSTD_free(dctx, customMem); ZSTD_free(ddict, customMem); return NULL; }
+            memcpy(internalBuffer, dict, dictSize);
+            ddict->dictBuffer = internalBuffer;
+            ddict->dictContent = internalBuffer;
         }
-        {   size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, dictContent, dictSize);
+        {   size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, ddict->dictContent, dictSize);
             if (ZSTD_isError(errorCode)) {
-                ZSTD_free(dictContent, customMem);
+                ZSTD_free(ddict->dictBuffer, customMem);
                 ZSTD_free(ddict, customMem);
                 ZSTD_free(dctx, customMem);
                 return NULL;
         }   }
 
-        ddict->dict = dictContent;
         ddict->dictSize = dictSize;
         ddict->refContext = dctx;
         return ddict;
@@ -1758,15 +1763,27 @@
 ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
 {
     ZSTD_customMem const allocator = { NULL, NULL, NULL };
-    return ZSTD_createDDict_advanced(dict, dictSize, allocator);
+    return ZSTD_createDDict_advanced(dict, dictSize, 0, allocator);
 }
 
+
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives the DDict; it must remain read accessible throughout the lifetime of the DDict */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dictBuffer, dictSize, 1, allocator);
+}
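+
+/* Illustrative usage sketch (editor's note, not part of the upstream sources):
+ * with the byReference variant the caller owns the buffer's lifetime, e.g.
+ *
+ *   ZSTD_DDict* const ddict = ZSTD_createDDict_byReference(dictBuf, dictLen);
+ *   ... decompress frames using ddict ...
+ *   ZSTD_freeDDict(ddict);
+ *   free(dictBuf);   // only safe once the DDict has been freed
+ */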
+
+
 size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
 {
     if (ddict==NULL) return 0;   /* support free on NULL */
     {   ZSTD_customMem const cMem = ddict->refContext->customMem;
         ZSTD_freeDCtx(ddict->refContext);
-        ZSTD_free(ddict->dict, cMem);
+        ZSTD_free(ddict->dictBuffer, cMem);
         ZSTD_free(ddict, cMem);
         return 0;
     }
@@ -1775,7 +1792,7 @@
 size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
 {
     if (ddict==NULL) return 0;   /* support sizeof on NULL */
-    return sizeof(*ddict) + sizeof(ddict->refContext) + ddict->dictSize;
+    return sizeof(*ddict) + ZSTD_sizeof_DCtx(ddict->refContext) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
 }
 
 /*! ZSTD_getDictID_fromDict() :
@@ -1796,7 +1813,7 @@
 unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
 {
     if (ddict==NULL) return 0;
-    return ZSTD_getDictID_fromDict(ddict->dict, ddict->dictSize);
+    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
 }
 
 /*! ZSTD_getDictID_fromFrame() :
@@ -1827,7 +1844,7 @@
                             const ZSTD_DDict* ddict)
 {
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
-    if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dict, ddict->dictSize);
+    if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dictContent, ddict->dictSize);
 #endif
     ZSTD_refDCtx(dctx, ddict->refContext);
     ZSTD_checkContinuity(dctx, dst);
@@ -1919,7 +1936,7 @@
     zds->stage = zdss_loadHeader;
     zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
     ZSTD_freeDDict(zds->ddictLocal);
-    if (dict) {
+    if (dict && dictSize >= 8) {
         zds->ddictLocal = ZSTD_createDDict(dict, dictSize);
         if (zds->ddictLocal == NULL) return ERROR(memory_allocation);
     } else zds->ddictLocal = NULL;
@@ -1956,7 +1973,7 @@
     switch(paramType)
     {
         default : return ERROR(parameter_unknown);
-        case ZSTDdsp_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
+        case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
     }
     return 0;
 }
@@ -2007,7 +2024,7 @@
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
                 {   U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
                     if (legacyVersion) {
-                        const void* const dict = zds->ddict ? zds->ddict->dict : NULL;
+                        const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
                         size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
                         CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion,
                                                        dict, dictSize));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/cover.c	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,1021 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+/*-*************************************
+*  Constants
+***************************************/
+#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+
+/*-*************************************
+*  Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...)                                                           \
+  {                                                                            \
+    fprintf(stderr, __VA_ARGS__);                                              \
+    fflush(stderr);                                                            \
+  }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
+  if (displayLevel >= l) {                                                     \
+    DISPLAY(__VA_ARGS__);                                                      \
+  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
+  if (displayLevel >= l) {                                                     \
+    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
+      g_time = clock();                                                        \
+      DISPLAY(__VA_ARGS__);                                                    \
+      if (displayLevel >= 4)                                                   \
+        fflush(stdout);                                                        \
+    }                                                                          \
+  }
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+
+/*-*************************************
+* Hash table
+***************************************
+* A small specialized hash map for storing activeDmers.
+* The map does not resize, so if it becomes full it will loop forever.
+* Thus, the map must be large enough to store every value.
+* The map implements linear probing and keeps its load less than 0.5.
+*/
+
+#define MAP_EMPTY_VALUE ((U32)-1)
+typedef struct COVER_map_pair_t_s {
+  U32 key;
+  U32 value;
+} COVER_map_pair_t;
+
+typedef struct COVER_map_s {
+  COVER_map_pair_t *data;
+  U32 sizeLog;
+  U32 size;
+  U32 sizeMask;
+} COVER_map_t;
+
+/**
+ * Clear the map.
+ */
+static void COVER_map_clear(COVER_map_t *map) {
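+  /* note: byte-wise memset works here because MAP_EMPTY_VALUE is all 1-bits (0xFF bytes) */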
+  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
+}
+
+/**
+ * Initializes a map of the given size.
+ * Returns 1 on success and 0 on failure.
+ * The map must be destroyed with COVER_map_destroy().
+ * The map is only guaranteed to be large enough to hold size elements.
+ */
+static int COVER_map_init(COVER_map_t *map, U32 size) {
+  map->sizeLog = ZSTD_highbit32(size) + 2;
+  map->size = (U32)1 << map->sizeLog;
+  map->sizeMask = map->size - 1;
+  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
+  if (!map->data) {
+    map->sizeLog = 0;
+    map->size = 0;
+    return 0;
+  }
+  COVER_map_clear(map);
+  return 1;
+}
+
+/**
+ * Internal hash function
+ */
+static const U32 prime4bytes = 2654435761U;
+static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
+  return (key * prime4bytes) >> (32 - map->sizeLog);
+}
+
+/**
+ * Helper function that returns the index that a key should be placed into.
+ */
+static U32 COVER_map_index(COVER_map_t *map, U32 key) {
+  const U32 hash = COVER_map_hash(map, key);
+  U32 i;
+  for (i = hash;; i = (i + 1) & map->sizeMask) {
+    COVER_map_pair_t *pos = &map->data[i];
+    if (pos->value == MAP_EMPTY_VALUE) {
+      return i;
+    }
+    if (pos->key == key) {
+      return i;
+    }
+  }
+}
+
+/**
+ * Returns the pointer to the value for key.
+ * If key is not in the map, it is inserted and the value is set to 0.
+ * The map must not be full.
+ */
+static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
+  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
+  if (pos->value == MAP_EMPTY_VALUE) {
+    pos->key = key;
+    pos->value = 0;
+  }
+  return &pos->value;
+}
+
+/**
+ * Deletes key from the map if present.
+ */
+static void COVER_map_remove(COVER_map_t *map, U32 key) {
+  U32 i = COVER_map_index(map, key);
+  COVER_map_pair_t *del = &map->data[i];
+  U32 shift = 1;
+  if (del->value == MAP_EMPTY_VALUE) {
+    return;
+  }
+  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
+    COVER_map_pair_t *const pos = &map->data[i];
+    /* If the position is empty we are done */
+    if (pos->value == MAP_EMPTY_VALUE) {
+      del->value = MAP_EMPTY_VALUE;
+      return;
+    }
+    /* If pos can be moved to del do so */
+    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
+      del->key = pos->key;
+      del->value = pos->value;
+      del = pos;
+      shift = 1;
+    } else {
+      ++shift;
+    }
+  }
+}
+
+/**
+ * Destroys a map that was initialized with COVER_map_init().
+ */
+static void COVER_map_destroy(COVER_map_t *map) {
+  if (map->data) {
+    free(map->data);
+  }
+  map->data = NULL;
+  map->size = 0;
+}
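+
+/* Illustrative usage of the map (editor's sketch, using only the functions above):
+ *
+ *   COVER_map_t map;
+ *   if (COVER_map_init(&map, 16)) {
+ *     *COVER_map_at(&map, 42) += 1;                // insert key 42 (value starts at 0)
+ *     U32 const count = *COVER_map_at(&map, 42);   // count == 1
+ *     COVER_map_remove(&map, 42);
+ *     COVER_map_destroy(&map);
+ *   }
+ */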
+
+/*-*************************************
+* Context
+***************************************/
+
+typedef struct {
+  const BYTE *samples;
+  size_t *offsets;
+  const size_t *samplesSizes;
+  size_t nbSamples;
+  U32 *suffix;
+  size_t suffixSize;
+  U32 *freqs;
+  U32 *dmerAt;
+  unsigned d;
+} COVER_ctx_t;
+
+/* We need a global context for qsort... */
+static COVER_ctx_t *g_ctx = NULL;
+
+/*-*************************************
+*  Helper functions
+***************************************/
+
+/**
+ * Returns the sum of the sample sizes.
+ */
+static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
+  size_t sum = 0;
+  size_t i;
+  for (i = 0; i < nbSamples; ++i) {
+    sum += samplesSizes[i];
+  }
+  return sum;
+}
+
+/**
+ * Returns a negative value if the dmer at lp is less than the dmer at rp.
+ * Returns 0 if the dmers at lp and rp are equal.
+ * Returns a positive value if the dmer at lp is greater than the dmer at rp.
+ */
+static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+  const U32 lhs = *(const U32 *)lp;
+  const U32 rhs = *(const U32 *)rp;
+  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
+}
+
+/**
+ * Same as COVER_cmp() except ties are broken by pointer value
+ * NOTE: g_ctx must be set to call this function.  A global is required because
+ * qsort doesn't take an opaque pointer.
+ */
+static int COVER_strict_cmp(const void *lp, const void *rp) {
+  int result = COVER_cmp(g_ctx, lp, rp);
+  if (result == 0) {
+    result = lp < rp ? -1 : 1;
+  }
+  return result;
+}
+
+/**
+ * Returns the first pointer in [first, last) whose element does not compare
+ * less than value.  If no such element exists it returns last.
+ */
+static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
+                                       size_t value) {
+  size_t count = last - first;
+  while (count != 0) {
+    size_t step = count / 2;
+    const size_t *ptr = first;
+    ptr += step;
+    if (*ptr < value) {
+      first = ++ptr;
+      count -= step + 1;
+    } else {
+      count = step;
+    }
+  }
+  return first;
+}
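+
+/* Example (illustrative): for first = {2, 4, 4, 8}, value = 4 returns a pointer
+ * to the first 4; value = 5 returns a pointer to 8; value = 9 returns last.
+ */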
+
+/**
+ * Generic groupBy function.
+ * Groups an array sorted by cmp into groups with equivalent values.
+ * Calls grp for each group.
+ */
+static void
+COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
+              int (*cmp)(COVER_ctx_t *, const void *, const void *),
+              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
+  const BYTE *ptr = (const BYTE *)data;
+  size_t num = 0;
+  while (num < count) {
+    const BYTE *grpEnd = ptr + size;
+    ++num;
+    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
+      grpEnd += size;
+      ++num;
+    }
+    grp(ctx, ptr, grpEnd);
+    ptr = grpEnd;
+  }
+}
+
+/*-*************************************
+*  Cover functions
+***************************************/
+
+/**
+ * Called on each group of positions with the same dmer.
+ * Counts the frequency of each dmer and saves it in the suffix array.
+ * Fills `ctx->dmerAt`.
+ */
+static void COVER_group(COVER_ctx_t *ctx, const void *group,
+                        const void *groupEnd) {
+  /* The group consists of all the positions with the same first d bytes. */
+  const U32 *grpPtr = (const U32 *)group;
+  const U32 *grpEnd = (const U32 *)groupEnd;
+  /* The dmerId is how we will reference this dmer.
+   * This allows us to map the whole dmer space to a much smaller space, the
+   * size of the suffix array.
+   */
+  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
+  /* Count the number of samples this dmer shows up in */
+  U32 freq = 0;
+  /* Details */
+  const size_t *curOffsetPtr = ctx->offsets;
+  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
+  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
+   * different sample than the last.
+   */
+  size_t curSampleEnd = ctx->offsets[0];
+  for (; grpPtr != grpEnd; ++grpPtr) {
+    /* Save the dmerId for this position so we can get back to it. */
+    ctx->dmerAt[*grpPtr] = dmerId;
+    /* Dictionaries only help for the first reference to the dmer.
+     * After that zstd can reference the match from the previous reference.
+     * So only count each dmer once for each sample it is in.
+     */
+    if (*grpPtr < curSampleEnd) {
+      continue;
+    }
+    freq += 1;
+    /* Binary search to find the end of the sample *grpPtr is in.
+     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
+     * search because the loop is over.
+     */
+    if (grpPtr + 1 != grpEnd) {
+      const size_t *sampleEndPtr =
+          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
+      curSampleEnd = *sampleEndPtr;
+      curOffsetPtr = sampleEndPtr + 1;
+    }
+  }
+  /* At this point we are never going to look at this segment of the suffix
+   * array again.  We take advantage of this fact to save memory.
+   * We store the frequency of the dmer in the first position of the group,
+   * which is dmerId.
+   */
+  ctx->suffix[dmerId] = freq;
+}
+
+/**
+ * A segment is a range in the source as well as the score of the segment.
+ */
+typedef struct {
+  U32 begin;
+  U32 end;
+  double score;
+} COVER_segment_t;
+
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the following function:
+ *
+ * Let F(d) be the frequency of dmer d.
+ * Let S_i be the dmer at position i of segment S which has length k.
+ *
+ *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
+ * Once the dmer d is in the dictionary we set F(d) = 0.
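+ *
+ * Worked example (editor's illustration): with d = 2 and k = 3, a segment
+ * covers k - d + 1 = 2 dmers.  If F("ab") = 4 and F("bc") = 1, then
+ * Score("abc") = 4 + 1 = 5.  Once "abc" enters the dictionary, both
+ * frequencies are zeroed, so later overlapping segments gain nothing from
+ * "ab" or "bc".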
+ */
+static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
+                                           COVER_map_t *activeDmers, U32 begin,
+                                           U32 end, COVER_params_t parameters) {
+  /* Constants */
+  const U32 k = parameters.k;
+  const U32 d = parameters.d;
+  const U32 dmersInK = k - d + 1;
+  /* Try each segment (activeSegment) and save the best (bestSegment) */
+  COVER_segment_t bestSegment = {0, 0, 0};
+  COVER_segment_t activeSegment;
+  /* Reset the activeDmers in the segment */
+  COVER_map_clear(activeDmers);
+  /* The activeSegment starts at the beginning of the epoch. */
+  activeSegment.begin = begin;
+  activeSegment.end = begin;
+  activeSegment.score = 0;
+  /* Slide the activeSegment through the whole epoch.
+   * Save the best segment in bestSegment.
+   */
+  while (activeSegment.end < end) {
+    /* The dmerId for the dmer at the next position */
+    U32 newDmer = ctx->dmerAt[activeSegment.end];
+    /* The entry in activeDmers for this dmerId */
+    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
+    /* If the dmer isn't already present in the segment add its score. */
+    if (*newDmerOcc == 0) {
+      /* The paper suggests using the L-0.5 norm, but experiments show that it
+       * doesn't help.
+       */
+      activeSegment.score += freqs[newDmer];
+    }
+    /* Add the dmer to the segment */
+    activeSegment.end += 1;
+    *newDmerOcc += 1;
+
+    /* If the window is now too large, drop the first position */
+    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+      U32 delDmer = ctx->dmerAt[activeSegment.begin];
+      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
+      activeSegment.begin += 1;
+      *delDmerOcc -= 1;
+      /* If this is the last occurrence of the dmer, subtract its score */
+      if (*delDmerOcc == 0) {
+        COVER_map_remove(activeDmers, delDmer);
+        activeSegment.score -= freqs[delDmer];
+      }
+    }
+
+    /* If this segment is the best so far save it */
+    if (activeSegment.score > bestSegment.score) {
+      bestSegment = activeSegment;
+    }
+  }
+  {
+    /* Trim off the zero frequency head and tail from the segment. */
+    U32 newBegin = bestSegment.end;
+    U32 newEnd = bestSegment.begin;
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      U32 freq = freqs[ctx->dmerAt[pos]];
+      if (freq != 0) {
+        newBegin = MIN(newBegin, pos);
+        newEnd = pos + 1;
+      }
+    }
+    bestSegment.begin = newBegin;
+    bestSegment.end = newEnd;
+  }
+  {
+    /* Zero out the frequency of each dmer covered by the chosen segment. */
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      freqs[ctx->dmerAt[pos]] = 0;
+    }
+  }
+  return bestSegment;
+}
+
+/**
+ * Check the validity of the parameters.
+ * Returns non-zero if the parameters are valid and 0 otherwise.
+ */
+static int COVER_checkParameters(COVER_params_t parameters) {
+  /* k and d are required parameters */
+  if (parameters.d == 0 || parameters.k == 0) {
+    return 0;
+  }
+  /* d <= k */
+  if (parameters.d > parameters.k) {
+    return 0;
+  }
+  return 1;
+}
+
+/**
+ * Clean up a context initialized with `COVER_ctx_init()`.
+ */
+static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
+  if (!ctx) {
+    return;
+  }
+  if (ctx->suffix) {
+    free(ctx->suffix);
+    ctx->suffix = NULL;
+  }
+  if (ctx->freqs) {
+    free(ctx->freqs);
+    ctx->freqs = NULL;
+  }
+  if (ctx->dmerAt) {
+    free(ctx->dmerAt);
+    ctx->dmerAt = NULL;
+  }
+  if (ctx->offsets) {
+    free(ctx->offsets);
+    ctx->offsets = NULL;
+  }
+}
+
+/**
+ * Prepare a context for dictionary building.
+ * The context is only dependent on the parameter `d` and can be used multiple
+ * times.
+ * Returns 1 on success or zero on error.
+ * The context must be destroyed with `COVER_ctx_destroy()`.
+ */
+static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
+                          const size_t *samplesSizes, unsigned nbSamples,
+                          unsigned d) {
+  const BYTE *const samples = (const BYTE *)samplesBuffer;
+  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+  /* Checks */
+  if (totalSamplesSize < d ||
+      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
+    DISPLAYLEVEL(1, "Total samples size is too large, maximum size is %u MB\n",
+                 (COVER_MAX_SAMPLES_SIZE >> 20));
+    return 0;
+  }
+  /* Zero the context */
+  memset(ctx, 0, sizeof(*ctx));
+  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
+               (U32)totalSamplesSize);
+  ctx->samples = samples;
+  ctx->samplesSizes = samplesSizes;
+  ctx->nbSamples = nbSamples;
+  /* Partial suffix array */
+  ctx->suffixSize = totalSamplesSize - d + 1;
+  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  /* Maps index to the dmerID */
+  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  /* The offsets of each file */
+  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
+  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
+    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
+    COVER_ctx_destroy(ctx);
+    return 0;
+  }
+  ctx->freqs = NULL;
+  ctx->d = d;
+
+  /* Fill offsets from the samplesSizes */
+  {
+    U32 i;
+    ctx->offsets[0] = 0;
+    for (i = 1; i <= nbSamples; ++i) {
+      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+    }
+  }
+  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
+  {
+    /* suffix is a partial suffix array.
+     * It only sorts suffixes by their first parameters.d bytes.
+     * The sort is stable, so each dmer group is sorted by position in input.
+     */
+    U32 i;
+    for (i = 0; i < ctx->suffixSize; ++i) {
+      ctx->suffix[i] = i;
+    }
+    /* qsort doesn't take an opaque pointer, so pass as a global */
+    g_ctx = ctx;
+    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), &COVER_strict_cmp);
+  }
+  DISPLAYLEVEL(2, "Computing frequencies\n");
+  /* For each dmer group (group of positions with the same first d bytes):
+   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
+   *    (groupBeginPtr - suffix).  This allows us to go from position to
+   *    dmerID so we can look up values in freq.
+   * 2. We calculate how many samples the dmer occurs in and save it in
+   *    freqs[dmerId].
+   */
+  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, &COVER_cmp,
+                &COVER_group);
+  ctx->freqs = ctx->suffix;
+  ctx->suffix = NULL;
+  return 1;
+}
+
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
+                                    COVER_map_t *activeDmers, void *dictBuffer,
+                                    size_t dictBufferCapacity,
+                                    COVER_params_t parameters) {
+  BYTE *const dict = (BYTE *)dictBuffer;
+  size_t tail = dictBufferCapacity;
+  /* Divide the data up into epochs of equal size.
+   * We will select at least one segment from each epoch.
+   */
+  /* Select at least one epoch, to avoid dividing by zero when k exceeds the buffer capacity */
+  const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
+  const U32 epochSize = (U32)(ctx->suffixSize / epochs);
+  size_t epoch;
+  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
+               epochSize);
+  /* Loop through the epochs until there are no more segments or the dictionary
+   * is full.
+   */
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
+    const U32 epochBegin = (U32)(epoch * epochSize);
+    const U32 epochEnd = epochBegin + epochSize;
+    size_t segmentSize;
+    /* Select a segment */
+    COVER_segment_t segment = COVER_selectSegment(
+        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
+    /* Trim the segment if necessary and if it is empty then we are done */
+    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+    if (segmentSize == 0) {
+      break;
+    }
+    /* We fill the dictionary from the back to allow the best segments to be
+     * referenced with the smallest offsets.
+     */
+    tail -= segmentSize;
+    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+    DISPLAYUPDATE(
+        2, "\r%u%%       ",
+        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+  }
+  DISPLAYLEVEL(2, "\r%79s\r", "");
+  return tail;
+}
+
+/**
+ * Translate from COVER_params_t to ZDICT_params_t required for finalizing the
+ * dictionary.
+ */
+static ZDICT_params_t COVER_translateParams(COVER_params_t parameters) {
+  ZDICT_params_t zdictParams;
+  memset(&zdictParams, 0, sizeof(zdictParams));
+  zdictParams.notificationLevel = 1;
+  zdictParams.dictID = parameters.dictID;
+  zdictParams.compressionLevel = parameters.compressionLevel;
+  return zdictParams;
+}
+
+/**
+ * Constructs a dictionary using a heuristic based on the following paper:
+ *
+ * Liao, Petri, Moffat, Wirth
+ * Effective Construction of Relative Lempel-Ziv Dictionaries
+ * Published in WWW 2016.
+ */
+ZDICTLIB_API size_t COVER_trainFromBuffer(
+    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
+    const size_t *samplesSizes, unsigned nbSamples, COVER_params_t parameters) {
+  BYTE *const dict = (BYTE *)dictBuffer;
+  COVER_ctx_t ctx;
+  COVER_map_t activeDmers;
+  /* Checks */
+  if (!COVER_checkParameters(parameters)) {
+    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+    return ERROR(GENERIC);
+  }
+  if (nbSamples == 0) {
+    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+    return ERROR(GENERIC);
+  }
+  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                 ZDICT_DICTSIZE_MIN);
+    return ERROR(dstSize_tooSmall);
+  }
+  /* Initialize global data */
+  g_displayLevel = parameters.notificationLevel;
+  /* Initialize context and activeDmers */
+  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                      parameters.d)) {
+    return ERROR(GENERIC);
+  }
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    COVER_ctx_destroy(&ctx);
+    return ERROR(GENERIC);
+  }
+
+  DISPLAYLEVEL(2, "Building dictionary\n");
+  {
+    const size_t tail =
+        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
+                              dictBufferCapacity, parameters);
+    ZDICT_params_t zdictParams = COVER_translateParams(parameters);
+    const size_t dictionarySize = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        samplesBuffer, samplesSizes, nbSamples, zdictParams);
+    if (!ZSTD_isError(dictionarySize)) {
+      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+                   (U32)dictionarySize);
+    }
+    COVER_ctx_destroy(&ctx);
+    COVER_map_destroy(&activeDmers);
+    return dictionarySize;
+  }
+}
+
+/**
+ * COVER_best_t is used for two purposes:
+ * 1. Synchronizing threads.
+ * 2. Saving the best parameters and dictionary.
+ *
+ * All of the methods except COVER_best_init() are thread safe if zstd is
+ * compiled with multithreaded support.
+ */
+typedef struct COVER_best_s {
+  pthread_mutex_t mutex;
+  pthread_cond_t cond;
+  size_t liveJobs;
+  void *dict;
+  size_t dictSize;
+  COVER_params_t parameters;
+  size_t compressedSize;
+} COVER_best_t;
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+static void COVER_best_init(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_init(&best->mutex, NULL);
+  pthread_cond_init(&best->cond, NULL);
+  best->liveJobs = 0;
+  best->dict = NULL;
+  best->dictSize = 0;
+  best->compressedSize = (size_t)-1;
+  memset(&best->parameters, 0, sizeof(best->parameters));
+}
+
+/**
+ * Wait until liveJobs == 0.
+ */
+static void COVER_best_wait(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_lock(&best->mutex);
+  while (best->liveJobs != 0) {
+    pthread_cond_wait(&best->cond, &best->mutex);
+  }
+  pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+static void COVER_best_destroy(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  COVER_best_wait(best);
+  if (best->dict) {
+    free(best->dict);
+  }
+  pthread_mutex_destroy(&best->mutex);
+  pthread_cond_destroy(&best->cond);
+}
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+static void COVER_best_start(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_lock(&best->mutex);
+  ++best->liveJobs;
+  pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Called when a thread finishes executing, both on error or success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far save it and its parameters.
+ */
+static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+                              COVER_params_t parameters, void *dict,
+                              size_t dictSize) {
+  if (!best) {
+    return;
+  }
+  {
+    size_t liveJobs;
+    pthread_mutex_lock(&best->mutex);
+    --best->liveJobs;
+    liveJobs = best->liveJobs;
+    /* If the job succeeded and the new dictionary is better */
+    if (!ZSTD_isError(compressedSize) && compressedSize < best->compressedSize) {
+      /* Allocate space if necessary */
+      if (!best->dict || best->dictSize < dictSize) {
+        if (best->dict) {
+          free(best->dict);
+        }
+        best->dict = malloc(dictSize);
+        if (!best->dict) {
+          best->compressedSize = ERROR(GENERIC);
+          best->dictSize = 0;
+          /* unlock before returning, so that waiting threads are not blocked forever */
+          pthread_mutex_unlock(&best->mutex);
+          return;
+        }
+      }
+      /* Save the dictionary, parameters, and size */
+      memcpy(best->dict, dict, dictSize);
+      best->dictSize = dictSize;
+      best->parameters = parameters;
+      best->compressedSize = compressedSize;
+    }
+    pthread_mutex_unlock(&best->mutex);
+    if (liveJobs == 0) {
+      pthread_cond_broadcast(&best->cond);
+    }
+  }
+}
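+
+/* Typical lifecycle (editor's sketch): the dispatcher calls COVER_best_init()
+ * once, COVER_best_start() before launching each job, and every job ends by
+ * calling COVER_best_finish(), e.g.
+ *
+ *   COVER_best_t best;
+ *   COVER_best_init(&best);
+ *   COVER_best_start(&best);   // before dispatching a job
+ *   // ... job runs, ends with COVER_best_finish(&best, size, params, dict, dictSize);
+ *   COVER_best_wait(&best);    // block until all jobs have finished
+ *   COVER_best_destroy(&best);
+ */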
+
+/**
+ * Parameters for COVER_tryParameters().
+ */
+typedef struct COVER_tryParameters_data_s {
+  const COVER_ctx_t *ctx;
+  COVER_best_t *best;
+  size_t dictBufferCapacity;
+  COVER_params_t parameters;
+} COVER_tryParameters_data_t;
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void COVER_tryParameters(void *opaque) {
+  /* Save parameters as local variables */
+  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
+  const COVER_ctx_t *const ctx = data->ctx;
+  const COVER_params_t parameters = data->parameters;
+  size_t dictBufferCapacity = data->dictBufferCapacity;
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Allocate space for hash table, dict, and freqs */
+  COVER_map_t activeDmers;
+  BYTE *const dict = (BYTE *)malloc(dictBufferCapacity);
+  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    goto _cleanup;
+  }
+  if (!dict || !freqs) {
+    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+    goto _cleanup;
+  }
+  /* Copy the frequencies because we need to modify them */
+  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
+  /* Build the dictionary */
+  {
+    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
+                                              dictBufferCapacity, parameters);
+    const ZDICT_params_t zdictParams = COVER_translateParams(parameters);
+    dictBufferCapacity = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples, zdictParams);
+    if (ZDICT_isError(dictBufferCapacity)) {
+      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+      goto _cleanup;
+    }
+  }
+  /* Check total compressed size */
+  {
+    /* Pointers */
+    ZSTD_CCtx *cctx;
+    ZSTD_CDict *cdict;
+    void *dst;
+    /* Local variables */
+    size_t dstCapacity;
+    size_t i;
+    /* Allocate dst with enough space to compress the maximum sized sample */
+    {
+      size_t maxSampleSize = 0;
+      for (i = 0; i < ctx->nbSamples; ++i) {
+        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
+      }
+      dstCapacity = ZSTD_compressBound(maxSampleSize);
+      dst = malloc(dstCapacity);
+    }
+    /* Create the cctx and cdict */
+    cctx = ZSTD_createCCtx();
+    cdict =
+        ZSTD_createCDict(dict, dictBufferCapacity, parameters.compressionLevel);
+    if (!dst || !cctx || !cdict) {
+      goto _compressCleanup;
+    }
+    /* Compress each sample and sum their sizes (or error) */
+    totalCompressedSize = 0;
+    for (i = 0; i < ctx->nbSamples; ++i) {
+      const size_t size = ZSTD_compress_usingCDict(
+          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
+          ctx->samplesSizes[i], cdict);
+      if (ZSTD_isError(size)) {
+        totalCompressedSize = ERROR(GENERIC);
+        goto _compressCleanup;
+      }
+      totalCompressedSize += size;
+    }
+  _compressCleanup:
+    ZSTD_freeCCtx(cctx);
+    ZSTD_freeCDict(cdict);
+    if (dst) {
+      free(dst);
+    }
+  }
+
+_cleanup:
+  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
+                    dictBufferCapacity);
+  free(data);
+  COVER_map_destroy(&activeDmers);
+  if (dict) {
+    free(dict);
+  }
+  if (freqs) {
+    free(freqs);
+  }
+}
+
+ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void *dictBuffer,
+                                                  size_t dictBufferCapacity,
+                                                  const void *samplesBuffer,
+                                                  const size_t *samplesSizes,
+                                                  unsigned nbSamples,
+                                                  COVER_params_t *parameters) {
+  /* constants */
+  const unsigned nbThreads = parameters->nbThreads;
+  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+  const unsigned kMaxD = parameters->d == 0 ? 16 : parameters->d;
+  const unsigned kMinK = parameters->k == 0 ? kMaxD : parameters->k;
+  const unsigned kMaxK = parameters->k == 0 ? 2048 : parameters->k;
+  const unsigned kSteps = parameters->steps == 0 ? 32 : parameters->steps;
+  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+  const unsigned kIterations =
+      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+  /* Local variables */
+  const int displayLevel = parameters->notificationLevel;
+  unsigned iteration = 1;
+  unsigned d;
+  unsigned k;
+  COVER_best_t best;
+  POOL_ctx *pool = NULL;
+  /* Checks */
+  if (kMinK < kMaxD || kMaxK < kMinK) {
+    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+    return ERROR(GENERIC);
+  }
+  if (nbSamples == 0) {
+    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+    return ERROR(GENERIC);
+  }
+  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                 ZDICT_DICTSIZE_MIN);
+    return ERROR(dstSize_tooSmall);
+  }
+  if (nbThreads > 1) {
+    pool = POOL_create(nbThreads, 1);
+    if (!pool) {
+      return ERROR(memory_allocation);
+    }
+  }
+  /* Initialization */
+  COVER_best_init(&best);
+  /* Turn down global display level to clean up display at level 2 and below */
+  g_displayLevel = parameters->notificationLevel - 1;
+  /* Loop through d first because each new value needs a new context */
+  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+                    kIterations);
+  for (d = kMinD; d <= kMaxD; d += 2) {
+    /* Initialize the context for this value of d */
+    COVER_ctx_t ctx;
+    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+      COVER_best_destroy(&best);
+      POOL_free(pool);   /* pool may be NULL; POOL_free supports that */
+      return ERROR(GENERIC);
+    }
+    /* Loop through k reusing the same context */
+    for (k = kMinK; k <= kMaxK; k += kStepSize) {
+      /* Prepare the arguments */
+      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
+          sizeof(COVER_tryParameters_data_t));
+      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+      if (!data) {
+        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+        COVER_best_destroy(&best);
+        COVER_ctx_destroy(&ctx);
+        POOL_free(pool);   /* pool may be NULL; POOL_free supports that */
+        return ERROR(GENERIC);
+      }
+      data->ctx = &ctx;
+      data->best = &best;
+      data->dictBufferCapacity = dictBufferCapacity;
+      data->parameters = *parameters;
+      data->parameters.k = k;
+      data->parameters.d = d;
+      data->parameters.steps = kSteps;
+      /* Check the parameters */
+      if (!COVER_checkParameters(data->parameters)) {
+        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+        continue;
+      }
+      /* Call the function and pass ownership of data to it */
+      COVER_best_start(&best);
+      if (pool) {
+        POOL_add(pool, &COVER_tryParameters, data);
+      } else {
+        COVER_tryParameters(data);
+      }
+      /* Print status */
+      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
+                         (U32)((iteration * 100) / kIterations));
+      ++iteration;
+    }
+    COVER_best_wait(&best);
+    COVER_ctx_destroy(&ctx);
+  }
+  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+  /* Fill the output buffer and *parameters with the best dictionary and parameters found */
+  {
+    const size_t dictSize = best.dictSize;
+    if (ZSTD_isError(best.compressedSize)) {
+      COVER_best_destroy(&best);
+      return best.compressedSize;
+    }
+    *parameters = best.parameters;
+    memcpy(dictBuffer, best.dict, dictSize);
+    COVER_best_destroy(&best);
+    POOL_free(pool);
+    return dictSize;
+  }
+}
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Tue Feb 28 11:13:25 2017 -0800
@@ -36,12 +36,11 @@
 #include <time.h>          /* clock */
 
 #include "mem.h"           /* read */
-#include "error_private.h"
 #include "fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
 #define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
+#include "huf.h"           /* HUF_buildCTable, HUF_writeCTable */
 #include "zstd_internal.h" /* includes zstd.h */
-#include "xxhash.h"
+#include "xxhash.h"        /* XXH64 */
 #include "divsufsort.h"
 #ifndef ZDICT_STATIC_LINKING_ONLY
 #  define ZDICT_STATIC_LINKING_ONLY
@@ -61,7 +60,7 @@
 #define NOISELENGTH 32
 
 #define MINRATIO 4
-static const int g_compressionLevel_default = 5;
+static const int g_compressionLevel_default = 6;
 static const U32 g_selectivity_default = 9;
 static const size_t g_provision_entropySize = 200;
 static const size_t g_min_fast_dictContent = 192;
@@ -307,13 +306,13 @@
         } while (length >=MINMATCHLENGTH);
 
         /* look backward */
-		length = MINMATCHLENGTH;
-		while ((length >= MINMATCHLENGTH) & (start > 0)) {
-			length = ZDICT_count(b + pos, b + suffix[start - 1]);
-			if (length >= LLIMIT) length = LLIMIT - 1;
-			lengthList[length]++;
-			if (length >= MINMATCHLENGTH) start--;
-		}
+        length = MINMATCHLENGTH;
+        while ((length >= MINMATCHLENGTH) & (start > 0)) {
+            length = ZDICT_count(b + pos, b + suffix[start - 1]);
+            if (length >= LLIMIT) length = LLIMIT - 1;
+            lengthList[length]++;
+            if (length >= MINMATCHLENGTH) start--;
+        }
 
         /* largest useful length */
         memset(cumulLength, 0, sizeof(cumulLength));
@@ -570,7 +569,7 @@
             if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
     }
     cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_ABSOLUTEMAX, src, srcSize);
-    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(1, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
+    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
 
     if (cSize) {  /* if == 0; block is not compressible */
         const seqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc);
@@ -825,6 +824,55 @@
 }
 
 
+
+size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                          const void* customDictContent, size_t dictContentSize,
+                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                          ZDICT_params_t params)
+{
+    size_t hSize;
+#define HBUFFSIZE 256
+    BYTE header[HBUFFSIZE];
+    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    U32 const notificationLevel = params.notificationLevel;
+
+    /* check conditions */
+    if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
+    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
+    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
+
+    /* dictionary header */
+    MEM_writeLE32(header, ZSTD_DICT_MAGIC);
+    {   U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
+        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+        U32 const dictID = params.dictID ? params.dictID : compliantID;
+        MEM_writeLE32(header+4, dictID);
+    }
+    hSize = 8;
+
+    /* entropy tables */
+    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
+    DISPLAYLEVEL(2, "statistics ... \n");
+    {   size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
+                                  compressionLevel,
+                                  samplesBuffer, samplesSizes, nbSamples,
+                                  customDictContent, dictContentSize,
+                                  notificationLevel);
+        if (ZDICT_isError(eSize)) return eSize;
+        hSize += eSize;
+    }
+
+    /* copy elements in final buffer ; note : src and dst buffer can overlap */
+    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
+    {   size_t const dictSize = hSize + dictContentSize;
+        char* dictEnd = (char*)dictBuffer + dictSize;
+        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
+        memcpy(dictBuffer, header, hSize);
+        return dictSize;
+    }
+}
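+
+/* Resulting dictionary layout (editor's note, following the code above):
+ *
+ *   | magic (4 B) | dictID (4 B) | entropy tables | dictionary content |
+ *   |<------------------- hSize ----------------->|
+ *
+ * The content is written last and is truncated first when capacity is short.
+ */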
+
+
 size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
                                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                                  ZDICT_params_t params)
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Tue Feb 28 11:13:25 2017 -0800
@@ -19,15 +19,18 @@
 #include <stddef.h>  /* size_t */
 
 
-/*======  Export for Windows  ======*/
-/*!
-*  ZSTD_DLL_EXPORT :
-*  Enable exporting of functions when building a Windows DLL
-*/
-#if defined(_WIN32) && defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZDICTLIB_API __declspec(dllexport)
+/* =====   ZDICTLIB_API : control library symbols visibility   ===== */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#  define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
 #else
-#  define ZDICTLIB_API
+#  define ZDICTLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+#  define ZDICTLIB_API ZDICTLIB_VISIBILITY
 #endif
 
 
@@ -79,27 +82,114 @@
               or an error code, which can be tested by ZDICT_isError().
     note : ZDICT_trainFromBuffer_advanced() will send notifications into stderr if instructed to, using notificationLevel>0.
 */
-size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                ZDICT_params_t parameters);
+
+/*! COVER_params_t :
+    For all values 0 means default.
+    k and d are the only required parameters.
+*/
+typedef struct {
+    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
+
+    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    unsigned notificationLevel;  /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
+    unsigned dictID;             /* 0 means auto mode (32-bits random value); other : force dictID value */
+    int      compressionLevel;   /* 0 means default; target a specific zstd compression level */
+} COVER_params_t;
+
+
+/*! COVER_trainFromBuffer() :
+    Train a dictionary from an array of samples using the COVER algorithm.
+    Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+    The resulting dictionary will be saved into `dictBuffer`.
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+              or an error code, which can be tested with ZDICT_isError().
+    Note : COVER_trainFromBuffer() requires about 9 bytes of memory for each input byte.
+    Tips : In general, a reasonable dictionary has a size of ~ 100 KB.
+           It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
+           In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+           It's recommended that the total size of all samples be roughly 100x the target size of the dictionary.
+*/
+ZDICTLIB_API size_t COVER_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                              COVER_params_t parameters);
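+
+/* Example call (editor's sketch; buffer names are placeholders, error handling elided):
+ *
+ *   COVER_params_t params;
+ *   memset(&params, 0, sizeof(params));
+ *   params.k = 1024;  params.d = 8;
+ *   { size_t const dictSize = COVER_trainFromBuffer(dictBuf, dictCap,
+ *                                 samples, sampleSizes, nbSamples, params);
+ *     if (ZDICT_isError(dictSize)) { handle the error }  }
+ */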
+
+/*! COVER_optimizeTrainFromBuffer() :
+    The same requirements as above hold for all the parameters except `parameters`.
+    This function tries many parameter combinations and picks the best parameters.
+    `*parameters` is filled with the best parameters found, and the dictionary
+    constructed with those parameters is stored in `dictBuffer`.
+
+    All of the parameters d, k, and steps are optional.
+    If d is non-zero then we don't check multiple values of d; otherwise we check d = {6, 8, 10, 12, 14, 16}.
+    If steps is zero it defaults to 32.
+    If k is non-zero then we don't check multiple values of k; otherwise we check `steps` values of k in [16, 2048].
+
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+              or an error code, which can be tested with ZDICT_isError().
+              On success `*parameters` contains the parameters selected.
+    Note : COVER_optimizeTrainFromBuffer() requires about 8 bytes of memory for each input byte, plus an additional 5 bytes per input byte for each thread.
+*/
+ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                                     const void* samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+                                     COVER_params_t *parameters);
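+
+/* Example call (editor's sketch): leaving k and d at 0 asks the optimizer to
+ * search; on success *parameters holds the selected values:
+ *
+ *   COVER_params_t params;
+ *   memset(&params, 0, sizeof(params));
+ *   params.nbThreads = 4;
+ *   { size_t const dictSize = COVER_optimizeTrainFromBuffer(dictBuf, dictCap,
+ *                                 samples, sampleSizes, nbSamples, &params);
+ *     if (!ZDICT_isError(dictSize)) { params.k and params.d hold the winners }  }
+ */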
+
+/*! ZDICT_finalizeDictionary() :
+
+    Given a custom content as a basis for dictionary, and a set of samples,
+    finalize dictionary by adding headers and statistics.
+
+    Samples must be stored concatenated in a flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
+
+    dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
+    dictBufferCapacity must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
+
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
+              or an error code, which can be tested by ZDICT_isError().
+    note : ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
+    note 2 : dictBuffer and customDictContent can overlap
+*/
+#define ZDICT_CONTENTSIZE_MIN 256
+#define ZDICT_DICTSIZE_MIN    512
+ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* customDictContent, size_t dictContentSize,
                                 const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                 ZDICT_params_t parameters);
 
 
-/*! ZDICT_addEntropyTablesFromBuffer() :
-
-    Given a content-only dictionary (built using any 3rd party algorithm),
-    add entropy tables computed from an array of samples.
-    Samples must be stored concatenated in a flat buffer `samplesBuffer`,
-    supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
 
-    The input dictionary content must be stored *at the end* of `dictBuffer`.
-    Its size is `dictContentSize`.
-    The resulting dictionary with added entropy tables will be *written back to `dictBuffer`*,
-    starting from its beginning.
-    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`).
-*/
+/* Deprecation warnings */
+/* It is generally possible to disable deprecation warnings from the compiler,
+   for example with -Wno-deprecated-declarations for gcc
+   or _CRT_SECURE_NO_WARNINGS in Visual Studio.
+   Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
+#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
+#  define ZDICT_DEPRECATED(message) ZDICTLIB_API   /* disable deprecation warnings */
+#else
+#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API [[deprecated(message)]]
+#  elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
+#  elif (ZDICT_GCC_VERSION >= 301)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
+#  elif defined(_MSC_VER)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
+#  else
+#    pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API
+#  endif
+#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
+
+ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
 size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
-                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
-
+                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
 
 
 #endif   /* ZDICT_STATIC_LINKING_ONLY */
--- a/contrib/python-zstandard/zstd/zstd.h	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd/zstd.h	Tue Feb 28 11:13:25 2017 -0800
@@ -20,13 +20,16 @@
 
 /* =====   ZSTDLIB_API : control library symbols visibility   ===== */
 #if defined(__GNUC__) && (__GNUC__ >= 4)
-#  define ZSTDLIB_API __attribute__ ((visibility ("default")))
-#elif defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZSTDLIB_API __declspec(dllexport)
+#  define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#else
+#  define ZSTDLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
 #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZSTDLIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
 #else
-#  define ZSTDLIB_API
+#  define ZSTDLIB_API ZSTDLIB_VISIBILITY
 #endif
 
 
@@ -53,7 +56,7 @@
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    1
-#define ZSTD_VERSION_RELEASE  2
+#define ZSTD_VERSION_RELEASE  3
 
 #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
 #define ZSTD_QUOTE(str) #str
@@ -170,8 +173,8 @@
 *   When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
 *   ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
 *   ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only.
-*   `dict` can be released after ZSTD_CDict creation. */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel);
+*   `dictBuffer` can be released after ZSTD_CDict creation, as its content is copied within CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel);
 
 /*! ZSTD_freeCDict() :
 *   Function frees memory allocated by ZSTD_createCDict(). */
@@ -191,8 +194,8 @@
 
 /*! ZSTD_createDDict() :
 *   Create a digested dictionary, ready to start decompression operation without startup delay.
-*   `dict` can be released after creation. */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize);
+*   dictBuffer can be released after DDict creation, as its content is copied inside DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
 
 /*! ZSTD_freeDDict() :
 *   Function frees memory allocated with ZSTD_createDDict() */
@@ -325,7 +328,7 @@
  * ***************************************************************************************/
 
 /* --- Constants ---*/
-#define ZSTD_MAGICNUMBER            0xFD2FB528   /* v0.8 */
+#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
 #define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
 
 #define ZSTD_WINDOWLOG_MAX_32  25
@@ -345,8 +348,9 @@
 #define ZSTD_TARGETLENGTH_MAX 999
 
 #define ZSTD_FRAMEHEADERSIZE_MAX 18    /* for static allocation */
+#define ZSTD_FRAMEHEADERSIZE_MIN  6
 static const size_t ZSTD_frameHeaderSize_prefix = 5;
-static const size_t ZSTD_frameHeaderSize_min = 6;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
 static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
 static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
 
@@ -365,9 +369,9 @@
 } ZSTD_compressionParameters;
 
 typedef struct {
-    unsigned contentSizeFlag; /**< 1: content size will be in frame header (if known). */
-    unsigned checksumFlag;    /**< 1: will generate a 22-bits checksum at end of frame, to be used for error detection by decompressor */
-    unsigned noDictIDFlag;    /**< 1: no dict ID will be saved into frame header (if dictionary compression) */
+    unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+    unsigned checksumFlag;    /**< 1: generate a 32-bits checksum at end of frame, for error detection */
+    unsigned noDictIDFlag;    /**< 1: no dictID will be saved into frame header (if dictionary compression) */
 } ZSTD_frameParameters;
 
 typedef struct {
@@ -397,9 +401,23 @@
  *  Gives the amount of memory used by a given ZSTD_CCtx */
 ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
 
+typedef enum {
+    ZSTD_p_forceWindow   /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0)*/
+} ZSTD_CCtxParameter;
+/*! ZSTD_setCCtxParameter() :
+ *  Set advanced parameters, selected through enum ZSTD_CCtxParameter
+ *  @result : 0, or an error code (which can be tested with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value);
+
+/*! ZSTD_createCDict_byReference() :
+ *  Create a digested dictionary for compression
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  dictBuffer must outlive the CDict and remain read-accessible throughout its lifetime */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
 /*! ZSTD_createCDict_advanced() :
  *  Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
                                                   ZSTD_parameters params, ZSTD_customMem customMem);
 
 /*! ZSTD_sizeof_CDict() :
@@ -455,6 +473,15 @@
  *  Gives the amount of memory used by a given ZSTD_DCtx */
 ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
 
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  dictBuffer must outlive the DDict and remain read-accessible throughout its lifetime */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                                  unsigned byReference, ZSTD_customMem customMem);
+
 /*! ZSTD_sizeof_DDict() :
  *  Gives the amount of memory used by a given ZSTD_DDict */
 ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
@@ -463,13 +490,13 @@
  *  Provides the dictID stored within dictionary.
  *  if @return == 0, the dictionary is not conformant with Zstandard specification.
  *  It can still be loaded, but as a content-only dictionary. */
-unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
 
 /*! ZSTD_getDictID_fromDDict() :
  *  Provides the dictID of the dictionary loaded into `ddict`.
  *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
  *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
 
 /*! ZSTD_getDictID_fromFrame() :
 *  Provides the dictID required to decompress the frame stored within `src`.
@@ -481,7 +508,7 @@
  *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
  *  - This is not a Zstandard frame.
 *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
-unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
 
 
 /********************************************************************
@@ -491,7 +518,7 @@
 /*=====   Advanced Streaming compression functions  =====*/
 ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
 ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
 ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
                                              ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be zero == unknown */
 ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
@@ -500,9 +527,9 @@
 
 
 /*=====   Advanced Streaming decompression functions  =====*/
-typedef enum { ZSTDdsp_maxWindowSize } ZSTD_DStreamParameter_e;
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
 ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
 ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
 ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict will just be referenced, and must outlive decompression session */
 ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
@@ -542,10 +569,10 @@
     In which case, it will "discard" the relevant memory section from its history.
 
   Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
-  It's possible to use a NULL,0 src content, in which case, it will write a final empty block to end the frame,
-  Without last block mark, frames will be considered unfinished (broken) by decoders.
+  It's possible to use srcSize==0, in which case it will write a final empty block to end the frame.
+  Without a last block mark, frames will be considered unfinished (corrupted) by decoders.
 
-  You can then reuse `ZSTD_CCtx` (ZSTD_compressBegin()) to compress some new frame.
+  A `ZSTD_CCtx` object can be re-used (via ZSTD_compressBegin()) to compress a new frame.
 */
 
 /*=====   Buffer-less streaming compression functions  =====*/
@@ -553,6 +580,7 @@
 ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
 ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize);
 ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize);
 ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
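
A rough sketch of the by-reference dictionary API added above, driven through
the cffi bindings reworked in the next file (this assumes
ZSTD_createCDict_byReference, ZSTD_compressBegin_usingCDict, and the helpers
used below are all present in the generated cdef; `payload` and `dict_bytes`
are placeholder byte strings):

    from _zstd_cffi import ffi, lib

    def one_frame_with_cdict(payload, dict_bytes, level=3):
        # dict_bytes is referenced, not copied: it must outlive the CDict.
        cdict = lib.ZSTD_createCDict_byReference(dict_bytes, len(dict_bytes), level)
        cctx = lib.ZSTD_createCCtx()
        try:
            zresult = lib.ZSTD_compressBegin_usingCDict(cctx, cdict, len(payload))
            if lib.ZSTD_isError(zresult):
                raise Exception(ffi.string(lib.ZSTD_getErrorName(zresult)))
            bound = lib.ZSTD_compressBound(len(payload))
            dst = ffi.new('char[]', bound)
            # Passing all input through compressEnd() writes the frame header,
            # the last block, and the epilogue in a single call.
            n = lib.ZSTD_compressEnd(cctx, dst, bound, payload, len(payload))
            if lib.ZSTD_isError(n):
                raise Exception(ffi.string(lib.ZSTD_getErrorName(n)))
            return ffi.buffer(dst, n)[:]
        finally:
            lib.ZSTD_freeCCtx(cctx)
            lib.ZSTD_freeCDict(cdict)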
 
--- a/contrib/python-zstandard/zstd_cffi.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/python-zstandard/zstd_cffi.py	Tue Feb 28 11:13:25 2017 -0800
@@ -8,145 +8,1035 @@
 
 from __future__ import absolute_import, unicode_literals
 
-import io
+import sys
 
 from _zstd_cffi import (
     ffi,
     lib,
 )
 
+if sys.version_info[0] == 2:
+    bytes_type = str
+    int_type = long
+else:
+    bytes_type = bytes
+    int_type = int
 
-_CSTREAM_IN_SIZE = lib.ZSTD_CStreamInSize()
-_CSTREAM_OUT_SIZE = lib.ZSTD_CStreamOutSize()
+
+COMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_CStreamInSize()
+COMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_CStreamOutSize()
+DECOMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_DStreamInSize()
+DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_DStreamOutSize()
+
+new_nonzero = ffi.new_allocator(should_clear_after_alloc=False)
+
+
+MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel()
+MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER
+FRAME_HEADER = b'\x28\xb5\x2f\xfd'
+ZSTD_VERSION = (lib.ZSTD_VERSION_MAJOR, lib.ZSTD_VERSION_MINOR, lib.ZSTD_VERSION_RELEASE)
+
+WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN
+WINDOWLOG_MAX = lib.ZSTD_WINDOWLOG_MAX
+CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN
+CHAINLOG_MAX = lib.ZSTD_CHAINLOG_MAX
+HASHLOG_MIN = lib.ZSTD_HASHLOG_MIN
+HASHLOG_MAX = lib.ZSTD_HASHLOG_MAX
+HASHLOG3_MAX = lib.ZSTD_HASHLOG3_MAX
+SEARCHLOG_MIN = lib.ZSTD_SEARCHLOG_MIN
+SEARCHLOG_MAX = lib.ZSTD_SEARCHLOG_MAX
+SEARCHLENGTH_MIN = lib.ZSTD_SEARCHLENGTH_MIN
+SEARCHLENGTH_MAX = lib.ZSTD_SEARCHLENGTH_MAX
+TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN
+TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX
+
+STRATEGY_FAST = lib.ZSTD_fast
+STRATEGY_DFAST = lib.ZSTD_dfast
+STRATEGY_GREEDY = lib.ZSTD_greedy
+STRATEGY_LAZY = lib.ZSTD_lazy
+STRATEGY_LAZY2 = lib.ZSTD_lazy2
+STRATEGY_BTLAZY2 = lib.ZSTD_btlazy2
+STRATEGY_BTOPT = lib.ZSTD_btopt
+
+COMPRESSOBJ_FLUSH_FINISH = 0
+COMPRESSOBJ_FLUSH_BLOCK = 1
+
+
+class ZstdError(Exception):
+    pass
 
 
-class _ZstdCompressionWriter(object):
-    def __init__(self, cstream, writer):
-        self._cstream = cstream
+class CompressionParameters(object):
+    def __init__(self, window_log, chain_log, hash_log, search_log,
+                 search_length, target_length, strategy):
+        if window_log < WINDOWLOG_MIN or window_log > WINDOWLOG_MAX:
+            raise ValueError('invalid window log value')
+
+        if chain_log < CHAINLOG_MIN or chain_log > CHAINLOG_MAX:
+            raise ValueError('invalid chain log value')
+
+        if hash_log < HASHLOG_MIN or hash_log > HASHLOG_MAX:
+            raise ValueError('invalid hash log value')
+
+        if search_log < SEARCHLOG_MIN or search_log > SEARCHLOG_MAX:
+            raise ValueError('invalid search log value')
+
+        if search_length < SEARCHLENGTH_MIN or search_length > SEARCHLENGTH_MAX:
+            raise ValueError('invalid search length value')
+
+        if target_length < TARGETLENGTH_MIN or target_length > TARGETLENGTH_MAX:
+            raise ValueError('invalid target length value')
+
+        if strategy < STRATEGY_FAST or strategy > STRATEGY_BTOPT:
+            raise ValueError('invalid strategy value')
+
+        self.window_log = window_log
+        self.chain_log = chain_log
+        self.hash_log = hash_log
+        self.search_log = search_log
+        self.search_length = search_length
+        self.target_length = target_length
+        self.strategy = strategy
+
+    def as_compression_parameters(self):
+        p = ffi.new('ZSTD_compressionParameters *')[0]
+        p.windowLog = self.window_log
+        p.chainLog = self.chain_log
+        p.hashLog = self.hash_log
+        p.searchLog = self.search_log
+        p.searchLength = self.search_length
+        p.targetLength = self.target_length
+        p.strategy = self.strategy
+
+        return p
+
+def get_compression_parameters(level, source_size=0, dict_size=0):
+    params = lib.ZSTD_getCParams(level, source_size, dict_size)
+    return CompressionParameters(window_log=params.windowLog,
+                                 chain_log=params.chainLog,
+                                 hash_log=params.hashLog,
+                                 search_log=params.searchLog,
+                                 search_length=params.searchLength,
+                                 target_length=params.targetLength,
+                                 strategy=params.strategy)
+
+
+def estimate_compression_context_size(params):
+    if not isinstance(params, CompressionParameters):
+        raise ValueError('argument must be a CompressionParameters')
+
+    cparams = params.as_compression_parameters()
+    return lib.ZSTD_estimateCCtxSize(cparams)
+
+
+def estimate_decompression_context_size():
+    return lib.ZSTD_estimateDCtxSize()
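+
+
+# Illustrative sketch (hypothetical helper): how the functions above combine
+# to size contexts up front. The 1 MiB source_size is a placeholder.
+def _example_estimate_sizes(source_size=1048576):
+    params = get_compression_parameters(3, source_size=source_size)
+    return (estimate_compression_context_size(params),
+            estimate_decompression_context_size())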
+
+
+class ZstdCompressionWriter(object):
+    def __init__(self, compressor, writer, source_size, write_size):
+        self._compressor = compressor
         self._writer = writer
+        self._source_size = source_size
+        self._write_size = write_size
+        self._entered = False
 
     def __enter__(self):
+        if self._entered:
+            raise ZstdError('cannot __enter__ multiple times')
+
+        self._cstream = self._compressor._get_cstream(self._source_size)
+        self._entered = True
         return self
 
     def __exit__(self, exc_type, exc_value, exc_tb):
+        self._entered = False
+
         if not exc_type and not exc_value and not exc_tb:
             out_buffer = ffi.new('ZSTD_outBuffer *')
-            out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE)
-            out_buffer.size = _CSTREAM_OUT_SIZE
+            dst_buffer = ffi.new('char[]', self._write_size)
+            out_buffer.dst = dst_buffer
+            out_buffer.size = self._write_size
             out_buffer.pos = 0
 
             while True:
-                res = lib.ZSTD_endStream(self._cstream, out_buffer)
-                if lib.ZSTD_isError(res):
-                    raise Exception('error ending compression stream: %s' % lib.ZSTD_getErrorName)
+                zresult = lib.ZSTD_endStream(self._cstream, out_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('error ending compression stream: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
 
                 if out_buffer.pos:
-                    self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                    self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
                     out_buffer.pos = 0
 
-                if res == 0:
+                if zresult == 0:
                     break
 
+        self._cstream = None
+        self._compressor = None
+
         return False
 
+    def memory_size(self):
+        if not self._entered:
+            raise ZstdError('cannot determine size of an inactive compressor; '
+                            'call when a context manager is active')
+
+        return lib.ZSTD_sizeof_CStream(self._cstream)
+
     def write(self, data):
+        if not self._entered:
+            raise ZstdError('write() must be called from an active context '
+                            'manager')
+
+        total_write = 0
+
+        data_buffer = ffi.from_buffer(data)
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
         out_buffer = ffi.new('ZSTD_outBuffer *')
-        out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE)
-        out_buffer.size = _CSTREAM_OUT_SIZE
+        dst_buffer = ffi.new('char[]', self._write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = self._write_size
+        out_buffer.pos = 0
+
+        while in_buffer.pos < in_buffer.size:
+            zresult = lib.ZSTD_compressStream(self._cstream, out_buffer, in_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if out_buffer.pos:
+                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                total_write += out_buffer.pos
+                out_buffer.pos = 0
+
+        return total_write
+
+    def flush(self):
+        if not self._entered:
+            raise ZstdError('flush must be called from an active context manager')
+
+        total_write = 0
+
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+        dst_buffer = ffi.new('char[]', self._write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = self._write_size
         out_buffer.pos = 0
 
-        # TODO can we reuse existing memory?
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        in_buffer.src = ffi.new('char[]', data)
-        in_buffer.size = len(data)
-        in_buffer.pos = 0
-        while in_buffer.pos < in_buffer.size:
-            res = lib.ZSTD_compressStream(self._cstream, out_buffer, in_buffer)
-            if lib.ZSTD_isError(res):
-                raise Exception('zstd compress error: %s' % lib.ZSTD_getErrorName(res))
+        while True:
+            zresult = lib.ZSTD_flushStream(self._cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if not out_buffer.pos:
+                break
+
+            self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+            total_write += out_buffer.pos
+            out_buffer.pos = 0
+
+        return total_write
+
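+
+# Illustrative sketch (hypothetical helper): typical use of the streaming
+# writer above. `fh` is a placeholder for any object with a write() method.
+def _example_write_to(fh, chunks, level=3):
+    with ZstdCompressor(level=level).write_to(fh) as compressor:
+        for chunk in chunks:
+            compressor.write(chunk)
+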
+
+class ZstdCompressionObj(object):
+    def compress(self, data):
+        if self._finished:
+            raise ZstdError('cannot call compress() after compressor finished')
+
+        data_buffer = ffi.from_buffer(data)
+        source = ffi.new('ZSTD_inBuffer *')
+        source.src = data_buffer
+        source.size = len(data_buffer)
+        source.pos = 0
+
+        chunks = []
+
+        while source.pos < len(data):
+            zresult = lib.ZSTD_compressStream(self._cstream, self._out, source)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+        return b''.join(chunks)
 
-            if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
-                out_buffer.pos = 0
+    def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
+        if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+            raise ValueError('flush mode not recognized')
+
+        if self._finished:
+            raise ZstdError('compressor object already finished')
+
+        assert self._out.pos == 0
+
+        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
+            zresult = lib.ZSTD_flushStream(self._cstream, self._out)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            # Output buffer is guaranteed to hold full block.
+            assert zresult == 0
+
+            if self._out.pos:
+                result = ffi.buffer(self._out.dst, self._out.pos)[:]
+                self._out.pos = 0
+                return result
+            else:
+                return b''
+
+        assert flush_mode == COMPRESSOBJ_FLUSH_FINISH
+        self._finished = True
+
+        chunks = []
+
+        while True:
+            zresult = lib.ZSTD_endStream(self._cstream, self._out)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+            if not zresult:
+                break
+
+        # GC compression stream immediately.
+        self._cstream = None
+
+        return b''.join(chunks)
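+
+
+# Illustrative sketch (hypothetical helper): ZstdCompressionObj mirrors
+# zlib's compressobj API. `pieces` is a placeholder iterable of bytes.
+def _example_compressobj(pieces):
+    cobj = ZstdCompressor().compressobj()
+    out = [cobj.compress(p) for p in pieces]
+    out.append(cobj.flush())
+    return b''.join(out)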
 
 
 class ZstdCompressor(object):
-    def __init__(self, level=3, dict_data=None, compression_params=None):
-        if dict_data:
-            raise Exception('dict_data not yet supported')
-        if compression_params:
-            raise Exception('compression_params not yet supported')
+    def __init__(self, level=3, dict_data=None, compression_params=None,
+                 write_checksum=False, write_content_size=False,
+                 write_dict_id=True):
+        if level < 1:
+            raise ValueError('level must be greater than 0')
+        elif level > lib.ZSTD_maxCLevel():
+            raise ValueError('level must be at most %d' % lib.ZSTD_maxCLevel())
 
         self._compression_level = level
+        self._dict_data = dict_data
+        self._cparams = compression_params
+        self._fparams = ffi.new('ZSTD_frameParameters *')[0]
+        self._fparams.checksumFlag = write_checksum
+        self._fparams.contentSizeFlag = write_content_size
+        self._fparams.noDictIDFlag = not write_dict_id
 
-    def compress(self, data):
-        # Just use the stream API for now.
-        output = io.BytesIO()
-        with self.write_to(output) as compressor:
-            compressor.write(data)
-        return output.getvalue()
+        cctx = lib.ZSTD_createCCtx()
+        if cctx == ffi.NULL:
+            raise MemoryError()
+
+        self._cctx = ffi.gc(cctx, lib.ZSTD_freeCCtx)
+
+    def compress(self, data, allow_empty=False):
+        if len(data) == 0 and self._fparams.contentSizeFlag and not allow_empty:
+            raise ValueError('cannot write empty inputs when writing content sizes')
+
+        # TODO use a CDict for performance.
+        dict_data = ffi.NULL
+        dict_size = 0
+
+        if self._dict_data:
+            dict_data = self._dict_data.as_bytes()
+            dict_size = len(self._dict_data)
+
+        params = ffi.new('ZSTD_parameters *')[0]
+        if self._cparams:
+            params.cParams = self._cparams.as_compression_parameters()
+        else:
+            params.cParams = lib.ZSTD_getCParams(self._compression_level, len(data),
+                                                 dict_size)
+        params.fParams = self._fparams
+
+        dest_size = lib.ZSTD_compressBound(len(data))
+        out = new_nonzero('char[]', dest_size)
 
-    def copy_stream(self, ifh, ofh):
-        cstream = self._get_cstream()
+        zresult = lib.ZSTD_compress_advanced(self._cctx,
+                                             ffi.addressof(out), dest_size,
+                                             data, len(data),
+                                             dict_data, dict_size,
+                                             params)
+
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('cannot compress: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+        return ffi.buffer(out, zresult)[:]
+
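+    # Example (illustrative): one-shot compression that records the content
+    # size, so decompress() can size its output buffer later:
+    #
+    #   frame = ZstdCompressor(write_content_size=True).compress(data)
+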
+    def compressobj(self, size=0):
+        cstream = self._get_cstream(size)
+        cobj = ZstdCompressionObj()
+        cobj._cstream = cstream
+        cobj._out = ffi.new('ZSTD_outBuffer *')
+        cobj._dst_buffer = ffi.new('char[]', COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        cobj._out.dst = cobj._dst_buffer
+        cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        cobj._out.pos = 0
+        cobj._compressor = self
+        cobj._finished = False
+
+        return cobj
+
+    def copy_stream(self, ifh, ofh, size=0,
+                    read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+                    write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+
+        if not hasattr(ifh, 'read'):
+            raise ValueError('first argument must have a read() method')
+        if not hasattr(ofh, 'write'):
+            raise ValueError('second argument must have a write() method')
+
+        cstream = self._get_cstream(size)
 
         in_buffer = ffi.new('ZSTD_inBuffer *')
         out_buffer = ffi.new('ZSTD_outBuffer *')
 
-        out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE)
-        out_buffer.size = _CSTREAM_OUT_SIZE
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
         out_buffer.pos = 0
 
         total_read, total_write = 0, 0
 
         while True:
-            data = ifh.read(_CSTREAM_IN_SIZE)
+            data = ifh.read(read_size)
             if not data:
                 break
 
-            total_read += len(data)
-
-            in_buffer.src = ffi.new('char[]', data)
-            in_buffer.size = len(data)
+            data_buffer = ffi.from_buffer(data)
+            total_read += len(data_buffer)
+            in_buffer.src = data_buffer
+            in_buffer.size = len(data_buffer)
             in_buffer.pos = 0
 
             while in_buffer.pos < in_buffer.size:
-                res = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer)
-                if lib.ZSTD_isError(res):
-                    raise Exception('zstd compress error: %s' %
-                                    lib.ZSTD_getErrorName(res))
+                zresult = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd compress error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
-                    total_write = out_buffer.pos
+                    total_write += out_buffer.pos
                     out_buffer.pos = 0
 
         # We've finished reading. Flush the compressor.
         while True:
-            res = lib.ZSTD_endStream(cstream, out_buffer)
-            if lib.ZSTD_isError(res):
-                raise Exception('error ending compression stream: %s' %
-                                lib.ZSTD_getErrorName(res))
+            zresult = lib.ZSTD_endStream(cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
 
             if out_buffer.pos:
                 ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
                 total_write += out_buffer.pos
                 out_buffer.pos = 0
 
-            if res == 0:
+            if zresult == 0:
                 break
 
         return total_read, total_write
 
-    def write_to(self, writer):
-        return _ZstdCompressionWriter(self._get_cstream(), writer)
+    def write_to(self, writer, size=0,
+                 write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+
+        if not hasattr(writer, 'write'):
+            raise ValueError('must pass an object with a write() method')
+
+        return ZstdCompressionWriter(self, writer, size, write_size)
+
+    def read_from(self, reader, size=0,
+                  read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+                  write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        if hasattr(reader, 'read'):
+            have_read = True
+        elif hasattr(reader, '__getitem__'):
+            have_read = False
+            buffer_offset = 0
+            size = len(reader)
+        else:
+            raise ValueError('must pass an object with a read() method or '
+                             'one that conforms to the buffer protocol')
+
+        cstream = self._get_cstream(size)
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        in_buffer.src = ffi.NULL
+        in_buffer.size = 0
+        in_buffer.pos = 0
+
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
+        out_buffer.pos = 0
+
+        while True:
+            # We should never have output data sitting around after a previous
+            # iteration.
+            assert out_buffer.pos == 0
+
+            # Collect input data.
+            if have_read:
+                read_result = reader.read(read_size)
+            else:
+                remaining = len(reader) - buffer_offset
+                slice_size = min(remaining, read_size)
+                read_result = reader[buffer_offset:buffer_offset + slice_size]
+                buffer_offset += slice_size
 
-    def _get_cstream(self):
+            # No new input data. Break out of the read loop.
+            if not read_result:
+                break
+
+            # Feed all read data into the compressor and emit output until
+            # exhausted.
+            read_buffer = ffi.from_buffer(read_result)
+            in_buffer.src = read_buffer
+            in_buffer.size = len(read_buffer)
+            in_buffer.pos = 0
+
+            while in_buffer.pos < in_buffer.size:
+                zresult = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd compress error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+                if out_buffer.pos:
+                    data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                    out_buffer.pos = 0
+                    yield data
+
+            assert out_buffer.pos == 0
+
+            # And repeat the loop to collect more data.
+            continue
+
+        # If we get here, input is exhausted. End the stream and emit what
+        # remains.
+        while True:
+            assert out_buffer.pos == 0
+            zresult = lib.ZSTD_endStream(cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if out_buffer.pos:
+                data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                out_buffer.pos = 0
+                yield data
+
+            if zresult == 0:
+                break
+
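+    # Example (illustrative): lazily stream-compress a file object
+    # (`path` and `consume` are placeholders):
+    #
+    #   with open(path, 'rb') as fh:
+    #       for chunk in ZstdCompressor().read_from(fh):
+    #           consume(chunk)
+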
+    def _get_cstream(self, size):
         cstream = lib.ZSTD_createCStream()
+        if cstream == ffi.NULL:
+            raise MemoryError()
+
         cstream = ffi.gc(cstream, lib.ZSTD_freeCStream)
 
-        res = lib.ZSTD_initCStream(cstream, self._compression_level)
-        if lib.ZSTD_isError(res):
+        dict_data = ffi.NULL
+        dict_size = 0
+        if self._dict_data:
+            dict_data = self._dict_data.as_bytes()
+            dict_size = len(self._dict_data)
+
+        zparams = ffi.new('ZSTD_parameters *')[0]
+        if self._cparams:
+            zparams.cParams = self._cparams.as_compression_parameters()
+        else:
+            zparams.cParams = lib.ZSTD_getCParams(self._compression_level,
+                                                  size, dict_size)
+        zparams.fParams = self._fparams
+
+        zresult = lib.ZSTD_initCStream_advanced(cstream, dict_data, dict_size,
+                                                zparams, size)
+        if lib.ZSTD_isError(zresult):
-            raise Exception('cannot init CStream: %s' %
-                            lib.ZSTD_getErrorName(res))
+            raise ZstdError('cannot init CStream: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
 
         return cstream
+
+
+class FrameParameters(object):
+    def __init__(self, fparams):
+        self.content_size = fparams.frameContentSize
+        self.window_size = fparams.windowSize
+        self.dict_id = fparams.dictID
+        self.has_checksum = bool(fparams.checksumFlag)
+
+
+def get_frame_parameters(data):
+    if not isinstance(data, bytes_type):
+        raise TypeError('argument must be bytes')
+
+    params = ffi.new('ZSTD_frameParams *')
+
+    zresult = lib.ZSTD_getFrameParams(params, data, len(data))
+    if lib.ZSTD_isError(zresult):
+        raise ZstdError('cannot get frame parameters: %s' %
+                        ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+    if zresult:
+        raise ZstdError('not enough data for frame parameters; need %d bytes' %
+                        zresult)
+
+    return FrameParameters(params[0])
+
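+
+# Illustrative sketch (hypothetical helper): inspect a frame with
+# get_frame_parameters(). Checksum and content size only appear when the
+# compressor was asked to write them.
+def _example_frame_parameters(data):
+    frame = ZstdCompressor(write_content_size=True,
+                           write_checksum=True).compress(data)
+    params = get_frame_parameters(frame)
+    return params.content_size, params.has_checksum
+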
+
+class ZstdCompressionDict(object):
+    def __init__(self, data):
+        assert isinstance(data, bytes_type)
+        self._data = data
+
+    def __len__(self):
+        return len(self._data)
+
+    def dict_id(self):
+        return int_type(lib.ZDICT_getDictID(self._data, len(self._data)))
+
+    def as_bytes(self):
+        return self._data
+
+
+def train_dictionary(dict_size, samples, parameters=None):
+    if not isinstance(samples, list):
+        raise TypeError('samples must be a list')
+
+    total_size = sum(map(len, samples))
+
+    samples_buffer = new_nonzero('char[]', total_size)
+    sample_sizes = new_nonzero('size_t[]', len(samples))
+
+    offset = 0
+    for i, sample in enumerate(samples):
+        if not isinstance(sample, bytes_type):
+            raise ValueError('samples must be bytes')
+
+        l = len(sample)
+        ffi.memmove(samples_buffer + offset, sample, l)
+        offset += l
+        sample_sizes[i] = l
+
+    dict_data = new_nonzero('char[]', dict_size)
+
+    zresult = lib.ZDICT_trainFromBuffer(ffi.addressof(dict_data), dict_size,
+                                        ffi.addressof(samples_buffer),
+                                        ffi.addressof(sample_sizes, 0),
+                                        len(samples))
+    if lib.ZDICT_isError(zresult):
+        raise ZstdError('Cannot train dict: %s' %
+                        ffi.string(lib.ZDICT_getErrorName(zresult)))
+
+    return ZstdCompressionDict(ffi.buffer(dict_data, zresult)[:])
+
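+
+# Illustrative sketch (hypothetical helper): train a dictionary from sample
+# payloads, then use it for a round trip. `samples` is a placeholder list of
+# bytes objects.
+def _example_dictionary_round_trip(samples, data, dict_size=16384):
+    d = train_dictionary(dict_size, samples)
+    frame = ZstdCompressor(dict_data=d, write_content_size=True).compress(data)
+    return ZstdDecompressor(dict_data=d).decompress(frame)
+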
+
+class ZstdDecompressionObj(object):
+    def __init__(self, decompressor):
+        self._decompressor = decompressor
+        self._dstream = self._decompressor._get_dstream()
+        self._finished = False
+
+    def decompress(self, data):
+        if self._finished:
+            raise ZstdError('cannot use a decompressobj multiple times')
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        data_buffer = ffi.from_buffer(data)
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
+        dst_buffer = ffi.new('char[]', DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = len(dst_buffer)
+        out_buffer.pos = 0
+
+        chunks = []
+
+        while in_buffer.pos < in_buffer.size:
+            zresult = lib.ZSTD_decompressStream(self._dstream, out_buffer, in_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd decompressor error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if zresult == 0:
+                self._finished = True
+                self._dstream = None
+                self._decompressor = None
+
+            if out_buffer.pos:
+                chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                out_buffer.pos = 0
+
+        return b''.join(chunks)
+
+
+class ZstdDecompressionWriter(object):
+    def __init__(self, decompressor, writer, write_size):
+        self._decompressor = decompressor
+        self._writer = writer
+        self._write_size = write_size
+        self._dstream = None
+        self._entered = False
+
+    def __enter__(self):
+        if self._entered:
+            raise ZstdError('cannot __enter__ multiple times')
+
+        self._dstream = self._decompressor._get_dstream()
+        self._entered = True
+
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        self._entered = False
+        self._dstream = None
+
+    def memory_size(self):
+        if not self._dstream:
+            raise ZstdError('cannot determine size of an inactive decompressor; '
+                            'call when a context manager is active')
+
+        return lib.ZSTD_sizeof_DStream(self._dstream)
+
+    def write(self, data):
+        if not self._entered:
+            raise ZstdError('write() must be called from an active context manager')
+
+        total_write = 0
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        data_buffer = ffi.from_buffer(data)
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
+        dst_buffer = ffi.new('char[]', self._write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = len(dst_buffer)
+        out_buffer.pos = 0
+
+        while in_buffer.pos < in_buffer.size:
+            zresult = lib.ZSTD_decompressStream(self._dstream, out_buffer, in_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd decompress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if out_buffer.pos:
+                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                total_write += out_buffer.pos
+                out_buffer.pos = 0
+
+        return total_write
+
+
+class ZstdDecompressor(object):
+    def __init__(self, dict_data=None):
+        self._dict_data = dict_data
+
+        dctx = lib.ZSTD_createDCtx()
+        if dctx == ffi.NULL:
+            raise MemoryError()
+
+        self._refdctx = ffi.gc(dctx, lib.ZSTD_freeDCtx)
+
+    @property
+    def _ddict(self):
+        if self._dict_data:
+            dict_data = self._dict_data.as_bytes()
+            dict_size = len(self._dict_data)
+
+            ddict = lib.ZSTD_createDDict(dict_data, dict_size)
+            if ddict == ffi.NULL:
+                raise ZstdError('could not create decompression dict')
+        else:
+            ddict = None
+
+        self.__dict__['_ddict'] = ddict
+        return ddict
+
+    def decompress(self, data, max_output_size=0):
+        data_buffer = ffi.from_buffer(data)
+
+        orig_dctx = new_nonzero('char[]', lib.ZSTD_sizeof_DCtx(self._refdctx))
+        dctx = ffi.cast('ZSTD_DCtx *', orig_dctx)
+        lib.ZSTD_copyDCtx(dctx, self._refdctx)
+
+        ddict = self._ddict
+
+        output_size = lib.ZSTD_getDecompressedSize(data_buffer, len(data_buffer))
+        if output_size:
+            result_buffer = ffi.new('char[]', output_size)
+            result_size = output_size
+        else:
+            if not max_output_size:
+                raise ZstdError('input data invalid or missing content size '
+                                'in frame header')
+
+            result_buffer = ffi.new('char[]', max_output_size)
+            result_size = max_output_size
+
+        if ddict:
+            zresult = lib.ZSTD_decompress_usingDDict(dctx,
+                                                     result_buffer, result_size,
+                                                     data_buffer, len(data_buffer),
+                                                     ddict)
+        else:
+            zresult = lib.ZSTD_decompressDCtx(dctx,
+                                              result_buffer, result_size,
+                                              data_buffer, len(data_buffer))
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('decompression error: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+        elif output_size and zresult != output_size:
+            raise ZstdError('decompression error: decompressed %d bytes; expected %d' %
+                            (zresult, output_size))
+
+        return ffi.buffer(result_buffer, zresult)[:]
+
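+    # Example (illustrative): one-shot decompression. Frames that do not
+    # record their content size need an explicit max_output_size:
+    #
+    #   data = ZstdDecompressor().decompress(frame, max_output_size=2 ** 20)
+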
+    def decompressobj(self):
+        return ZstdDecompressionObj(self)
+
+    def read_from(self, reader, read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+                  write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+                  skip_bytes=0):
+        if skip_bytes >= read_size:
+            raise ValueError('skip_bytes must be smaller than read_size')
+
+        if hasattr(reader, 'read'):
+            have_read = True
+        elif hasattr(reader, '__getitem__'):
+            have_read = False
+            buffer_offset = 0
+            size = len(reader)
+        else:
+            raise ValueError('must pass an object with a read() method or '
+                             'one that conforms to the buffer protocol')
+
+        if skip_bytes:
+            if have_read:
+                reader.read(skip_bytes)
+            else:
+                if skip_bytes > size:
+                    raise ValueError('skip_bytes larger than first input chunk')
+
+                buffer_offset = skip_bytes
+
+        dstream = self._get_dstream()
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = len(dst_buffer)
+        out_buffer.pos = 0
+
+        while True:
+            assert out_buffer.pos == 0
+
+            if have_read:
+                read_result = reader.read(read_size)
+            else:
+                remaining = size - buffer_offset
+                slice_size = min(remaining, read_size)
+                read_result = reader[buffer_offset:buffer_offset + slice_size]
+                buffer_offset += slice_size
+
+            # No new input. Break out of read loop.
+            if not read_result:
+                break
+
+            # Feed all read data into decompressor and emit output until
+            # exhausted.
+            read_buffer = ffi.from_buffer(read_result)
+            in_buffer.src = read_buffer
+            in_buffer.size = len(read_buffer)
+            in_buffer.pos = 0
+
+            while in_buffer.pos < in_buffer.size:
+                assert out_buffer.pos == 0
+
+                zresult = lib.ZSTD_decompressStream(dstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd decompress error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+                if out_buffer.pos:
+                    data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                    out_buffer.pos = 0
+                    yield data
+
+                if zresult == 0:
+                    return
+
+            # Repeat loop to collect more input data.
+            continue
+
+        # If we get here, input is exhausted.
+
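+    # Example (illustrative): lazily stream-decompress a file object
+    # (`path` and `consume` are placeholders):
+    #
+    #   with open(path, 'rb') as fh:
+    #       for chunk in ZstdDecompressor().read_from(fh):
+    #           consume(chunk)
+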
+    def write_to(self, writer, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        if not hasattr(writer, 'write'):
+            raise ValueError('must pass an object with a write() method')
+
+        return ZstdDecompressionWriter(self, writer, write_size)
+
+    def copy_stream(self, ifh, ofh,
+                    read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+                    write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        if not hasattr(ifh, 'read'):
+            raise ValueError('first argument must have a read() method')
+        if not hasattr(ofh, 'write'):
+            raise ValueError('second argument must have a write() method')
+
+        dstream = self._get_dstream()
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
+        out_buffer.pos = 0
+
+        total_read, total_write = 0, 0
+
+        # Read all available input.
+        while True:
+            data = ifh.read(read_size)
+            if not data:
+                break
+
+            data_buffer = ffi.from_buffer(data)
+            total_read += len(data_buffer)
+            in_buffer.src = data_buffer
+            in_buffer.size = len(data_buffer)
+            in_buffer.pos = 0
+
+            # Flush all read data to output.
+            while in_buffer.pos < in_buffer.size:
+                zresult = lib.ZSTD_decompressStream(dstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd decompressor error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+                if out_buffer.pos:
+                    ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                    total_write += out_buffer.pos
+                    out_buffer.pos = 0
+
+            # Continue loop to keep reading.
+
+        return total_read, total_write
+
+    def decompress_content_dict_chain(self, frames):
+        if not isinstance(frames, list):
+            raise TypeError('argument must be a list')
+
+        if not frames:
+            raise ValueError('empty input chain')
+
+        # First chunk should not be using a dictionary. We handle it specially.
+        chunk = frames[0]
+        if not isinstance(chunk, bytes_type):
+            raise ValueError('chunk 0 must be bytes')
+
+        # All chunks should be zstd frames and should have content size set.
+        chunk_buffer = ffi.from_buffer(chunk)
+        params = ffi.new('ZSTD_frameParams *')
+        zresult = lib.ZSTD_getFrameParams(params, chunk_buffer, len(chunk_buffer))
+        if lib.ZSTD_isError(zresult):
+            raise ValueError('chunk 0 is not a valid zstd frame')
+        elif zresult:
+            raise ValueError('chunk 0 is too small to contain a zstd frame')
+
+        if not params.frameContentSize:
+            raise ValueError('chunk 0 missing content size in frame')
+
+        dctx = lib.ZSTD_createDCtx()
+        if dctx == ffi.NULL:
+            raise MemoryError()
+
+        dctx = ffi.gc(dctx, lib.ZSTD_freeDCtx)
+
+        last_buffer = ffi.new('char[]', params.frameContentSize)
+
+        zresult = lib.ZSTD_decompressDCtx(dctx, last_buffer, len(last_buffer),
+                                          chunk_buffer, len(chunk_buffer))
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('could not decompress chunk 0: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+        # Special case of chain length of 1
+        if len(frames) == 1:
+            return ffi.buffer(last_buffer, len(last_buffer))[:]
+
+        i = 1
+        while i < len(frames):
+            chunk = frames[i]
+            if not isinstance(chunk, bytes_type):
+                raise ValueError('chunk %d must be bytes' % i)
+
+            chunk_buffer = ffi.from_buffer(chunk)
+            zresult = lib.ZSTD_getFrameParams(params, chunk_buffer, len(chunk_buffer))
+            if lib.ZSTD_isError(zresult):
+                raise ValueError('chunk %d is not a valid zstd frame' % i)
+            elif zresult:
+                raise ValueError('chunk %d is too small to contain a zstd frame' % i)
+
+            if not params.frameContentSize:
+                raise ValueError('chunk %d missing content size in frame' % i)
+
+            dest_buffer = ffi.new('char[]', params.frameContentSize)
+
+            zresult = lib.ZSTD_decompress_usingDict(dctx, dest_buffer, len(dest_buffer),
+                                                    chunk_buffer, len(chunk_buffer),
+                                                    last_buffer, len(last_buffer))
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('could not decompress chunk %d: %s' %
+                                (i, ffi.string(lib.ZSTD_getErrorName(zresult))))
+
+            last_buffer = dest_buffer
+            i += 1
+
+        return ffi.buffer(last_buffer, len(last_buffer))[:]
+
+    def _get_dstream(self):
+        dstream = lib.ZSTD_createDStream()
+        if dstream == ffi.NULL:
+            raise MemoryError()
+
+        dstream = ffi.gc(dstream, lib.ZSTD_freeDStream)
+
+        if self._dict_data:
+            zresult = lib.ZSTD_initDStream_usingDict(dstream,
+                                                     self._dict_data.as_bytes(),
+                                                     len(self._dict_data))
+        else:
+            zresult = lib.ZSTD_initDStream(dstream)
+
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('could not initialize DStream: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+        return dstream
--- a/contrib/wix/help.wxs	Sat Feb 25 12:48:50 2017 +0900
+++ b/contrib/wix/help.wxs	Tue Feb 28 11:13:25 2017 -0800
@@ -25,6 +25,7 @@
           <File Name="hgignore.txt" />
           <File Name="hgweb.txt" />
           <File Name="merge-tools.txt" />
+          <File Name="pager.txt" />
           <File Name="patterns.txt" />
           <File Name="phases.txt" />
           <File Name="revisions.txt" />
--- a/hgext/bugzilla.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/bugzilla.py	Tue Feb 28 11:13:25 2017 -0800
@@ -15,14 +15,16 @@
 The bug references can optionally include an update for Bugzilla of the
 hours spent working on the bug. Bugs can also be marked fixed.
 
-Three basic modes of access to Bugzilla are provided:
+Four basic modes of access to Bugzilla are provided:
+
+1. Access via the Bugzilla REST-API. Requires Bugzilla 5.0 or later.
 
-1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
+2. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
 
-2. Check data via the Bugzilla XMLRPC interface and submit bug change
+3. Check data via the Bugzilla XMLRPC interface and submit bug change
    via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
 
-3. Writing directly to the Bugzilla database. Only Bugzilla installations
+4. Writing directly to the Bugzilla database. Only Bugzilla installations
    using MySQL are supported. Requires Python MySQLdb.
 
 Writing directly to the database is susceptible to schema changes, and
@@ -50,11 +52,16 @@
 Bugzilla is used instead as the source of the comment. Marking bugs fixed
 works on all supported Bugzilla versions.
 
+Access via the REST-API needs either a Bugzilla username and password
+or an apikey specified in the configuration. Comments are made under
+the given username or the user associated with the apikey in Bugzilla.
+
 Configuration items common to all access modes:
 
 bugzilla.version
   The access type to use. Values recognized are:
 
+  :``restapi``:      Bugzilla REST-API, Bugzilla 5.0 and later.
   :``xmlrpc``:       Bugzilla XMLRPC interface.
   :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
   :``3.0``:          MySQL access, Bugzilla 3.0 and later.
@@ -135,7 +142,7 @@
 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
 Contains entries of the form ``committer = Bugzilla user``.
 
-XMLRPC access mode configuration:
+XMLRPC and REST-API access mode configuration:
 
 bugzilla.bzurl
   The base URL for the Bugzilla installation.
@@ -148,6 +155,13 @@
 bugzilla.password
   The password for Bugzilla login.
 
+REST-API access mode uses the options listed above as well as:
+
+bugzilla.apikey
+  An apikey generated on the Bugzilla instance for API access.
+  Using an apikey removes the need to store the user and password
+  options.
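+
+  For example, a minimal REST-API configuration (the URL and key shown
+  are placeholders)::
+
+    [bugzilla]
+    version = restapi
+    bzurl = https://bugzilla.example.org
+    apikey = <apikey generated in Bugzilla>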
+
 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
 and also:
 
@@ -279,6 +293,7 @@
 
 from __future__ import absolute_import
 
+import json
 import re
 import time
 
@@ -288,6 +303,7 @@
     cmdutil,
     error,
     mail,
+    url,
     util,
 )
 
@@ -773,6 +789,136 @@
             cmds.append(self.makecommandline("resolution", self.fixresolution))
         self.send_bug_modify_email(bugid, cmds, text, committer)
 
+class NotFound(LookupError):
+    pass
+
+class bzrestapi(bzaccess):
+    """Read and write bugzilla data using the REST API available since
+    Bugzilla 5.0.
+    """
+    def __init__(self, ui):
+        bzaccess.__init__(self, ui)
+        bz = self.ui.config('bugzilla', 'bzurl',
+                            'http://localhost/bugzilla/')
+        self.bzroot = '/'.join([bz, 'rest'])
+        self.apikey = self.ui.config('bugzilla', 'apikey', '')
+        self.user = self.ui.config('bugzilla', 'user', 'bugs')
+        self.passwd = self.ui.config('bugzilla', 'password')
+        self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
+        self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
+                                            'FIXED')
+
+    def apiurl(self, targets, include_fields=None):
+        url = '/'.join([self.bzroot] + [str(t) for t in targets])
+        qv = {}
+        if self.apikey:
+            qv['api_key'] = self.apikey
+        elif self.user and self.passwd:
+            qv['login'] = self.user
+            qv['password'] = self.passwd
+        if include_fields:
+            qv['include_fields'] = include_fields
+        if qv:
+            url = '%s?%s' % (url, util.urlreq.urlencode(qv))
+        return url
+
+    def _fetch(self, burl):
+        try:
+            resp = url.open(self.ui, burl)
+            return json.loads(resp.read())
+        except util.urlerr.httperror as inst:
+            if inst.code == 401:
+                raise error.Abort(_('authorization failed'))
+            if inst.code == 404:
+                raise NotFound()
+            else:
+                raise
+
+    def _submit(self, burl, data, method='POST'):
+        data = json.dumps(data)
+        if method == 'PUT':
+            class putrequest(util.urlreq.request):
+                def get_method(self):
+                    return 'PUT'
+            request_type = putrequest
+        else:
+            request_type = util.urlreq.request
+        req = request_type(burl, data,
+                           {'Content-Type': 'application/json'})
+        try:
+            resp = url.opener(self.ui).open(req)
+            return json.loads(resp.read())
+        except util.urlerr.httperror as inst:
+            if inst.code == 401:
+                raise error.Abort(_('authorization failed'))
+            if inst.code == 404:
+                raise NotFound()
+            else:
+                raise
+
+    def filter_real_bug_ids(self, bugs):
+        '''remove bug IDs that do not exist in Bugzilla from bugs.'''
+        badbugs = set()
+        for bugid in bugs:
+            burl = self.apiurl(('bug', bugid), include_fields='status')
+            try:
+                self._fetch(burl)
+            except NotFound:
+                badbugs.add(bugid)
+        for bugid in badbugs:
+            del bugs[bugid]
+
+    def filter_cset_known_bug_ids(self, node, bugs):
+        '''remove bug IDs where node occurs in comment text from bugs.'''
+        sn = short(node)
+        for bugid in bugs.keys():
+            burl = self.apiurl(('bug', bugid, 'comment'), include_fields='text')
+            result = self._fetch(burl)
+            comments = result['bugs'][str(bugid)]['comments']
+            if any(sn in c['text'] for c in comments):
+                self.ui.status(_('bug %d already knows about changeset %s\n') %
+                               (bugid, sn))
+                del bugs[bugid]
+
+    def updatebug(self, bugid, newstate, text, committer):
+        '''update the specified bug. Add comment text and set new states.
+
+        If possible add the comment as being from the committer of
+        the changeset. Otherwise use the default Bugzilla user.
+        '''
+        bugmod = {}
+        if 'hours' in newstate:
+            bugmod['work_time'] = newstate['hours']
+        if 'fix' in newstate:
+            bugmod['status'] = self.fixstatus
+            bugmod['resolution'] = self.fixresolution
+        if bugmod:
+            # if we have to change the bugs state do it here
+            bugmod['comment'] = {
+                'comment': text,
+                'is_private': False,
+                'is_markdown': False,
+            }
+            burl = self.apiurl(('bug', bugid))
+            self._submit(burl, bugmod, method='PUT')
+            self.ui.debug('updated bug %s\n' % bugid)
+        else:
+            burl = self.apiurl(('bug', bugid, 'comment'))
+            self._submit(burl, {
+                'comment': text,
+                'is_private': False,
+                'is_markdown': False,
+            })
+            self.ui.debug('added comment to bug %s\n' % bugid)
+
+    def notify(self, bugs, committer):
+        '''Force sending of Bugzilla notification emails.
+
+        Only required if the access method does not trigger notification
+        emails automatically.
+        '''
+        pass
+
 class bugzilla(object):
     # supported versions of bugzilla. different versions have
     # different schemas.
@@ -781,7 +927,8 @@
         '2.18': bzmysql_2_18,
         '3.0':  bzmysql_3_0,
         'xmlrpc': bzxmlrpc,
-        'xmlrpc+email': bzxmlrpcemail
+        'xmlrpc+email': bzxmlrpcemail,
+        'restapi': bzrestapi,
         }
 
     _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
--- a/hgext/color.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/color.py	Tue Feb 28 11:13:25 2017 -0800
@@ -164,18 +164,17 @@
 
 from __future__ import absolute_import
 
+try:
+    import curses
+    curses.COLOR_BLACK # force import
+except ImportError:
+    curses = None
+
 from mercurial.i18n import _
 from mercurial import (
     cmdutil,
     color,
     commands,
-    dispatch,
-    encoding,
-    extensions,
-    pycompat,
-    subrepo,
-    ui as uimod,
-    util,
 )
 
 cmdtable = {}
@@ -186,294 +185,15 @@
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
-# start and stop parameters for effects
-_effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
-            'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
-            'italic': 3, 'underline': 4, 'inverse': 7, 'dim': 2,
-            'black_background': 40, 'red_background': 41,
-            'green_background': 42, 'yellow_background': 43,
-            'blue_background': 44, 'purple_background': 45,
-            'cyan_background': 46, 'white_background': 47}
-
-def _terminfosetup(ui, mode):
-    '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
-
-    # If we failed to load curses, we go ahead and return.
-    if not _terminfo_params:
-        return
-    # Otherwise, see what the config file says.
-    if mode not in ('auto', 'terminfo'):
-        return
-
-    _terminfo_params.update((key[6:], (False, int(val), ''))
-        for key, val in ui.configitems('color')
-        if key.startswith('color.'))
-    _terminfo_params.update((key[9:], (True, '', val.replace('\\E', '\x1b')))
-        for key, val in ui.configitems('color')
-        if key.startswith('terminfo.'))
-
-    try:
-        curses.setupterm()
-    except curses.error as e:
-        _terminfo_params.clear()
-        return
-
-    for key, (b, e, c) in _terminfo_params.items():
-        if not b:
-            continue
-        if not c and not curses.tigetstr(e):
-            # Most terminals don't support dim, invis, etc, so don't be
-            # noisy and use ui.debug().
-            ui.debug("no terminfo entry for %s\n" % e)
-            del _terminfo_params[key]
-    if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
-        # Only warn about missing terminfo entries if we explicitly asked for
-        # terminfo mode.
-        if mode == "terminfo":
-            ui.warn(_("no terminfo entry for setab/setaf: reverting to "
-              "ECMA-48 color\n"))
-        _terminfo_params.clear()
-
-def _modesetup(ui, coloropt):
-    if coloropt == 'debug':
-        return 'debug'
-
-    auto = (coloropt == 'auto')
-    always = not auto and util.parsebool(coloropt)
-    if not always and not auto:
-        return None
-
-    formatted = (always or (encoding.environ.get('TERM') != 'dumb'
-                 and ui.formatted()))
-
-    mode = ui.config('color', 'mode', 'auto')
-
-    # If pager is active, color.pagermode overrides color.mode.
-    if getattr(ui, 'pageractive', False):
-        mode = ui.config('color', 'pagermode', mode)
-
-    realmode = mode
-    if mode == 'auto':
-        if pycompat.osname == 'nt':
-            term = encoding.environ.get('TERM')
-            # TERM won't be defined in a vanilla cmd.exe environment.
-
-            # UNIX-like environments on Windows such as Cygwin and MSYS will
-            # set TERM. They appear to make a best effort attempt at setting it
-            # to something appropriate. However, not all environments with TERM
-            # defined support ANSI. Since "ansi" could result in terminal
-            # gibberish, we error on the side of selecting "win32". However, if
-            # w32effects is not defined, we almost certainly don't support
-            # "win32", so don't even try.
-            if (term and 'xterm' in term) or not w32effects:
-                realmode = 'ansi'
-            else:
-                realmode = 'win32'
-        else:
-            realmode = 'ansi'
-
-    def modewarn():
-        # only warn if color.mode was explicitly set and we're in
-        # a formatted terminal
-        if mode == realmode and ui.formatted():
-            ui.warn(_('warning: failed to set color mode to %s\n') % mode)
-
-    if realmode == 'win32':
-        _terminfo_params.clear()
-        if not w32effects:
-            modewarn()
-            return None
-        _effects.update(w32effects)
-    elif realmode == 'ansi':
-        _terminfo_params.clear()
-    elif realmode == 'terminfo':
-        _terminfosetup(ui, mode)
-        if not _terminfo_params:
-            ## FIXME Shouldn't we return None in this case too?
-            modewarn()
-            realmode = 'ansi'
-    else:
-        return None
-
-    if always or (auto and formatted):
-        return realmode
-    return None
-
-try:
-    import curses
-    # Mapping from effect name to terminfo attribute name (or raw code) or
-    # color number.  This will also force-load the curses module.
-    _terminfo_params = {'none': (True, 'sgr0', ''),
-                        'standout': (True, 'smso', ''),
-                        'underline': (True, 'smul', ''),
-                        'reverse': (True, 'rev', ''),
-                        'inverse': (True, 'rev', ''),
-                        'blink': (True, 'blink', ''),
-                        'dim': (True, 'dim', ''),
-                        'bold': (True, 'bold', ''),
-                        'invisible': (True, 'invis', ''),
-                        'italic': (True, 'sitm', ''),
-                        'black': (False, curses.COLOR_BLACK, ''),
-                        'red': (False, curses.COLOR_RED, ''),
-                        'green': (False, curses.COLOR_GREEN, ''),
-                        'yellow': (False, curses.COLOR_YELLOW, ''),
-                        'blue': (False, curses.COLOR_BLUE, ''),
-                        'magenta': (False, curses.COLOR_MAGENTA, ''),
-                        'cyan': (False, curses.COLOR_CYAN, ''),
-                        'white': (False, curses.COLOR_WHITE, '')}
-except ImportError:
-    _terminfo_params = {}
-
-def _effect_str(effect):
-    '''Helper function for render_effects().'''
-
-    bg = False
-    if effect.endswith('_background'):
-        bg = True
-        effect = effect[:-11]
-    try:
-        attr, val, termcode = _terminfo_params[effect]
-    except KeyError:
-        return ''
-    if attr:
-        if termcode:
-            return termcode
-        else:
-            return curses.tigetstr(val)
-    elif bg:
-        return curses.tparm(curses.tigetstr('setab'), val)
-    else:
-        return curses.tparm(curses.tigetstr('setaf'), val)
-
-def render_effects(text, effects):
-    'Wrap text in commands to turn on each effect.'
-    if not text:
-        return text
-    if not _terminfo_params:
-        start = [str(_effects[e]) for e in ['none'] + effects.split()]
-        start = '\033[' + ';'.join(start) + 'm'
-        stop = '\033[' + str(_effects['none']) + 'm'
-    else:
-        start = ''.join(_effect_str(effect)
-                        for effect in ['none'] + effects.split())
-        stop = _effect_str('none')
-    return ''.join([start, text, stop])
-
-def valideffect(effect):
-    'Determine if the effect is valid or not.'
-    good = False
-    if not _terminfo_params and effect in _effects:
-        good = True
-    elif effect in _terminfo_params or effect[:-11] in _terminfo_params:
-        good = True
-    return good
-
-def configstyles(ui):
-    for status, cfgeffects in ui.configitems('color'):
-        if '.' not in status or status.startswith(('color.', 'terminfo.')):
-            continue
-        cfgeffects = ui.configlist('color', status)
-        if cfgeffects:
-            good = []
-            for e in cfgeffects:
-                if valideffect(e):
-                    good.append(e)
-                else:
-                    ui.warn(_("ignoring unknown color/effect %r "
-                              "(configured in color.%s)\n")
-                            % (e, status))
-            color._styles[status] = ' '.join(good)
-
-class colorui(uimod.ui):
-    _colormode = 'ansi'
-    def write(self, *args, **opts):
-        if self._colormode is None:
-            return super(colorui, self).write(*args, **opts)
-
-        label = opts.get('label', '')
-        if self._buffers and not opts.get('prompt', False):
-            if self._bufferapplylabels:
-                self._buffers[-1].extend(self.label(a, label) for a in args)
-            else:
-                self._buffers[-1].extend(args)
-        elif self._colormode == 'win32':
-            for a in args:
-                win32print(a, super(colorui, self).write, **opts)
-        else:
-            return super(colorui, self).write(
-                *[self.label(a, label) for a in args], **opts)
-
-    def write_err(self, *args, **opts):
-        if self._colormode is None:
-            return super(colorui, self).write_err(*args, **opts)
-
-        label = opts.get('label', '')
-        if self._bufferstates and self._bufferstates[-1][0]:
-            return self.write(*args, **opts)
-        if self._colormode == 'win32':
-            for a in args:
-                win32print(a, super(colorui, self).write_err, **opts)
-        else:
-            return super(colorui, self).write_err(
-                *[self.label(a, label) for a in args], **opts)
-
-    def showlabel(self, msg, label):
-        if label and msg:
-            if msg[-1] == '\n':
-                return "[%s|%s]\n" % (label, msg[:-1])
-            else:
-                return "[%s|%s]" % (label, msg)
-        else:
-            return msg
-
-    def label(self, msg, label):
-        if self._colormode is None:
-            return super(colorui, self).label(msg, label)
-
-        if self._colormode == 'debug':
-            return self.showlabel(msg, label)
-
-        effects = []
-        for l in label.split():
-            s = color._styles.get(l, '')
-            if s:
-                effects.append(s)
-            elif valideffect(l):
-                effects.append(l)
-        effects = ' '.join(effects)
-        if effects:
-            return '\n'.join([render_effects(line, effects)
-                              for line in msg.split('\n')])
-        return msg
-
-def uisetup(ui):
-    if ui.plain():
-        return
-    if not isinstance(ui, colorui):
-        colorui.__bases__ = (ui.__class__,)
-        ui.__class__ = colorui
-    def colorcmd(orig, ui_, opts, cmd, cmdfunc):
-        mode = _modesetup(ui_, opts['color'])
-        colorui._colormode = mode
-        if mode and mode != 'debug':
-            configstyles(ui_)
-        return orig(ui_, opts, cmd, cmdfunc)
-    def colorgit(orig, gitsub, commands, env=None, stream=False, cwd=None):
-        if gitsub.ui._colormode and len(commands) and commands[0] == "diff":
-                # insert the argument in the front,
-                # the end of git diff arguments is used for paths
-                commands.insert(1, '--color')
-        return orig(gitsub, commands, env, stream, cwd)
-    extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
-    extensions.wrapfunction(subrepo.gitsubrepo, '_gitnodir', colorgit)
-
 def extsetup(ui):
-    commands.globalopts.append(
-        ('', 'color', 'auto',
-         # i18n: 'always', 'auto', 'never', and 'debug' are keywords
-         # and should not be translated
-         _("when to colorize (boolean, always, auto, never, or debug)"),
-         _('TYPE')))
+    # change default color config
+    color._enabledbydefault = True
+    for idx, entry in enumerate(commands.globalopts):
+        if entry[1] == 'color':
+            patch = (entry[3].replace(' (EXPERIMENTAL)', ''),)
+            new = entry[:3] + patch + entry[4:]
+            commands.globalopts[idx] = new
+            break
 
 @command('debugcolor',
         [('', 'style', None, _('show all configured styles'))],
@@ -487,31 +207,31 @@
         return _debugdisplaycolor(ui)
 
 def _debugdisplaycolor(ui):
-    oldstyle = color._styles.copy()
+    oldstyle = ui._styles.copy()
     try:
-        color._styles.clear()
-        for effect in _effects.keys():
-            color._styles[effect] = effect
-        if _terminfo_params:
+        ui._styles.clear()
+        for effect in color._effects.keys():
+            ui._styles[effect] = effect
+        if ui._terminfoparams:
             for k, v in ui.configitems('color'):
                 if k.startswith('color.'):
-                    color._styles[k] = k[6:]
+                    ui._styles[k] = k[6:]
                 elif k.startswith('terminfo.'):
-                    color._styles[k] = k[9:]
+                    ui._styles[k] = k[9:]
         ui.write(_('available colors:\n'))
         # sort label with a '_' after the other to group '_background' entry.
-        items = sorted(color._styles.items(),
+        items = sorted(ui._styles.items(),
                        key=lambda i: ('_' in i[0], i[0], i[1]))
         for colorname, label in items:
             ui.write(('%s\n') % colorname, label=label)
     finally:
-        color._styles.clear()
-        color._styles.update(oldstyle)
+        ui._styles.clear()
+        ui._styles.update(oldstyle)
 
 def _debugdisplaystyle(ui):
     ui.write(_('available style:\n'))
-    width = max(len(s) for s in color._styles)
-    for label, effects in sorted(color._styles.items()):
+    width = max(len(s) for s in ui._styles)
+    for label, effects in sorted(ui._styles.items()):
         ui.write('%s' % label, label=label)
         if effects:
             # 50
@@ -519,138 +239,3 @@
             ui.write(' ' * (max(0, width - len(label))))
             ui.write(', '.join(ui.label(e, e) for e in effects.split()))
         ui.write('\n')
-
-if pycompat.osname != 'nt':
-    w32effects = None
-else:
-    import ctypes
-    import re
-
-    _kernel32 = ctypes.windll.kernel32
-
-    _WORD = ctypes.c_ushort
-
-    _INVALID_HANDLE_VALUE = -1
-
-    class _COORD(ctypes.Structure):
-        _fields_ = [('X', ctypes.c_short),
-                    ('Y', ctypes.c_short)]
-
-    class _SMALL_RECT(ctypes.Structure):
-        _fields_ = [('Left', ctypes.c_short),
-                    ('Top', ctypes.c_short),
-                    ('Right', ctypes.c_short),
-                    ('Bottom', ctypes.c_short)]
-
-    class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
-        _fields_ = [('dwSize', _COORD),
-                    ('dwCursorPosition', _COORD),
-                    ('wAttributes', _WORD),
-                    ('srWindow', _SMALL_RECT),
-                    ('dwMaximumWindowSize', _COORD)]
-
-    _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
-    _STD_ERROR_HANDLE = 0xfffffff4  # (DWORD)-12
-
-    _FOREGROUND_BLUE = 0x0001
-    _FOREGROUND_GREEN = 0x0002
-    _FOREGROUND_RED = 0x0004
-    _FOREGROUND_INTENSITY = 0x0008
-
-    _BACKGROUND_BLUE = 0x0010
-    _BACKGROUND_GREEN = 0x0020
-    _BACKGROUND_RED = 0x0040
-    _BACKGROUND_INTENSITY = 0x0080
-
-    _COMMON_LVB_REVERSE_VIDEO = 0x4000
-    _COMMON_LVB_UNDERSCORE = 0x8000
-
-    # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
-    w32effects = {
-        'none': -1,
-        'black': 0,
-        'red': _FOREGROUND_RED,
-        'green': _FOREGROUND_GREEN,
-        'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
-        'blue': _FOREGROUND_BLUE,
-        'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
-        'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
-        'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
-        'bold': _FOREGROUND_INTENSITY,
-        'black_background': 0x100,                  # unused value > 0x0f
-        'red_background': _BACKGROUND_RED,
-        'green_background': _BACKGROUND_GREEN,
-        'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
-        'blue_background': _BACKGROUND_BLUE,
-        'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
-        'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
-        'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
-                             _BACKGROUND_BLUE),
-        'bold_background': _BACKGROUND_INTENSITY,
-        'underline': _COMMON_LVB_UNDERSCORE,  # double-byte charsets only
-        'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
-    }
-
-    passthrough = set([_FOREGROUND_INTENSITY,
-                       _BACKGROUND_INTENSITY,
-                       _COMMON_LVB_UNDERSCORE,
-                       _COMMON_LVB_REVERSE_VIDEO])
-
-    stdout = _kernel32.GetStdHandle(
-                  _STD_OUTPUT_HANDLE)  # don't close the handle returned
-    if stdout is None or stdout == _INVALID_HANDLE_VALUE:
-        w32effects = None
-    else:
-        csbi = _CONSOLE_SCREEN_BUFFER_INFO()
-        if not _kernel32.GetConsoleScreenBufferInfo(
-                    stdout, ctypes.byref(csbi)):
-            # stdout may not support GetConsoleScreenBufferInfo()
-            # when called from subprocess or redirected
-            w32effects = None
-        else:
-            origattr = csbi.wAttributes
-            ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
-                                re.MULTILINE | re.DOTALL)
-
-    def win32print(text, orig, **opts):
-        label = opts.get('label', '')
-        attr = origattr
-
-        def mapcolor(val, attr):
-            if val == -1:
-                return origattr
-            elif val in passthrough:
-                return attr | val
-            elif val > 0x0f:
-                return (val & 0x70) | (attr & 0x8f)
-            else:
-                return (val & 0x07) | (attr & 0xf8)
-
-        # determine console attributes based on labels
-        for l in label.split():
-            style = color._styles.get(l, '')
-            for effect in style.split():
-                try:
-                    attr = mapcolor(w32effects[effect], attr)
-                except KeyError:
-                    # w32effects could not have certain attributes so we skip
-                    # them if not found
-                    pass
-        # hack to ensure regexp finds data
-        if not text.startswith('\033['):
-            text = '\033[m' + text
-
-        # Look for ANSI-like codes embedded in text
-        m = re.match(ansire, text)
-
-        try:
-            while m:
-                for sattr in m.group(1).split(';'):
-                    if sattr:
-                        attr = mapcolor(int(sattr), attr)
-                _kernel32.SetConsoleTextAttribute(stdout, attr)
-                orig(m.group(2), **opts)
-                m = re.match(ansire, m.group(3))
-        finally:
-            # Explicitly reset original attributes
-            _kernel32.SetConsoleTextAttribute(stdout, origattr)
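
For reference, the ECMA-48 rendering that the removed block implemented
(and that mercurial/color.py re-adds below) boils down to wrapping text
in SGR escape sequences. A minimal self-contained sketch::

    # each effect maps to an SGR parameter; 'none' (0) resets everything
    _effects = {'none': 0, 'red': 31, 'green': 32, 'bold': 1,
                'red_background': 41}

    def render_effects(text, effects):
        # emit the codes turning each effect on, the text, then a reset
        start = ';'.join(str(_effects[e]) for e in ['none'] + effects.split())
        return '\033[%sm%s\033[%dm' % (start, text, _effects['none'])

    print(render_effects('deleted line', 'red bold'))  # '\x1b[0;31;1m...'
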
--- a/hgext/convert/subversion.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/convert/subversion.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1306,7 +1306,7 @@
             self.setexec = []
 
         fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
-        fp = os.fdopen(fd, 'w')
+        fp = os.fdopen(fd, pycompat.sysstr('w'))
         fp.write(commit.desc)
         fp.close()
         try:
--- a/hgext/extdiff.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/extdiff.py	Tue Feb 28 11:13:25 2017 -0800
@@ -273,7 +273,7 @@
         cmdline = re.sub(regex, quote, cmdline)
 
         ui.debug('running %r in %s\n' % (cmdline, tmproot))
-        ui.system(cmdline, cwd=tmproot)
+        ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
 
         for copy_fn, working_fn, mtime in fns_and_mtime:
             if os.lstat(copy_fn).st_mtime != mtime:
--- a/hgext/gpg.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/gpg.py	Tue Feb 28 11:13:25 2017 -0800
@@ -18,6 +18,7 @@
     error,
     match,
     node as hgnode,
+    pycompat,
     util,
 )
 
@@ -44,11 +45,11 @@
         try:
             # create temporary files
             fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
-            fp = os.fdopen(fd, 'wb')
+            fp = os.fdopen(fd, pycompat.sysstr('wb'))
             fp.write(sig)
             fp.close()
             fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
-            fp = os.fdopen(fd, 'wb')
+            fp = os.fdopen(fd, pycompat.sysstr('wb'))
             fp.write(data)
             fp.close()
             gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
--- a/hgext/histedit.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/histedit.py	Tue Feb 28 11:13:25 2017 -0800
@@ -36,7 +36,7 @@
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
  #  f, fold = use commit, but combine it with the one above
- #  r, roll = like fold, but discard this commit's description
+ #  r, roll = like fold, but discard this commit's description and date
  #  d, drop = remove commit from history
  #  m, mess = edit commit message without changing commit content
  #
@@ -58,7 +58,7 @@
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
  #  f, fold = use commit, but combine it with the one above
- #  r, roll = like fold, but discard this commit's description
+ #  r, roll = like fold, but discard this commit's description and date
  #  d, drop = remove commit from history
  #  m, mess = edit commit message without changing commit content
  #
@@ -71,11 +71,11 @@
  ***
  Add delta
 
-Edit the commit message to your liking, then close the editor. For
-this example, let's assume that the commit message was changed to
-``Add beta and delta.`` After histedit has run and had a chance to
-remove any old or temporary revisions it needed, the history looks
-like this::
+Edit the commit message to your liking, then close the editor. The date used
+for the commit will be the later of the two commits' dates. For this example,
+let's assume that the commit message was changed to ``Add beta and delta.``
+After histedit has run and had a chance to remove any old or temporary
+revisions it needed, the history looks like this::
 
  @  2[tip]   989b4d060121   2009-04-27 18:04 -0500   durin42
  |    Add beta and delta.
@@ -97,9 +97,10 @@
 allowing you to edit files freely, or even use ``hg record`` to commit
 some changes as a separate commit. When you're done, any remaining
 uncommitted changes will be committed as well. When done, run ``hg
-histedit --continue`` to finish this step. You'll be prompted for a
-new commit message, but the default commit message will be the
-original message for the ``edit`` ed revision.
+histedit --continue`` to finish this step. If there are uncommitted
+changes, you'll be prompted for a new commit message, but the default
+commit message will be the original message for the ``edit`` ed
+revision, and the date of the original commit will be preserved.
 
 The ``message`` operation will give you a chance to revise a commit
 message without changing the contents. It's a shortcut for doing
@@ -724,6 +725,15 @@
         """
         return True
 
+    def firstdate(self):
+        """Returns true if the rule should preserve the date of the first
+        change.
+
+        This exists mainly so that 'rollup' rules can be a subclass of
+        'fold'.
+        """
+        return False
+
     def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
         parent = ctx.parents()[0].node()
         repo.ui.pushbuffer()
@@ -742,7 +752,10 @@
                 [oldctx.description()]) + '\n'
         commitopts['message'] = newmessage
         # date
-        commitopts['date'] = max(ctx.date(), oldctx.date())
+        if self.firstdate():
+            commitopts['date'] = ctx.date()
+        else:
+            commitopts['date'] = max(ctx.date(), oldctx.date())
         extra = ctx.extra().copy()
         # histedit_source
         # note: ctx is likely a temporary commit but that the best we can do
@@ -809,7 +822,7 @@
         return True
 
 @action(["roll", "r"],
-        _("like fold, but discard this commit's description"))
+        _("like fold, but discard this commit's description and date"))
 class rollup(fold):
     def mergedescs(self):
         return False
@@ -817,6 +830,9 @@
     def skipprompt(self):
         return True
 
+    def firstdate(self):
+        return True
+
 @action(["drop", "d"],
         _('remove commit from history'))
 class drop(histeditaction):
@@ -884,11 +900,11 @@
 
     - `mess` to reword the changeset commit message
 
-    - `fold` to combine it with the preceding changeset
+    - `fold` to combine it with the preceding changeset (using the later date)
 
-    - `roll` like fold, but discarding this commit's description
+    - `roll` like fold, but discarding this commit's description and date
 
-    - `edit` to edit this changeset
+    - `edit` to edit this changeset (preserving date)
 
     There are a number of ways to select the root changeset:
 
@@ -992,7 +1008,8 @@
 
 def _readfile(ui, path):
     if path == '-':
-        return ui.fin.read()
+        with ui.timeblockedsection('histedit'):
+            return ui.fin.read()
     else:
         with open(path, 'rb') as f:
             return f.read()
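
The date handling above is small but easy to misread: Mercurial dates
are (unixtime, tzoffset) tuples, so the max() in finishfold() picks the
later of the two commits' dates for fold, while the new firstdate() hook
lets roll keep the first commit's date. Just that selection, sketched::

    def folddate(firstdate, ctxdate, oldctxdate):
        # firstdate is True for 'roll', False for 'fold'
        if firstdate:
            return ctxdate  # roll: keep the first commit's date
        return max(ctxdate, oldctxdate)  # fold: use the later date

    assert folddate(False, (100, 0), (200, 0)) == (200, 0)
    assert folddate(True, (100, 0), (200, 0)) == (100, 0)
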
--- a/hgext/largefiles/overrides.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/largefiles/overrides.py	Tue Feb 28 11:13:25 2017 -0800
@@ -22,8 +22,8 @@
     match as matchmod,
     pathutil,
     registrar,
-    revset,
     scmutil,
+    smartset,
     util,
 )
 
@@ -855,7 +855,7 @@
         firstpulled = repo.firstpulled
     except AttributeError:
         raise error.Abort(_("pulled() only available in --lfrev"))
-    return revset.baseset([r for r in subset if r >= firstpulled])
+    return smartset.baseset([r for r in subset if r >= firstpulled])
 
 def overrideclone(orig, ui, source, dest=None, **opts):
     d = dest
@@ -993,9 +993,9 @@
 
     archiver.done()
 
-def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
+def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
     if not repo._repo.lfstatus:
-        return orig(repo, archiver, prefix, match)
+        return orig(repo, archiver, prefix, match, decode)
 
     repo._get(repo._state + ('hg',))
     rev = repo._state[1]
@@ -1010,6 +1010,8 @@
         if match and not match(f):
             return
         data = getdata()
+        if decode:
+            data = repo._repo.wwritedata(name, data)
 
         archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
 
@@ -1037,7 +1039,7 @@
         sub = ctx.workingsub(subpath)
         submatch = matchmod.subdirmatcher(subpath, match)
         sub._repo.lfstatus = True
-        sub.archive(archiver, prefix + repo._path + '/', submatch)
+        sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
 
 # If a largefile is modified, the change is not reflected in its
 # standin until a commit. cmdutil.bailifchanged() raises an exception
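
The new decode parameter threads through subrepo archiving so that
working-directory decode filters (for example EOL conversion) are
applied to archived largefile data, matching what mercurial/archival.py
does for regular files. Per file this amounts to roughly the following,
where archiver, repo and getdata stand in for the surrounding code::

    def write(archiver, repo, prefix, name, mode, islink, getdata,
              decode=True):
        data = getdata()
        if decode:
            # run the repo's working-directory decode filters on the data
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)
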
--- a/hgext/mq.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/mq.py	Tue Feb 28 11:13:25 2017 -0800
@@ -14,7 +14,7 @@
 Known patches are represented as patch files in the .hg/patches
 directory. Applied patches are both patch files and changesets.
 
-Common tasks (use :hg:`help command` for more details)::
+Common tasks (use :hg:`help COMMAND` for more details)::
 
   create new patch                          qnew
   import existing patch                     qimport
@@ -89,8 +89,9 @@
     phases,
     pycompat,
     registrar,
-    revset,
+    revsetlang,
     scmutil,
+    smartset,
     subrepo,
     util,
 )
@@ -2675,6 +2676,7 @@
 
     Returns 0 on success.
     """
+    ui.pager('qdiff')
     repo.mq.diff(repo, pats, opts)
     return 0
 
@@ -3567,9 +3569,9 @@
 def revsetmq(repo, subset, x):
     """Changesets managed by MQ.
     """
-    revset.getargs(x, 0, 0, _("mq takes no arguments"))
+    revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
     applied = set([repo[r.node].rev() for r in repo.mq.applied])
-    return revset.baseset([r for r in subset if r in applied])
+    return smartset.baseset([r for r in subset if r in applied])
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsetmq]
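
mq is one of several extensions in this changeset migrating from the
old monolithic revset module to the new split, where parsing helpers
live in revsetlang and the result classes in smartset. A sketch of a
third-party predicate under the new layout (the predicate and the repo
attribute are hypothetical)::

    from mercurial import registrar, revsetlang, smartset

    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate('applied()')
    def applied(repo, subset, x):
        """Changesets this (hypothetical) extension considers applied."""
        revsetlang.getargs(x, 0, 0, "applied takes no arguments")
        appliedrevs = getattr(repo, '_appliedrevs', set())  # hypothetical
        return smartset.baseset([r for r in subset if r in appliedrevs])
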
--- a/hgext/pager.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/pager.py	Tue Feb 28 11:13:25 2017 -0800
@@ -12,68 +12,22 @@
 #
 # Run 'hg help pager' to get info on configuration.
 
-'''browse command output with an external pager
-
-To set the pager that should be used, set the application variable::
-
-  [pager]
-  pager = less -FRX
-
-If no pager is set, the pager extensions uses the environment variable
-$PAGER. If neither pager.pager, nor $PAGER is set, no pager is used.
-
-You can disable the pager for certain commands by adding them to the
-pager.ignore list::
+'''browse command output with an external pager (DEPRECATED)
 
-  [pager]
-  ignore = version, help, update
-
-You can also enable the pager only for certain commands using
-pager.attend. Below is the default list of commands to be paged::
-
-  [pager]
-  attend = annotate, cat, diff, export, glog, log, qdiff
-
-Setting pager.attend to an empty value will cause all commands to be
-paged.
-
-If pager.attend is present, pager.ignore will be ignored.
-
-Lastly, you can enable and disable paging for individual commands with
-the attend-<command> option. This setting takes precedence over
-existing attend and ignore options and defaults::
+Use the attend-<command> option to forcibly enable paging for
+individual commands that don't typically request pagination. This
+setting takes precedence over ignore options and defaults::
 
   [pager]
   attend-cat = false
-
-To ignore global commands like :hg:`version` or :hg:`help`, you have
-to specify them in your user configuration file.
-
-To control whether the pager is used at all for an individual command,
-you can use --pager=<value>::
-
-  - use as needed: `auto`.
-  - require the pager: `yes` or `on`.
-  - suppress the pager: `no` or `off` (any unrecognized value
-  will also work).
-
 '''
 from __future__ import absolute_import
 
-import atexit
-import os
-import signal
-import subprocess
-import sys
-
-from mercurial.i18n import _
 from mercurial import (
     cmdutil,
     commands,
     dispatch,
-    encoding,
     extensions,
-    util,
     )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -82,58 +36,12 @@
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
-def _runpager(ui, p):
-    pager = subprocess.Popen(p, shell=True, bufsize=-1,
-                             close_fds=util.closefds, stdin=subprocess.PIPE,
-                             stdout=util.stdout, stderr=util.stderr)
-
-    # back up original file objects and descriptors
-    olduifout = ui.fout
-    oldstdout = util.stdout
-    stdoutfd = os.dup(util.stdout.fileno())
-    stderrfd = os.dup(util.stderr.fileno())
-
-    # create new line-buffered stdout so that output can show up immediately
-    ui.fout = util.stdout = newstdout = os.fdopen(util.stdout.fileno(), 'wb', 1)
-    os.dup2(pager.stdin.fileno(), util.stdout.fileno())
-    if ui._isatty(util.stderr):
-        os.dup2(pager.stdin.fileno(), util.stderr.fileno())
-
-    @atexit.register
-    def killpager():
-        if util.safehasattr(signal, "SIGINT"):
-            signal.signal(signal.SIGINT, signal.SIG_IGN)
-        pager.stdin.close()
-        ui.fout = olduifout
-        util.stdout = oldstdout
-        # close new stdout while it's associated with pager; otherwise stdout
-        # fd would be closed when newstdout is deleted
-        newstdout.close()
-        # restore original fds: stdout is open again
-        os.dup2(stdoutfd, util.stdout.fileno())
-        os.dup2(stderrfd, util.stderr.fileno())
-        pager.wait()
-
 def uisetup(ui):
-    class pagerui(ui.__class__):
-        def _runpager(self, pagercmd):
-            _runpager(self, pagercmd)
-
-    ui.__class__ = pagerui
 
     def pagecmd(orig, ui, options, cmd, cmdfunc):
-        p = ui.config("pager", "pager", encoding.environ.get("PAGER"))
-        usepager = False
-        always = util.parsebool(options['pager'])
         auto = options['pager'] == 'auto'
-
-        if not p or '--debugger' in sys.argv or not ui.formatted():
-            pass
-        elif always:
-            usepager = True
-        elif not auto:
+        if auto and not ui.pageractive:
             usepager = False
-        else:
             attend = ui.configlist('pager', 'attend', attended)
             ignore = ui.configlist('pager', 'ignore')
             cmds, _ = cmdutil.findcmd(cmd, commands.table)
@@ -148,14 +56,14 @@
                     usepager = True
                     break
 
-        setattr(ui, 'pageractive', usepager)
-
-        if usepager:
-            ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
-            ui.setconfig('ui', 'interactive', False, 'pager')
-            if util.safehasattr(signal, "SIGPIPE"):
-                signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-            ui._runpager(p)
+            if usepager:
+                # Slight hack: the attend list is supposed to override
+                # the ignore list for the pager extension, but the
+                # core code doesn't know about attend, so we have to
+                # lobotomize the ignore list so that the extension's
+                # behavior is preserved.
+                ui.setconfig('pager', 'ignore', '', 'pager')
+                ui.pager('extension-via-attend-' + cmd)
         return orig(ui, options, cmd, cmdfunc)
 
     # Wrap dispatch._runcommand after color is loaded so color can see
@@ -165,10 +73,6 @@
         extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
     extensions.afterloaded('color', afterloaded)
 
-def extsetup(ui):
-    commands.globalopts.append(
-        ('', 'pager', 'auto',
-         _("when to paginate (boolean, always, auto, or never)"),
-         _('TYPE')))
-
-attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
+attended = [
+    'the-default-attend-list-is-now-empty-but-that-breaks-the-extension',
+]
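
With the extension reduced to an attend-list shim, paging is now an
in-core facility: commands opt in by calling ui.pager() before
producing bulk output, as qdiff, shelve and log do elsewhere in this
changeset. A minimal sketch of an extension command doing the same
(command name and body are hypothetical)::

    from mercurial import cmdutil

    cmdtable = {}
    command = cmdutil.command(cmdtable)

    @command('mylist', [], 'hg mylist')
    def mylist(ui, repo):
        """list all revision numbers (hypothetical example command)"""
        ui.pager('mylist')  # start the pager before writing bulk output
        for rev in repo:
            ui.write('%d\n' % rev)
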
--- a/hgext/rebase.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/rebase.py	Tue Feb 28 11:13:25 2017 -0800
@@ -47,6 +47,7 @@
     repoview,
     revset,
     scmutil,
+    smartset,
     util,
 )
 
@@ -118,8 +119,8 @@
     # i18n: "_rebasedefaultdest" is a keyword
     sourceset = None
     if x is not None:
-        sourceset = revset.getset(repo, revset.fullreposet(repo), x)
-    return subset & revset.baseset([_destrebase(repo, sourceset)])
+        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
+    return subset & smartset.baseset([_destrebase(repo, sourceset)])
 
 class rebaseruntime(object):
     """This class is a container for rebase runtime state"""
@@ -1367,7 +1368,7 @@
     """store the currently rebased set on the repo object
 
     This is used by another function to prevent rebased revisions from becoming
-    hidden (see issue4505)"""
+    hidden (see issue4504)"""
     repo = repo.unfiltered()
     revs = set(revs)
     repo._rebaseset = revs
@@ -1383,7 +1384,7 @@
         del repo._rebaseset
 
 def _rebasedvisible(orig, repo):
-    """ensure rebased revs stay visible (see issue4505)"""
+    """ensure rebased revs stay visible (see issue4504)"""
     blockers = orig(repo)
     blockers.update(getattr(repo, '_rebaseset', ()))
     return blockers
--- a/hgext/share.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/share.py	Tue Feb 28 11:13:25 2017 -0800
@@ -48,6 +48,7 @@
     error,
     extensions,
     hg,
+    txnutil,
     util,
 )
 
@@ -171,7 +172,28 @@
     if _hassharedbookmarks(repo):
         srcrepo = _getsrcrepo(repo)
         if srcrepo is not None:
+            # just orig(srcrepo) doesn't work as expected, because
+            # HG_PENDING refers to repo.root.
+            try:
+                fp, pending = txnutil.trypending(repo.root, repo.vfs,
+                                                 'bookmarks')
+                if pending:
+                    # only in this case, bookmark information in repo
+                    # is up-to-date.
+                    return fp
+                fp.close()
+            except IOError as inst:
+                if inst.errno != errno.ENOENT:
+                    raise
+
+            # otherwise, we should read bookmarks from srcrepo,
+            # because .hg/bookmarks in srcrepo might be already
+            # changed via another sharing repo
             repo = srcrepo
+
+            # TODO: Pending changes in repo are still invisible in
+            # srcrepo, because bookmarks.pending is written only into repo.
+            # See also https://www.mercurial-scm.org/wiki/SharedRepository
     return orig(repo)
 
 def recordchange(orig, self, tr):
--- a/hgext/shelve.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/shelve.py	Tue Feb 28 11:13:25 2017 -0800
@@ -485,6 +485,7 @@
     if not ui.plain():
         width = ui.termwidth()
     namelabel = 'shelve.newest'
+    ui.pager('shelve')
     for mtime, name in listshelves(repo):
         sname = util.split(name)[1]
         if pats and sname not in pats:
@@ -747,10 +748,12 @@
            _('continue an incomplete unshelve operation')),
           ('k', 'keep', None,
            _('keep shelve after unshelving')),
+          ('n', 'name', '',
+           _('restore shelved change with given name'), _('NAME')),
           ('t', 'tool', '', _('specify merge tool')),
           ('', 'date', '',
            _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
-         _('hg unshelve [SHELVED]'))
+         _('hg unshelve [[-n] SHELVED]'))
 def unshelve(ui, repo, *shelved, **opts):
     """restore a shelved change to the working directory
 
@@ -795,6 +798,9 @@
     continuef = opts.get('continue')
     if not abortf and not continuef:
         cmdutil.checkunfinished(repo)
+    shelved = list(shelved)
+    if opts.get("name"):
+        shelved.append(opts["name"])
 
     if abortf or continuef:
         if abortf and continuef:
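
The new -n/--name flag mirrors the one on hg shelve, so a shelved
change can be restored by name rather than positionally::

  hg shelve --name feature-x
  hg unshelve -n feature-x
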
--- a/hgext/transplant.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/transplant.py	Tue Feb 28 11:13:25 2017 -0800
@@ -28,10 +28,12 @@
     merge,
     node as nodemod,
     patch,
+    pycompat,
     registrar,
     revlog,
     revset,
     scmutil,
+    smartset,
     util,
 )
 
@@ -197,7 +199,7 @@
                     patchfile = None
                 else:
                     fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
-                    fp = os.fdopen(fd, 'w')
+                    fp = os.fdopen(fd, pycompat.sysstr('w'))
                     gen = patch.diff(source, parent, node, opts=diffopts)
                     for chunk in gen:
                         fp.write(chunk)
@@ -245,7 +247,7 @@
         self.ui.status(_('filtering %s\n') % patchfile)
         user, date, msg = (changelog[1], changelog[2], changelog[4])
         fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
-        fp = os.fdopen(fd, 'w')
+        fp = os.fdopen(fd, pycompat.sysstr('w'))
         fp.write("# HG changeset patch\n")
         fp.write("# User %s\n" % user)
         fp.write("# Date %d %d\n" % date)
@@ -722,7 +724,7 @@
         s = revset.getset(repo, subset, x)
     else:
         s = subset
-    return revset.baseset([r for r in s if
+    return smartset.baseset([r for r in s if
         repo[r].extra().get('transplant_source')])
 
 templatekeyword = registrar.templatekeyword()
--- a/hgext/zeroconf/__init__.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/hgext/zeroconf/__init__.py	Tue Feb 28 11:13:25 2017 -0800
@@ -64,7 +64,9 @@
     # Generic method, sometimes gives useless results
     try:
         dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
-        if not dumbip.startswith('127.') and ':' not in dumbip:
+        if ':' in dumbip:
+            dumbip = '127.0.0.1'
+        if not dumbip.startswith('127.'):
             return dumbip
     except (socket.gaierror, socket.herror):
         dumbip = '127.0.0.1'
--- a/mercurial/archival.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/archival.py	Tue Feb 28 11:13:25 2017 -0800
@@ -331,7 +331,7 @@
         for subpath in sorted(ctx.substate):
             sub = ctx.workingsub(subpath)
             submatch = matchmod.subdirmatcher(subpath, matchfn)
-            total += sub.archive(archiver, prefix, submatch)
+            total += sub.archive(archiver, prefix, submatch, decode)
 
     if total == 0:
         raise error.Abort(_('no files match the archive pattern'))
--- a/mercurial/bookmarks.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/bookmarks.py	Tue Feb 28 11:13:25 2017 -0800
@@ -19,6 +19,7 @@
     error,
     lock as lockmod,
     obsolete,
+    txnutil,
     util,
 )
 
@@ -29,17 +30,8 @@
     bookmarks or the committed ones. Other extensions (like share)
     may need to tweak this behavior further.
     """
-    bkfile = None
-    if 'HG_PENDING' in encoding.environ:
-        try:
-            bkfile = repo.vfs('bookmarks.pending')
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-    if bkfile is None:
-        bkfile = repo.vfs('bookmarks')
-    return bkfile
-
+    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
+    return fp
 
 class bmstore(dict):
     """Storage for bookmarks.
--- a/mercurial/branchmap.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/branchmap.py	Tue Feb 28 11:13:25 2017 -0800
@@ -9,7 +9,6 @@
 
 import array
 import struct
-import time
 
 from .node import (
     bin,
@@ -21,6 +20,7 @@
     encoding,
     error,
     scmutil,
+    util,
 )
 
 array = array.array
@@ -261,7 +261,7 @@
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
         """
-        starttime = time.time()
+        starttime = util.timer()
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
@@ -314,7 +314,7 @@
                     self.tiprev = tiprev
         self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
 
-        duration = time.time() - starttime
+        duration = util.timer() - starttime
         repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                     repo.filtername, duration)
 
--- a/mercurial/bundle2.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/bundle2.py	Tue Feb 28 11:13:25 2017 -0800
@@ -320,9 +320,6 @@
     It iterates over each part then searches for and uses the proper handling
     code to process the part. Parts are processed in order.
 
-    This is very early version of this function that will be strongly reworked
-    before final usage.
-
     An unknown mandatory part will abort the process.
 
     It is temporarily possible to provide a prebuilt bundleoperation to the
@@ -865,6 +862,11 @@
         self._generated = None
         self.mandatory = mandatory
 
+    def __repr__(self):
+        cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
+        return ('<%s object at %x; id: %s; type: %s; mandatory: %s>'
+                % (cls, id(self), self.id, self.type, self.mandatory))
+
     def copy(self):
         """return a copy of the part
 
--- a/mercurial/bundlerepo.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/bundlerepo.py	Tue Feb 28 11:13:25 2017 -0800
@@ -272,7 +272,7 @@
                                             suffix=".hg10un")
             self.tempfile = temp
 
-            with os.fdopen(fdtemp, 'wb') as fptemp:
+            with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
                 fptemp.write(header)
                 while True:
                     chunk = read(2**18)
--- a/mercurial/changegroup.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/changegroup.py	Tue Feb 28 11:13:25 2017 -0800
@@ -26,6 +26,7 @@
     error,
     mdiff,
     phases,
+    pycompat,
     util,
 )
 
@@ -98,7 +99,7 @@
                 fh = open(filename, "wb", 131072)
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
-            fh = os.fdopen(fd, "wb")
+            fh = os.fdopen(fd, pycompat.sysstr("wb"))
         cleanup = filename
         for c in chunks:
             fh.write(c)
--- a/mercurial/chgserver.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/chgserver.py	Tue Feb 28 11:13:25 2017 -0800
@@ -31,8 +31,11 @@
 ::
 
   [chgserver]
-  idletimeout = 3600 # seconds, after which an idle server will exit
-  skiphash = False   # whether to skip config or env change checks
+  # how long (in seconds) an idle chg server waits before exiting
+  idletimeout = 3600
+
+  # whether to skip config or env change checks
+  skiphash = False
 """
 
 from __future__ import absolute_import
@@ -176,26 +179,17 @@
             else:
                 self._csystem = csystem
 
-        def system(self, cmd, environ=None, cwd=None, onerr=None,
-                   errprefix=None):
+        def _runsystem(self, cmd, environ, cwd, out):
             # fallback to the original system method if the output needs to be
             # captured (to self._buffers), or the output stream is not stdout
             # (e.g. stderr, cStringIO), because the chg client is not aware of
             # these situations and will behave differently (write to stdout).
-            if (any(s[1] for s in self._bufferstates)
+            if (out is not self.fout
                 or not util.safehasattr(self.fout, 'fileno')
                 or self.fout.fileno() != util.stdout.fileno()):
-                return super(chgui, self).system(cmd, environ, cwd, onerr,
-                                                 errprefix)
+                return util.system(cmd, environ=environ, cwd=cwd, out=out)
             self.flush()
-            rc = self._csystem(cmd, util.shellenviron(environ), cwd)
-            if rc and onerr:
-                errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
-                                    util.explainexit(rc)[0])
-                if errprefix:
-                    errmsg = '%s: %s' % (errprefix, errmsg)
-                raise onerr(errmsg)
-            return rc
+            return self._csystem(cmd, util.shellenviron(environ), cwd)
 
         def _runpager(self, cmd):
             self._csystem(cmd, util.shellenviron(), type='pager',
@@ -287,9 +281,9 @@
 
 _iochannels = [
     # server.ch, ui.fp, mode
-    ('cin', 'fin', 'rb'),
-    ('cout', 'fout', 'wb'),
-    ('cerr', 'ferr', 'wb'),
+    ('cin', 'fin', pycompat.sysstr('rb')),
+    ('cout', 'fout', pycompat.sysstr('wb')),
+    ('cerr', 'ferr', pycompat.sysstr('wb')),
 ]
 
 class chgcmdserver(commandserver.server):
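
The chg override shrinks because the onerr/errprefix handling now stays
in the base ui.system(), leaving subclasses a narrower _runsystem()
hook that only decides how the command is executed. A rough sketch of
the shape of that split (not Mercurial's actual ui code)::

    class baseui(object):
        def system(self, cmd, environ=None, cwd=None, onerr=None,
                   errprefix=None):
            rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=None)
            if rc and onerr:
                errmsg = 'command failed with exit status %d' % rc
                if errprefix:
                    errmsg = '%s: %s' % (errprefix, errmsg)
                raise onerr(errmsg)
            return rc

        def _runsystem(self, cmd, environ, cwd, out):
            # subclasses (like the chg ui above) override just this part
            raise NotImplementedError
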
--- a/mercurial/cmdutil.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/cmdutil.py	Tue Feb 28 11:13:25 2017 -0800
@@ -26,14 +26,12 @@
     changelog,
     copies,
     crecord as crecordmod,
-    dirstateguard as dirstateguardmod,
     encoding,
     error,
     formatter,
     graphmod,
     lock as lockmod,
     match as matchmod,
-    mergeutil,
     obsolete,
     patch,
     pathutil,
@@ -43,6 +41,7 @@
     revlog,
     revset,
     scmutil,
+    smartset,
     templatekw,
     templater,
     util,
@@ -2092,11 +2091,11 @@
     if opts.get('rev'):
         revs = scmutil.revrange(repo, opts['rev'])
     elif follow and repo.dirstate.p1() == nullid:
-        revs = revset.baseset()
+        revs = smartset.baseset()
     elif follow:
         revs = repo.revs('reverse(:.)')
     else:
-        revs = revset.spanset(repo)
+        revs = smartset.spanset(repo)
         revs.reverse()
     return revs
 
@@ -2111,7 +2110,7 @@
     limit = loglimit(opts)
     revs = _logrevs(repo, opts)
     if not revs:
-        return revset.baseset(), None, None
+        return smartset.baseset(), None, None
     expr, filematcher = _makelogrevset(repo, pats, opts, revs)
     if opts.get('rev'):
         # User-specified revs might be unsorted, but don't sort before
@@ -2127,7 +2126,7 @@
             if idx >= limit:
                 break
             limitedrevs.append(rev)
-        revs = revset.baseset(limitedrevs)
+        revs = smartset.baseset(limitedrevs)
 
     return revs, expr, filematcher
 
@@ -2142,7 +2141,7 @@
     limit = loglimit(opts)
     revs = _logrevs(repo, opts)
     if not revs:
-        return revset.baseset([]), None, None
+        return smartset.baseset([]), None, None
     expr, filematcher = _makelogrevset(repo, pats, opts, revs)
     if expr:
         matcher = revset.match(repo.ui, expr, order=revset.followorder)
@@ -2153,7 +2152,7 @@
             if limit <= idx:
                 break
             limitedrevs.append(r)
-        revs = revset.baseset(limitedrevs)
+        revs = smartset.baseset(limitedrevs)
 
     return revs, expr, filematcher
 
@@ -2236,6 +2235,8 @@
         if opts.get('rev'):
             endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
+
+    ui.pager('log')
     displayer = show_changeset(ui, repo, opts, buffered=True)
     displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                  filematcher)
@@ -3366,11 +3367,6 @@
 
     return cmd
 
-def checkunresolved(ms):
-    ms._repo.ui.deprecwarn('checkunresolved moved from cmdutil to mergeutil',
-                           '4.1')
-    return mergeutil.checkunresolved(ms)
-
 # a list of (ui, repo, otherpeer, opts, missing) functions called by
 # commands.outgoing.  "missing" is "missing" of the result of
 # "findcommonoutgoing()"
@@ -3477,10 +3473,3 @@
     if after[1]:
         hint = after[0]
     raise error.Abort(_('no %s in progress') % task, hint=hint)
-
-class dirstateguard(dirstateguardmod.dirstateguard):
-    def __init__(self, repo, name):
-        dirstateguardmod.dirstateguard.__init__(self, repo, name)
-        repo.ui.deprecwarn(
-            'dirstateguard has moved from cmdutil to dirstateguard',
-            '4.1')
--- a/mercurial/color.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/color.py	Tue Feb 28 11:13:25 2017 -0800
@@ -7,59 +7,466 @@
 
 from __future__ import absolute_import
 
-_styles = {'grep.match': 'red bold',
-           'grep.linenumber': 'green',
-           'grep.rev': 'green',
-           'grep.change': 'green',
-           'grep.sep': 'cyan',
-           'grep.filename': 'magenta',
-           'grep.user': 'magenta',
-           'grep.date': 'magenta',
-           'bookmarks.active': 'green',
-           'branches.active': 'none',
-           'branches.closed': 'black bold',
-           'branches.current': 'green',
-           'branches.inactive': 'none',
-           'diff.changed': 'white',
-           'diff.deleted': 'red',
-           'diff.diffline': 'bold',
-           'diff.extended': 'cyan bold',
-           'diff.file_a': 'red bold',
-           'diff.file_b': 'green bold',
-           'diff.hunk': 'magenta',
-           'diff.inserted': 'green',
-           'diff.tab': '',
-           'diff.trailingwhitespace': 'bold red_background',
-           'changeset.public' : '',
-           'changeset.draft' : '',
-           'changeset.secret' : '',
-           'diffstat.deleted': 'red',
-           'diffstat.inserted': 'green',
-           'histedit.remaining': 'red bold',
-           'ui.prompt': 'yellow',
-           'log.changeset': 'yellow',
-           'patchbomb.finalsummary': '',
-           'patchbomb.from': 'magenta',
-           'patchbomb.to': 'cyan',
-           'patchbomb.subject': 'green',
-           'patchbomb.diffstats': '',
-           'rebase.rebased': 'blue',
-           'rebase.remaining': 'red bold',
-           'resolve.resolved': 'green bold',
-           'resolve.unresolved': 'red bold',
-           'shelve.age': 'cyan',
-           'shelve.newest': 'green bold',
-           'shelve.name': 'blue bold',
-           'status.added': 'green bold',
-           'status.clean': 'none',
-           'status.copied': 'none',
-           'status.deleted': 'cyan bold underline',
-           'status.ignored': 'black bold',
-           'status.modified': 'blue bold',
-           'status.removed': 'red bold',
-           'status.unknown': 'magenta bold underline',
-           'tags.normal': 'green',
-           'tags.local': 'black bold'}
+from .i18n import _
+
+from . import (
+    encoding,
+    pycompat,
+    util
+)
+
+try:
+    import curses
+    # Mapping from effect name to terminfo attribute name (or raw code) or
+    # color number.  This will also force-load the curses module.
+    _baseterminfoparams = {
+        'none': (True, 'sgr0', ''),
+        'standout': (True, 'smso', ''),
+        'underline': (True, 'smul', ''),
+        'reverse': (True, 'rev', ''),
+        'inverse': (True, 'rev', ''),
+        'blink': (True, 'blink', ''),
+        'dim': (True, 'dim', ''),
+        'bold': (True, 'bold', ''),
+        'invisible': (True, 'invis', ''),
+        'italic': (True, 'sitm', ''),
+        'black': (False, curses.COLOR_BLACK, ''),
+        'red': (False, curses.COLOR_RED, ''),
+        'green': (False, curses.COLOR_GREEN, ''),
+        'yellow': (False, curses.COLOR_YELLOW, ''),
+        'blue': (False, curses.COLOR_BLUE, ''),
+        'magenta': (False, curses.COLOR_MAGENTA, ''),
+        'cyan': (False, curses.COLOR_CYAN, ''),
+        'white': (False, curses.COLOR_WHITE, ''),
+    }
+except ImportError:
+    curses = None
+    _baseterminfoparams = {}
+
+# allow the extensions to change the default
+_enabledbydefault = False
+
+# start and stop parameters for effects
+_effects = {
+    'none': 0,
+    'black': 30,
+    'red': 31,
+    'green': 32,
+    'yellow': 33,
+    'blue': 34,
+    'magenta': 35,
+    'cyan': 36,
+    'white': 37,
+    'bold': 1,
+    'italic': 3,
+    'underline': 4,
+    'inverse': 7,
+    'dim': 2,
+    'black_background': 40,
+    'red_background': 41,
+    'green_background': 42,
+    'yellow_background': 43,
+    'blue_background': 44,
+    'purple_background': 45,
+    'cyan_background': 46,
+    'white_background': 47,
+    }
+
+_defaultstyles = {
+    'grep.match': 'red bold',
+    'grep.linenumber': 'green',
+    'grep.rev': 'green',
+    'grep.change': 'green',
+    'grep.sep': 'cyan',
+    'grep.filename': 'magenta',
+    'grep.user': 'magenta',
+    'grep.date': 'magenta',
+    'bookmarks.active': 'green',
+    'branches.active': 'none',
+    'branches.closed': 'black bold',
+    'branches.current': 'green',
+    'branches.inactive': 'none',
+    'diff.changed': 'white',
+    'diff.deleted': 'red',
+    'diff.diffline': 'bold',
+    'diff.extended': 'cyan bold',
+    'diff.file_a': 'red bold',
+    'diff.file_b': 'green bold',
+    'diff.hunk': 'magenta',
+    'diff.inserted': 'green',
+    'diff.tab': '',
+    'diff.trailingwhitespace': 'bold red_background',
+    'changeset.public' : '',
+    'changeset.draft' : '',
+    'changeset.secret' : '',
+    'diffstat.deleted': 'red',
+    'diffstat.inserted': 'green',
+    'histedit.remaining': 'red bold',
+    'ui.prompt': 'yellow',
+    'log.changeset': 'yellow',
+    'patchbomb.finalsummary': '',
+    'patchbomb.from': 'magenta',
+    'patchbomb.to': 'cyan',
+    'patchbomb.subject': 'green',
+    'patchbomb.diffstats': '',
+    'rebase.rebased': 'blue',
+    'rebase.remaining': 'red bold',
+    'resolve.resolved': 'green bold',
+    'resolve.unresolved': 'red bold',
+    'shelve.age': 'cyan',
+    'shelve.newest': 'green bold',
+    'shelve.name': 'blue bold',
+    'status.added': 'green bold',
+    'status.clean': 'none',
+    'status.copied': 'none',
+    'status.deleted': 'cyan bold underline',
+    'status.ignored': 'black bold',
+    'status.modified': 'blue bold',
+    'status.removed': 'red bold',
+    'status.unknown': 'magenta bold underline',
+    'tags.normal': 'green',
+    'tags.local': 'black bold',
+}
 
 def loadcolortable(ui, extname, colortable):
-    _styles.update(colortable)
+    _defaultstyles.update(colortable)
+
+def _terminfosetup(ui, mode):
+    '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
+
+    # If we failed to load curses, we go ahead and return.
+    if curses is None:
+        return
+    # Otherwise, see what the config file says.
+    if mode not in ('auto', 'terminfo'):
+        return
+    ui._terminfoparams.update(_baseterminfoparams)
+
+    for key, val in ui.configitems('color'):
+        if key.startswith('color.'):
+            newval = (False, int(val), '')
+            ui._terminfoparams[key[6:]] = newval
+        elif key.startswith('terminfo.'):
+            newval = (True, '', val.replace('\\E', '\x1b'))
+            ui._terminfoparams[key[9:]] = newval
+    try:
+        curses.setupterm()
+    except curses.error as e:
+        ui._terminfoparams.clear()
+        return
+
+    for key, (b, e, c) in ui._terminfoparams.items():
+        if not b:
+            continue
+        if not c and not curses.tigetstr(e):
+            # Most terminals don't support dim, invis, etc, so don't be
+            # noisy and use ui.debug().
+            ui.debug("no terminfo entry for %s\n" % e)
+            del ui._terminfoparams[key]
+    if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
+        # Only warn about missing terminfo entries if we explicitly asked for
+        # terminfo mode.
+        if mode == "terminfo":
+            ui.warn(_("no terminfo entry for setab/setaf: reverting to "
+              "ECMA-48 color\n"))
+        ui._terminfoparams.clear()
+
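(The two key prefixes recognized above correspond directly to hgrc entries; a hypothetical snippet — the names and values are examples, not shipped defaults:

    [color]
    color.brightblue = 12      # stored as (False, 12, '')
    terminfo.dim = \E[2m       # '\E' is rewritten to the real escape character
)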
+def setup(ui):
+    """configure color on a ui
+
+    This function both sets the colormode for the ui object and reads
+    the configuration looking for custom colors and effect definitions."""
+    mode = _modesetup(ui)
+    ui._colormode = mode
+    if mode and mode != 'debug':
+        configstyles(ui)
+
+def _modesetup(ui):
+    if ui.plain():
+        return None
+    default = 'never'
+    if _enabledbydefault:
+        default = 'auto'
+    # experimental config: ui.color
+    config = ui.config('ui', 'color', default)
+    if config == 'debug':
+        return 'debug'
+
+    auto = (config == 'auto')
+    always = not auto and util.parsebool(config)
+    if not always and not auto:
+        return None
+
+    formatted = (always or (encoding.environ.get('TERM') != 'dumb'
+                 and ui.formatted()))
+
+    mode = ui.config('color', 'mode', 'auto')
+
+    # If pager is active, color.pagermode overrides color.mode.
+    if getattr(ui, 'pageractive', False):
+        mode = ui.config('color', 'pagermode', mode)
+
+    realmode = mode
+    if mode == 'auto':
+        if pycompat.osname == 'nt':
+            term = encoding.environ.get('TERM')
+            # TERM won't be defined in a vanilla cmd.exe environment.
+
+            # UNIX-like environments on Windows such as Cygwin and MSYS will
+            # set TERM. They appear to make a best effort attempt at setting it
+            # to something appropriate. However, not all environments with TERM
+            # defined support ANSI. Since "ansi" could result in terminal
+            # gibberish, we err on the side of selecting "win32". However, if
+            # w32effects is not defined, we almost certainly don't support
+            # "win32", so don't even try.
+            if (term and 'xterm' in term) or not w32effects:
+                realmode = 'ansi'
+            else:
+                realmode = 'win32'
+        else:
+            realmode = 'ansi'
+
+    def modewarn():
+        # only warn if color.mode was explicitly set and we're in
+        # a formatted terminal
+        if mode == realmode and ui.formatted():
+            ui.warn(_('warning: failed to set color mode to %s\n') % mode)
+
+    if realmode == 'win32':
+        ui._terminfoparams.clear()
+        if not w32effects:
+            modewarn()
+            return None
+        _effects.update(w32effects)
+    elif realmode == 'ansi':
+        ui._terminfoparams.clear()
+    elif realmode == 'terminfo':
+        _terminfosetup(ui, mode)
+        if not ui._terminfoparams:
+            ## FIXME Shouldn't we return None in this case too?
+            modewarn()
+            realmode = 'ansi'
+    else:
+        return None
+
+    if always or (auto and formatted):
+        return realmode
+    return None
+
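(Because color.pagermode overrides color.mode only while the pager is active, the two can be configured independently; a hypothetical example:

    [color]
    mode = auto       # resolved per-platform as above
    pagermode = ansi  # used instead when output goes through a pager such as 'less -R'
)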
+def configstyles(ui):
+    ui._styles.update(_defaultstyles)
+    for status, cfgeffects in ui.configitems('color'):
+        if '.' not in status or status.startswith(('color.', 'terminfo.')):
+            continue
+        cfgeffects = ui.configlist('color', status)
+        if cfgeffects:
+            good = []
+            for e in cfgeffects:
+                if valideffect(ui, e):
+                    good.append(e)
+                else:
+                    ui.warn(_("ignoring unknown color/effect %r "
+                              "(configured in color.%s)\n")
+                            % (e, status))
+            ui._styles[status] = ' '.join(good)
+
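(Any label known to _defaultstyles, or contributed via loadcolortable(), can be restyled from the [color] section; invalid effects are warned about and dropped rather than aborting. A hypothetical override:

    [color]
    status.modified = yellow bold
    status.added = blink green   # 'blink' is unknown: warned about and ignored, 'green' kept
)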
+def valideffect(ui, effect):
+    'Determine if the effect is valid or not.'
+    return ((not ui._terminfoparams and effect in _effects)
+             or (effect in ui._terminfoparams
+                 or effect[:-11] in ui._terminfoparams))
+
+def _effect_str(ui, effect):
+    '''Helper function for render_effects().'''
+
+    bg = False
+    if effect.endswith('_background'):
+        bg = True
+        effect = effect[:-11]
+    try:
+        attr, val, termcode = ui._terminfoparams[effect]
+    except KeyError:
+        return ''
+    if attr:
+        if termcode:
+            return termcode
+        else:
+            return curses.tigetstr(val)
+    elif bg:
+        return curses.tparm(curses.tigetstr('setab'), val)
+    else:
+        return curses.tparm(curses.tigetstr('setaf'), val)
+
+def _render_effects(ui, text, effects):
+    'Wrap text in commands to turn on each effect.'
+    if not text:
+        return text
+    if ui._terminfoparams:
+        start = ''.join(_effect_str(ui, effect)
+                        for effect in ['none'] + effects.split())
+        stop = _effect_str(ui, 'none')
+    else:
+        start = [str(_effects[e]) for e in ['none'] + effects.split()]
+        start = '\033[' + ';'.join(start) + 'm'
+        stop = '\033[' + str(_effects['none']) + 'm'
+    return ''.join([start, text, stop])
+
+def colorlabel(ui, msg, label):
+    """add color control code according to the mode"""
+    if ui._colormode == 'debug':
+        if label and msg:
+            if msg[-1] == '\n':
+                msg = "[%s|%s]\n" % (label, msg[:-1])
+            else:
+                msg = "[%s|%s]" % (label, msg)
+    elif ui._colormode is not None:
+        effects = []
+        for l in label.split():
+            s = ui._styles.get(l, '')
+            if s:
+                effects.append(s)
+            elif valideffect(ui, l):
+                effects.append(l)
+        effects = ' '.join(effects)
+        if effects:
+            msg = '\n'.join([_render_effects(ui, line, effects)
+                             for line in msg.split('\n')])
+    return msg
+
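(In 'debug' mode, colorlabel() wraps output in textual markers instead of escape codes, which is what tests can match against. Expected behaviour of the code above, with hypothetical input:

    # colorlabel(ui, 'changeset:   0:abc123\n', 'log.changeset')
    #   -> '[log.changeset|changeset:   0:abc123]\n'
)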
+w32effects = None
+if pycompat.osname == 'nt':
+    import ctypes
+    import re
+
+    _kernel32 = ctypes.windll.kernel32
+
+    _WORD = ctypes.c_ushort
+
+    _INVALID_HANDLE_VALUE = -1
+
+    class _COORD(ctypes.Structure):
+        _fields_ = [('X', ctypes.c_short),
+                    ('Y', ctypes.c_short)]
+
+    class _SMALL_RECT(ctypes.Structure):
+        _fields_ = [('Left', ctypes.c_short),
+                    ('Top', ctypes.c_short),
+                    ('Right', ctypes.c_short),
+                    ('Bottom', ctypes.c_short)]
+
+    class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
+        _fields_ = [('dwSize', _COORD),
+                    ('dwCursorPosition', _COORD),
+                    ('wAttributes', _WORD),
+                    ('srWindow', _SMALL_RECT),
+                    ('dwMaximumWindowSize', _COORD)]
+
+    _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
+    _STD_ERROR_HANDLE = 0xfffffff4  # (DWORD)-12
+
+    _FOREGROUND_BLUE = 0x0001
+    _FOREGROUND_GREEN = 0x0002
+    _FOREGROUND_RED = 0x0004
+    _FOREGROUND_INTENSITY = 0x0008
+
+    _BACKGROUND_BLUE = 0x0010
+    _BACKGROUND_GREEN = 0x0020
+    _BACKGROUND_RED = 0x0040
+    _BACKGROUND_INTENSITY = 0x0080
+
+    _COMMON_LVB_REVERSE_VIDEO = 0x4000
+    _COMMON_LVB_UNDERSCORE = 0x8000
+
+    # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
+    w32effects = {
+        'none': -1,
+        'black': 0,
+        'red': _FOREGROUND_RED,
+        'green': _FOREGROUND_GREEN,
+        'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
+        'blue': _FOREGROUND_BLUE,
+        'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
+        'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
+        'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
+        'bold': _FOREGROUND_INTENSITY,
+        'black_background': 0x100,                  # unused value > 0x0f
+        'red_background': _BACKGROUND_RED,
+        'green_background': _BACKGROUND_GREEN,
+        'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
+        'blue_background': _BACKGROUND_BLUE,
+        'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
+        'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
+        'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
+                             _BACKGROUND_BLUE),
+        'bold_background': _BACKGROUND_INTENSITY,
+        'underline': _COMMON_LVB_UNDERSCORE,  # double-byte charsets only
+        'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
+    }
+
+    passthrough = set([_FOREGROUND_INTENSITY,
+                       _BACKGROUND_INTENSITY,
+                       _COMMON_LVB_UNDERSCORE,
+                       _COMMON_LVB_REVERSE_VIDEO])
+
+    stdout = _kernel32.GetStdHandle(
+                  _STD_OUTPUT_HANDLE)  # don't close the handle returned
+    if stdout is None or stdout == _INVALID_HANDLE_VALUE:
+        w32effects = None
+    else:
+        csbi = _CONSOLE_SCREEN_BUFFER_INFO()
+        if not _kernel32.GetConsoleScreenBufferInfo(
+                    stdout, ctypes.byref(csbi)):
+            # stdout may not support GetConsoleScreenBufferInfo()
+            # when called from subprocess or redirected
+            w32effects = None
+        else:
+            origattr = csbi.wAttributes
+            ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
+                                re.MULTILINE | re.DOTALL)
+
+    def win32print(ui, writefunc, *msgs, **opts):
+        for text in msgs:
+            _win32print(ui, text, writefunc, **opts)
+
+    def _win32print(ui, text, writefunc, **opts):
+        label = opts.get('label', '')
+        attr = origattr
+
+        def mapcolor(val, attr):
+            if val == -1:
+                return origattr
+            elif val in passthrough:
+                return attr | val
+            elif val > 0x0f:
+                return (val & 0x70) | (attr & 0x8f)
+            else:
+                return (val & 0x07) | (attr & 0xf8)
+
+        # determine console attributes based on labels
+        for l in label.split():
+            style = ui._styles.get(l, '')
+            for effect in style.split():
+                try:
+                    attr = mapcolor(w32effects[effect], attr)
+                except KeyError:
+                    # w32effects may not define every attribute, so skip
+                    # any effect that is not found
+                    pass
+        # hack to ensure regexp finds data
+        if not text.startswith('\033['):
+            text = '\033[m' + text
+
+        # Look for ANSI-like codes embedded in text
+        m = re.match(ansire, text)
+
+        try:
+            while m:
+                for sattr in m.group(1).split(';'):
+                    if sattr:
+                        attr = mapcolor(int(sattr), attr)
+                _kernel32.SetConsoleTextAttribute(stdout, attr)
+                writefunc(m.group(2), **opts)
+                m = re.match(ansire, m.group(3))
+        finally:
+            # Explicitly reset original attributes
+            _kernel32.SetConsoleTextAttribute(stdout, origattr)
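(mapcolor() folds one SGR value at a time into the Win32 attribute word: foreground codes replace the low three bits, background codes — values above 0x0f — replace bits 4-6, and passthrough flags are simply OR-ed in. A worked sketch with hypothetical attribute values:

    # 'green' (0x02) applied to white-on-black (attr == 0x07):
    #     (0x02 & 0x07) | (0x07 & 0xf8)  ==  0x02
    # 'red_background' (0x40) applied on top of that:
    #     (0x40 & 0x70) | (0x02 & 0x8f)  ==  0x42
)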
--- a/mercurial/commands.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/commands.py	Tue Feb 28 11:13:25 2017 -0800
@@ -11,17 +11,10 @@
 import errno
 import os
 import re
-import socket
-import string
-import sys
-import tempfile
-import time
 
 from .i18n import _
 from .node import (
-    bin,
     hex,
-    nullhex,
     nullid,
     nullrev,
     short,
@@ -40,30 +33,22 @@
     error,
     exchange,
     extensions,
-    formatter,
     graphmod,
     hbisect,
     help,
     hg,
     lock as lockmod,
     merge as mergemod,
-    minirst,
     obsolete,
     patch,
     phases,
-    policy,
-    pvec,
     pycompat,
-    repair,
-    revlog,
-    revset,
+    revsetlang,
     scmutil,
     server,
     sshserver,
-    sslutil,
     streamclone,
     templatekw,
-    templater,
     ui as uimod,
     util,
 )
@@ -92,6 +77,12 @@
      _('do not prompt, automatically pick the first choice for all prompts')),
     ('q', 'quiet', None, _('suppress output')),
     ('v', 'verbose', None, _('enable additional output')),
+    ('', 'color', '',
+     # i18n: 'always', 'auto', 'never', and 'debug' are keywords
+     # and should not be translated
+     _("when to colorize (boolean, always, auto, never, or debug)"
+       " (EXPERIMENTAL)"),
+     _('TYPE')),
     ('', 'config', [],
      _('set/override config option (use \'section.name=value\')'),
      _('CONFIG')),
@@ -107,6 +98,8 @@
     ('', 'version', None, _('output version information and exit')),
     ('h', 'help', None, _('display help and exit')),
     ('', 'hidden', False, _('consider hidden changesets')),
+    ('', 'pager', 'auto',
+     _("when to paginate (boolean, always, auto, or never)"), _('TYPE')),
 ]
 
 dryrunopts = [('n', 'dry-run', None,
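(The new --color and --pager global options expose the same switches as the ui.color and pager configuration; hypothetical invocations:

    $ hg diff --color=always --pager=never
    $ hg log --color=debug          # emit [label|text] markers instead of escape codes
)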
@@ -433,6 +426,8 @@
     if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
         raise error.Abort(_('at least one of -n/-c is required for -l'))
 
+    ui.pager('annotate')
+
     if fm.isplain():
         def makefunc(get, fmt):
             return lambda x: fmt(get(x))
@@ -1427,6 +1422,7 @@
     ctx = scmutil.revsingle(repo, opts.get('rev'))
     m = scmutil.match(ctx, (file1,) + pats, opts)
 
+    ui.pager('cat')
     return cmdutil.cat(ui, repo, ctx, m, '', **opts)
 
 @command('^clone',
@@ -1801,7 +1797,7 @@
         ui.system("%s \"%s\"" % (editor, f),
                   onerr=error.Abort, errprefix=_("edit failed"))
         return
-
+    ui.pager('config')
     fm = ui.formatter('config', opts)
     for f in scmutil.rcpath():
         ui.debug('read config from: %s\n' % f)
@@ -1866,1176 +1862,6 @@
     with repo.wlock(False):
         return cmdutil.copy(ui, repo, pats, opts)
 
-@command('debuginstall', [] + formatteropts, '', norepo=True)
-def debuginstall(ui, **opts):
-    '''test Mercurial installation
-
-    Returns 0 on success.
-    '''
-
-    def writetemp(contents):
-        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
-        f = os.fdopen(fd, "wb")
-        f.write(contents)
-        f.close()
-        return name
-
-    problems = 0
-
-    fm = ui.formatter('debuginstall', opts)
-    fm.startitem()
-
-    # encoding
-    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
-    err = None
-    try:
-        encoding.fromlocal("test")
-    except error.Abort as inst:
-        err = inst
-        problems += 1
-    fm.condwrite(err, 'encodingerror', _(" %s\n"
-                 " (check that your locale is properly set)\n"), err)
-
-    # Python
-    fm.write('pythonexe', _("checking Python executable (%s)\n"),
-             pycompat.sysexecutable)
-    fm.write('pythonver', _("checking Python version (%s)\n"),
-             ("%d.%d.%d" % sys.version_info[:3]))
-    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
-             os.path.dirname(os.__file__))
-
-    security = set(sslutil.supportedprotocols)
-    if sslutil.hassni:
-        security.add('sni')
-
-    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
-             fm.formatlist(sorted(security), name='protocol',
-                           fmt='%s', sep=','))
-
-    # These are warnings, not errors. So don't increment problem count. This
-    # may change in the future.
-    if 'tls1.2' not in security:
-        fm.plain(_('  TLS 1.2 not supported by Python install; '
-                   'network connections lack modern security\n'))
-    if 'sni' not in security:
-        fm.plain(_('  SNI not supported by Python install; may have '
-                   'connectivity issues with some servers\n'))
-
-    # TODO print CA cert info
-
-    # hg version
-    hgver = util.version()
-    fm.write('hgver', _("checking Mercurial version (%s)\n"),
-             hgver.split('+')[0])
-    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
-             '+'.join(hgver.split('+')[1:]))
-
-    # compiled modules
-    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
-             policy.policy)
-    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
-             os.path.dirname(__file__))
-
-    err = None
-    try:
-        from . import (
-            base85,
-            bdiff,
-            mpatch,
-            osutil,
-        )
-        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
-    except Exception as inst:
-        err = inst
-        problems += 1
-    fm.condwrite(err, 'extensionserror', " %s\n", err)
-
-    compengines = util.compengines._engines.values()
-    fm.write('compengines', _('checking registered compression engines (%s)\n'),
-             fm.formatlist(sorted(e.name() for e in compengines),
-                           name='compengine', fmt='%s', sep=', '))
-    fm.write('compenginesavail', _('checking available compression engines '
-                                   '(%s)\n'),
-             fm.formatlist(sorted(e.name() for e in compengines
-                                  if e.available()),
-                           name='compengine', fmt='%s', sep=', '))
-    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
-    fm.write('compenginesserver', _('checking available compression engines '
-                                    'for wire protocol (%s)\n'),
-             fm.formatlist([e.name() for e in wirecompengines
-                            if e.wireprotosupport()],
-                           name='compengine', fmt='%s', sep=', '))
-
-    # templates
-    p = templater.templatepaths()
-    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
-    fm.condwrite(not p, '', _(" no template directories found\n"))
-    if p:
-        m = templater.templatepath("map-cmdline.default")
-        if m:
-            # template found, check if it is working
-            err = None
-            try:
-                templater.templater.frommapfile(m)
-            except Exception as inst:
-                err = inst
-                p = None
-            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
-        else:
-            p = None
-        fm.condwrite(p, 'defaulttemplate',
-                     _("checking default template (%s)\n"), m)
-        fm.condwrite(not m, 'defaulttemplatenotfound',
-                     _(" template '%s' not found\n"), "default")
-    if not p:
-        problems += 1
-    fm.condwrite(not p, '',
-                 _(" (templates seem to have been installed incorrectly)\n"))
-
-    # editor
-    editor = ui.geteditor()
-    editor = util.expandpath(editor)
-    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
-    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
-    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
-                 _(" No commit editor set and can't find %s in PATH\n"
-                   " (specify a commit editor in your configuration"
-                   " file)\n"), not cmdpath and editor == 'vi' and editor)
-    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
-                 _(" Can't find editor '%s' in PATH\n"
-                   " (specify a commit editor in your configuration"
-                   " file)\n"), not cmdpath and editor)
-    if not cmdpath and editor != 'vi':
-        problems += 1
-
-    # check username
-    username = None
-    err = None
-    try:
-        username = ui.username()
-    except error.Abort as e:
-        err = e
-        problems += 1
-
-    fm.condwrite(username, 'username',  _("checking username (%s)\n"), username)
-    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
-        " (specify a username in your configuration file)\n"), err)
-
-    fm.condwrite(not problems, '',
-                 _("no problems detected\n"))
-    if not problems:
-        fm.data(problems=problems)
-    fm.condwrite(problems, 'problems',
-                 _("%d problems detected,"
-                   " please check your install!\n"), problems)
-    fm.end()
-
-    return problems
-
-@command('debugknown', [], _('REPO ID...'), norepo=True)
-def debugknown(ui, repopath, *ids, **opts):
-    """test whether node ids are known to a repo
-
-    Every ID must be a full-length hex node id string. Returns a list of 0s
-    and 1s indicating unknown/known.
-    """
-    repo = hg.peer(ui, opts, repopath)
-    if not repo.capable('known'):
-        raise error.Abort("known() not supported by target repository")
-    flags = repo.known([bin(s) for s in ids])
-    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
-
-@command('debuglabelcomplete', [], _('LABEL...'))
-def debuglabelcomplete(ui, repo, *args):
-    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
-    debugnamecomplete(ui, repo, *args)
-
-@command('debugmergestate', [], '')
-def debugmergestate(ui, repo, *args):
-    """print merge state
-
-    Use --verbose to print out information about whether v1 or v2 merge state
-    was chosen."""
-    def _hashornull(h):
-        if h == nullhex:
-            return 'null'
-        else:
-            return h
-
-    def printrecords(version):
-        ui.write(('* version %s records\n') % version)
-        if version == 1:
-            records = v1records
-        else:
-            records = v2records
-
-        for rtype, record in records:
-            # pretty print some record types
-            if rtype == 'L':
-                ui.write(('local: %s\n') % record)
-            elif rtype == 'O':
-                ui.write(('other: %s\n') % record)
-            elif rtype == 'm':
-                driver, mdstate = record.split('\0', 1)
-                ui.write(('merge driver: %s (state "%s")\n')
-                         % (driver, mdstate))
-            elif rtype in 'FDC':
-                r = record.split('\0')
-                f, state, hash, lfile, afile, anode, ofile = r[0:7]
-                if version == 1:
-                    onode = 'not stored in v1 format'
-                    flags = r[7]
-                else:
-                    onode, flags = r[7:9]
-                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
-                         % (f, rtype, state, _hashornull(hash)))
-                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
-                ui.write(('  ancestor path: %s (node %s)\n')
-                         % (afile, _hashornull(anode)))
-                ui.write(('  other path: %s (node %s)\n')
-                         % (ofile, _hashornull(onode)))
-            elif rtype == 'f':
-                filename, rawextras = record.split('\0', 1)
-                extras = rawextras.split('\0')
-                i = 0
-                extrastrings = []
-                while i < len(extras):
-                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
-                    i += 2
-
-                ui.write(('file extras: %s (%s)\n')
-                         % (filename, ', '.join(extrastrings)))
-            elif rtype == 'l':
-                labels = record.split('\0', 2)
-                labels = [l for l in labels if len(l) > 0]
-                ui.write(('labels:\n'))
-                ui.write(('  local: %s\n' % labels[0]))
-                ui.write(('  other: %s\n' % labels[1]))
-                if len(labels) > 2:
-                    ui.write(('  base:  %s\n' % labels[2]))
-            else:
-                ui.write(('unrecognized entry: %s\t%s\n')
-                         % (rtype, record.replace('\0', '\t')))
-
-    # Avoid mergestate.read() since it may raise an exception for unsupported
-    # merge state records. We shouldn't be doing this, but this is OK since this
-    # command is pretty low-level.
-    ms = mergemod.mergestate(repo)
-
-    # sort so that reasonable information is on top
-    v1records = ms._readrecordsv1()
-    v2records = ms._readrecordsv2()
-    order = 'LOml'
-    def key(r):
-        idx = order.find(r[0])
-        if idx == -1:
-            return (1, r[1])
-        else:
-            return (0, idx)
-    v1records.sort(key=key)
-    v2records.sort(key=key)
-
-    if not v1records and not v2records:
-        ui.write(('no merge state found\n'))
-    elif not v2records:
-        ui.note(('no version 2 merge state\n'))
-        printrecords(1)
-    elif ms._v1v2match(v1records, v2records):
-        ui.note(('v1 and v2 states match: using v2\n'))
-        printrecords(2)
-    else:
-        ui.note(('v1 and v2 states mismatch: using v1\n'))
-        printrecords(1)
-        if ui.verbose:
-            printrecords(2)
-
-@command('debugnamecomplete', [], _('NAME...'))
-def debugnamecomplete(ui, repo, *args):
-    '''complete "names" - tags, open branch names, bookmark names'''
-
-    names = set()
-    # since we previously only listed open branches, we will handle that
-    # specially (after this for loop)
-    for name, ns in repo.names.iteritems():
-        if name != 'branches':
-            names.update(ns.listnames(repo))
-    names.update(tag for (tag, heads, tip, closed)
-                 in repo.branchmap().iterbranches() if not closed)
-    completions = set()
-    if not args:
-        args = ['']
-    for a in args:
-        completions.update(n for n in names if n.startswith(a))
-    ui.write('\n'.join(sorted(completions)))
-    ui.write('\n')
-
-@command('debuglocks',
-         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
-          ('W', 'force-wlock', None,
-           _('free the working state lock (DANGEROUS)'))],
-         _('[OPTION]...'))
-def debuglocks(ui, repo, **opts):
-    """show or modify state of locks
-
-    By default, this command will show which locks are held. This
-    includes the user and process holding the lock, the amount of time
-    the lock has been held, and the machine name where the process is
-    running if it's not local.
-
-    Locks protect the integrity of Mercurial's data, so should be
-    treated with care. System crashes or other interruptions may cause
-    locks to not be properly released, though Mercurial will usually
-    detect and remove such stale locks automatically.
-
-    However, detecting stale locks may not always be possible (for
-    instance, on a shared filesystem). Removing locks may also be
-    blocked by filesystem permissions.
-
-    Returns 0 if no locks are held.
-
-    """
-
-    if opts.get('force_lock'):
-        repo.svfs.unlink('lock')
-    if opts.get('force_wlock'):
-        repo.vfs.unlink('wlock')
-    if opts.get('force_lock') or opts.get('force_wlock'):
-        return 0
-
-    now = time.time()
-    held = 0
-
-    def report(vfs, name, method):
-        # this causes stale locks to get reaped for more accurate reporting
-        try:
-            l = method(False)
-        except error.LockHeld:
-            l = None
-
-        if l:
-            l.release()
-        else:
-            try:
-                stat = vfs.lstat(name)
-                age = now - stat.st_mtime
-                user = util.username(stat.st_uid)
-                locker = vfs.readlock(name)
-                if ":" in locker:
-                    host, pid = locker.split(':')
-                    if host == socket.gethostname():
-                        locker = 'user %s, process %s' % (user, pid)
-                    else:
-                        locker = 'user %s, process %s, host %s' \
-                                 % (user, pid, host)
-                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
-                return 1
-            except OSError as e:
-                if e.errno != errno.ENOENT:
-                    raise
-
-        ui.write(("%-6s free\n") % (name + ":"))
-        return 0
-
-    held += report(repo.svfs, "lock", repo.lock)
-    held += report(repo.vfs, "wlock", repo.wlock)
-
-    return held
-
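(For reference, report() above renders one fixed-width line per lock; hypothetical output with the store lock held by a remote process:

    lock:  user alice, process 2314, host build01 (87s)
    wlock: free
)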
-@command('debugobsolete',
-        [('', 'flags', 0, _('markers flag')),
-         ('', 'record-parents', False,
-          _('record parent information for the precursor')),
-         ('r', 'rev', [], _('display markers relevant to REV')),
-         ('', 'index', False, _('display index of the marker')),
-         ('', 'delete', [], _('delete markers specified by indices')),
-        ] + commitopts2 + formatteropts,
-         _('[OBSOLETED [REPLACEMENT ...]]'))
-def debugobsolete(ui, repo, precursor=None, *successors, **opts):
-    """create arbitrary obsolete marker
-
-    With no arguments, displays the list of obsolescence markers."""
-
-    def parsenodeid(s):
-        try:
-            # We do not use revsingle/revrange functions here to accept
-            # arbitrary node identifiers, possibly not present in the
-            # local repository.
-            n = bin(s)
-            if len(n) != len(nullid):
-                raise TypeError()
-            return n
-        except TypeError:
-            raise error.Abort('changeset references must be full hexadecimal '
-                             'node identifiers')
-
-    if opts.get('delete'):
-        indices = []
-        for v in opts.get('delete'):
-            try:
-                indices.append(int(v))
-            except ValueError:
-                raise error.Abort(_('invalid index value: %r') % v,
-                                  hint=_('use integers for indices'))
-
-        if repo.currenttransaction():
-            raise error.Abort(_('cannot delete obsmarkers in the middle '
-                                'of transaction.'))
-
-        with repo.lock():
-            n = repair.deleteobsmarkers(repo.obsstore, indices)
-            ui.write(_('deleted %i obsolescence markers\n') % n)
-
-        return
-
-    if precursor is not None:
-        if opts['rev']:
-            raise error.Abort('cannot select revision when creating marker')
-        metadata = {}
-        metadata['user'] = opts['user'] or ui.username()
-        succs = tuple(parsenodeid(succ) for succ in successors)
-        l = repo.lock()
-        try:
-            tr = repo.transaction('debugobsolete')
-            try:
-                date = opts.get('date')
-                if date:
-                    date = util.parsedate(date)
-                else:
-                    date = None
-                prec = parsenodeid(precursor)
-                parents = None
-                if opts['record_parents']:
-                    if prec not in repo.unfiltered():
-                        raise error.Abort('cannot use --record-parents on '
-                                         'unknown changesets')
-                    parents = repo.unfiltered()[prec].parents()
-                    parents = tuple(p.node() for p in parents)
-                repo.obsstore.create(tr, prec, succs, opts['flags'],
-                                     parents=parents, date=date,
-                                     metadata=metadata)
-                tr.close()
-            except ValueError as exc:
-                raise error.Abort(_('bad obsmarker input: %s') % exc)
-            finally:
-                tr.release()
-        finally:
-            l.release()
-    else:
-        if opts['rev']:
-            revs = scmutil.revrange(repo, opts['rev'])
-            nodes = [repo[r].node() for r in revs]
-            markers = list(obsolete.getmarkers(repo, nodes=nodes))
-            markers.sort(key=lambda x: x._data)
-        else:
-            markers = obsolete.getmarkers(repo)
-
-        markerstoiter = markers
-        isrelevant = lambda m: True
-        if opts.get('rev') and opts.get('index'):
-            markerstoiter = obsolete.getmarkers(repo)
-            markerset = set(markers)
-            isrelevant = lambda m: m in markerset
-
-        fm = ui.formatter('debugobsolete', opts)
-        for i, m in enumerate(markerstoiter):
-            if not isrelevant(m):
-                # marker can be irrelevant when we're iterating over a set
-                # of markers (markerstoiter) which is bigger than the set
-                # of markers we want to display (markers)
-                # this can happen if both --index and --rev options are
-                # provided and thus we need to iterate over all of the markers
-                # to get the correct indices, but only display the ones that
-                # are relevant to --rev value
-                continue
-            fm.startitem()
-            ind = i if opts.get('index') else None
-            cmdutil.showmarker(fm, m, index=ind)
-        fm.end()
-
-@command('debugpathcomplete',
-         [('f', 'full', None, _('complete an entire path')),
-          ('n', 'normal', None, _('show only normal files')),
-          ('a', 'added', None, _('show only added files')),
-          ('r', 'removed', None, _('show only removed files'))],
-         _('FILESPEC...'))
-def debugpathcomplete(ui, repo, *specs, **opts):
-    '''complete part or all of a tracked path
-
-    This command supports shells that offer path name completion. It
-    currently completes only files already known to the dirstate.
-
-    Completion extends only to the next path segment unless
-    --full is specified, in which case entire paths are used.'''
-
-    def complete(path, acceptable):
-        dirstate = repo.dirstate
-        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
-        rootdir = repo.root + pycompat.ossep
-        if spec != repo.root and not spec.startswith(rootdir):
-            return [], []
-        if os.path.isdir(spec):
-            spec += '/'
-        spec = spec[len(rootdir):]
-        fixpaths = pycompat.ossep != '/'
-        if fixpaths:
-            spec = spec.replace(pycompat.ossep, '/')
-        speclen = len(spec)
-        fullpaths = opts['full']
-        files, dirs = set(), set()
-        adddir, addfile = dirs.add, files.add
-        for f, st in dirstate.iteritems():
-            if f.startswith(spec) and st[0] in acceptable:
-                if fixpaths:
-                    f = f.replace('/', pycompat.ossep)
-                if fullpaths:
-                    addfile(f)
-                    continue
-                s = f.find(pycompat.ossep, speclen)
-                if s >= 0:
-                    adddir(f[:s])
-                else:
-                    addfile(f)
-        return files, dirs
-
-    acceptable = ''
-    if opts['normal']:
-        acceptable += 'nm'
-    if opts['added']:
-        acceptable += 'a'
-    if opts['removed']:
-        acceptable += 'r'
-    cwd = repo.getcwd()
-    if not specs:
-        specs = ['.']
-
-    files, dirs = set(), set()
-    for spec in specs:
-        f, d = complete(spec, acceptable or 'nmar')
-        files.update(f)
-        dirs.update(d)
-    files.update(dirs)
-    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
-    ui.write('\n')
-
-@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
-def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
-    '''access the pushkey key/value protocol
-
-    With two args, list the keys in the given namespace.
-
-    With five args, set a key to new if it currently is set to old.
-    Reports success or failure.
-    '''
-
-    target = hg.peer(ui, {}, repopath)
-    if keyinfo:
-        key, old, new = keyinfo
-        r = target.pushkey(namespace, key, old, new)
-        ui.status(str(r) + '\n')
-        return not r
-    else:
-        for k, v in sorted(target.listkeys(namespace).iteritems()):
-            ui.write("%s\t%s\n" % (k.encode('string-escape'),
-                                   v.encode('string-escape')))
-
-@command('debugpvec', [], _('A B'))
-def debugpvec(ui, repo, a, b=None):
-    ca = scmutil.revsingle(repo, a)
-    cb = scmutil.revsingle(repo, b)
-    pa = pvec.ctxpvec(ca)
-    pb = pvec.ctxpvec(cb)
-    if pa == pb:
-        rel = "="
-    elif pa > pb:
-        rel = ">"
-    elif pa < pb:
-        rel = "<"
-    elif pa | pb:
-        rel = "|"
-    ui.write(_("a: %s\n") % pa)
-    ui.write(_("b: %s\n") % pb)
-    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
-    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
-             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
-              pa.distance(pb), rel))
-
-@command('debugrebuilddirstate|debugrebuildstate',
-    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
-     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
-                             'the working copy parent')),
-    ],
-    _('[-r REV]'))
-def debugrebuilddirstate(ui, repo, rev, **opts):
-    """rebuild the dirstate as it would look like for the given revision
-
-    If no revision is specified, the first parent of the working directory is used.
-
-    The dirstate will be set to the files of the given revision.
-    The actual working directory content or existing dirstate
-    information such as adds or removes is not considered.
-
-    ``minimal`` will only rebuild the dirstate status for files that claim to be
-    tracked but are not in the parent manifest, or that exist in the parent
-    manifest but are not in the dirstate. It will not change adds, removes, or
-    modified files that are in the working copy parent.
-
-    One use of this command is to make the next :hg:`status` invocation
-    check the actual file content.
-    """
-    ctx = scmutil.revsingle(repo, rev)
-    with repo.wlock():
-        dirstate = repo.dirstate
-        changedfiles = None
-        # See command doc for what minimal does.
-        if opts.get('minimal'):
-            manifestfiles = set(ctx.manifest().keys())
-            dirstatefiles = set(dirstate)
-            manifestonly = manifestfiles - dirstatefiles
-            dsonly = dirstatefiles - manifestfiles
-            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
-            changedfiles = manifestonly | dsnotadded
-
-        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
-
-@command('debugrebuildfncache', [], '')
-def debugrebuildfncache(ui, repo):
-    """rebuild the fncache file"""
-    repair.rebuildfncache(ui, repo)
-
-@command('debugrename',
-    [('r', 'rev', '', _('revision to debug'), _('REV'))],
-    _('[-r REV] FILE'))
-def debugrename(ui, repo, file1, *pats, **opts):
-    """dump rename information"""
-
-    ctx = scmutil.revsingle(repo, opts.get('rev'))
-    m = scmutil.match(ctx, (file1,) + pats, opts)
-    for abs in ctx.walk(m):
-        fctx = ctx[abs]
-        o = fctx.filelog().renamed(fctx.filenode())
-        rel = m.rel(abs)
-        if o:
-            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
-        else:
-            ui.write(_("%s not renamed\n") % rel)
-
-@command('debugrevlog', debugrevlogopts +
-    [('d', 'dump', False, _('dump index data'))],
-    _('-c|-m|FILE'),
-    optionalrepo=True)
-def debugrevlog(ui, repo, file_=None, **opts):
-    """show data and statistics about a revlog"""
-    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
-
-    if opts.get("dump"):
-        numrevs = len(r)
-        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
-                 " rawsize totalsize compression heads chainlen\n"))
-        ts = 0
-        heads = set()
-
-        for rev in xrange(numrevs):
-            dbase = r.deltaparent(rev)
-            if dbase == -1:
-                dbase = rev
-            cbase = r.chainbase(rev)
-            clen = r.chainlen(rev)
-            p1, p2 = r.parentrevs(rev)
-            rs = r.rawsize(rev)
-            ts = ts + rs
-            heads -= set(r.parentrevs(rev))
-            heads.add(rev)
-            try:
-                compression = ts / r.end(rev)
-            except ZeroDivisionError:
-                compression = 0
-            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
-                     "%11d %5d %8d\n" %
-                     (rev, p1, p2, r.start(rev), r.end(rev),
-                      r.start(dbase), r.start(cbase),
-                      r.start(p1), r.start(p2),
-                      rs, ts, compression, len(heads), clen))
-        return 0
-
-    v = r.version
-    format = v & 0xFFFF
-    flags = []
-    gdelta = False
-    if v & revlog.REVLOGNGINLINEDATA:
-        flags.append('inline')
-    if v & revlog.REVLOGGENERALDELTA:
-        gdelta = True
-        flags.append('generaldelta')
-    if not flags:
-        flags = ['(none)']
-
-    nummerges = 0
-    numfull = 0
-    numprev = 0
-    nump1 = 0
-    nump2 = 0
-    numother = 0
-    nump1prev = 0
-    nump2prev = 0
-    chainlengths = []
-
-    datasize = [None, 0, 0]
-    fullsize = [None, 0, 0]
-    deltasize = [None, 0, 0]
-    chunktypecounts = {}
-    chunktypesizes = {}
-
-    def addsize(size, l):
-        if l[0] is None or size < l[0]:
-            l[0] = size
-        if size > l[1]:
-            l[1] = size
-        l[2] += size
-
-    numrevs = len(r)
-    for rev in xrange(numrevs):
-        p1, p2 = r.parentrevs(rev)
-        delta = r.deltaparent(rev)
-        if format > 0:
-            addsize(r.rawsize(rev), datasize)
-        if p2 != nullrev:
-            nummerges += 1
-        size = r.length(rev)
-        if delta == nullrev:
-            chainlengths.append(0)
-            numfull += 1
-            addsize(size, fullsize)
-        else:
-            chainlengths.append(chainlengths[delta] + 1)
-            addsize(size, deltasize)
-            if delta == rev - 1:
-                numprev += 1
-                if delta == p1:
-                    nump1prev += 1
-                elif delta == p2:
-                    nump2prev += 1
-            elif delta == p1:
-                nump1 += 1
-            elif delta == p2:
-                nump2 += 1
-            elif delta != nullrev:
-                numother += 1
-
-        # Obtain data on the raw chunks in the revlog.
-        chunk = r._chunkraw(rev, rev)[1]
-        if chunk:
-            chunktype = chunk[0]
-        else:
-            chunktype = 'empty'
-
-        if chunktype not in chunktypecounts:
-            chunktypecounts[chunktype] = 0
-            chunktypesizes[chunktype] = 0
-
-        chunktypecounts[chunktype] += 1
-        chunktypesizes[chunktype] += size
-
-    # Adjust size min value for empty cases
-    for size in (datasize, fullsize, deltasize):
-        if size[0] is None:
-            size[0] = 0
-
-    numdeltas = numrevs - numfull
-    numoprev = numprev - nump1prev - nump2prev
-    totalrawsize = datasize[2]
-    datasize[2] /= numrevs
-    fulltotal = fullsize[2]
-    fullsize[2] /= numfull
-    deltatotal = deltasize[2]
-    if numrevs - numfull > 0:
-        deltasize[2] /= numrevs - numfull
-    totalsize = fulltotal + deltatotal
-    avgchainlen = sum(chainlengths) / numrevs
-    maxchainlen = max(chainlengths)
-    compratio = 1
-    if totalsize:
-        compratio = totalrawsize / totalsize
-
-    basedfmtstr = '%%%dd\n'
-    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
-
-    def dfmtstr(max):
-        return basedfmtstr % len(str(max))
-    def pcfmtstr(max, padding=0):
-        return basepcfmtstr % (len(str(max)), ' ' * padding)
-
-    def pcfmt(value, total):
-        if total:
-            return (value, 100 * float(value) / total)
-        else:
-            return value, 100.0
-
-    ui.write(('format : %d\n') % format)
-    ui.write(('flags  : %s\n') % ', '.join(flags))
-
-    ui.write('\n')
-    fmt = pcfmtstr(totalsize)
-    fmt2 = dfmtstr(totalsize)
-    ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
-    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
-    ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
-    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
-    ui.write(('revision size : ') + fmt2 % totalsize)
-    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
-    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
-
-    def fmtchunktype(chunktype):
-        if chunktype == 'empty':
-            return '    %s     : ' % chunktype
-        elif chunktype in string.ascii_letters:
-            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
-        else:
-            return '    0x%s      : ' % hex(chunktype)
-
-    ui.write('\n')
-    ui.write(('chunks        : ') + fmt2 % numrevs)
-    for chunktype in sorted(chunktypecounts):
-        ui.write(fmtchunktype(chunktype))
-        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
-    ui.write(('chunks size   : ') + fmt2 % totalsize)
-    for chunktype in sorted(chunktypecounts):
-        ui.write(fmtchunktype(chunktype))
-        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
-
-    ui.write('\n')
-    fmt = dfmtstr(max(avgchainlen, compratio))
-    ui.write(('avg chain length  : ') + fmt % avgchainlen)
-    ui.write(('max chain length  : ') + fmt % maxchainlen)
-    ui.write(('compression ratio : ') + fmt % compratio)
-
-    if format > 0:
-        ui.write('\n')
-        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
-                 % tuple(datasize))
-    ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
-             % tuple(fullsize))
-    ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
-             % tuple(deltasize))
-
-    if numdeltas > 0:
-        ui.write('\n')
-        fmt = pcfmtstr(numdeltas)
-        fmt2 = pcfmtstr(numdeltas, 4)
-        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
-        if numprev > 0:
-            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
-                                                              numprev))
-            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
-                                                              numprev))
-            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
-                                                              numprev))
-        if gdelta:
-            ui.write(('deltas against p1    : ')
-                     + fmt % pcfmt(nump1, numdeltas))
-            ui.write(('deltas against p2    : ')
-                     + fmt % pcfmt(nump2, numdeltas))
-            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
-                                                             numdeltas))
-
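(The percentage helpers in debugrevlog compose as follows; a worked example with hypothetical counts:

    # pcfmt(25, 200)  -> (25, 12.5)
    # pcfmtstr(200)   -> '%3d (%5.2f%%)\n'
    # together        -> ' 25 (12.50%)'
)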
-@command('debugrevspec',
-    [('', 'optimize', None,
-      _('print parsed tree after optimizing (DEPRECATED)')),
-     ('p', 'show-stage', [],
-      _('print parsed tree at the given stage'), _('NAME')),
-     ('', 'no-optimized', False, _('evaluate tree without optimization')),
-     ('', 'verify-optimized', False, _('verify optimized result')),
-     ],
-    ('REVSPEC'))
-def debugrevspec(ui, repo, expr, **opts):
-    """parse and apply a revision specification
-
-    Use the -p/--show-stage option to print the parsed tree at the given stages.
-
-    Use '-p all' to print the tree at every stage.
-    Use --verify-optimized to compare the optimized result with the unoptimized
-    one. Returns 1 if the optimized result differs.
-    """
-    stages = [
-        ('parsed', lambda tree: tree),
-        ('expanded', lambda tree: revset.expandaliases(ui, tree)),
-        ('concatenated', revset.foldconcat),
-        ('analyzed', revset.analyze),
-        ('optimized', revset.optimize),
-    ]
-    if opts['no_optimized']:
-        stages = stages[:-1]
-    if opts['verify_optimized'] and opts['no_optimized']:
-        raise error.Abort(_('cannot use --verify-optimized with '
-                            '--no-optimized'))
-    stagenames = set(n for n, f in stages)
-
-    showalways = set()
-    showchanged = set()
-    if ui.verbose and not opts['show_stage']:
-        # show parsed tree by --verbose (deprecated)
-        showalways.add('parsed')
-        showchanged.update(['expanded', 'concatenated'])
-        if opts['optimize']:
-            showalways.add('optimized')
-    if opts['show_stage'] and opts['optimize']:
-        raise error.Abort(_('cannot use --optimize with --show-stage'))
-    if opts['show_stage'] == ['all']:
-        showalways.update(stagenames)
-    else:
-        for n in opts['show_stage']:
-            if n not in stagenames:
-                raise error.Abort(_('invalid stage name: %s') % n)
-        showalways.update(opts['show_stage'])
-
-    treebystage = {}
-    printedtree = None
-    tree = revset.parse(expr, lookup=repo.__contains__)
-    for n, f in stages:
-        treebystage[n] = tree = f(tree)
-        if n in showalways or (n in showchanged and tree != printedtree):
-            if opts['show_stage'] or n != 'parsed':
-                ui.write(("* %s:\n") % n)
-            ui.write(revset.prettyformat(tree), "\n")
-            printedtree = tree
-
-    if opts['verify_optimized']:
-        arevs = revset.makematcher(treebystage['analyzed'])(repo)
-        brevs = revset.makematcher(treebystage['optimized'])(repo)
-        if ui.verbose:
-            ui.note(("* analyzed set:\n"), revset.prettyformatset(arevs), "\n")
-            ui.note(("* optimized set:\n"), revset.prettyformatset(brevs), "\n")
-        arevs = list(arevs)
-        brevs = list(brevs)
-        if arevs == brevs:
-            return 0
-        ui.write(('--- analyzed\n'), label='diff.file_a')
-        ui.write(('+++ optimized\n'), label='diff.file_b')
-        sm = difflib.SequenceMatcher(None, arevs, brevs)
-        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag in ('delete', 'replace'):
-                for c in arevs[alo:ahi]:
-                    ui.write('-%s\n' % c, label='diff.deleted')
-            if tag in ('insert', 'replace'):
-                for c in brevs[blo:bhi]:
-                    ui.write('+%s\n' % c, label='diff.inserted')
-            if tag == 'equal':
-                for c in arevs[alo:ahi]:
-                    ui.write(' %s\n' % c)
-        return 1
-
-    func = revset.makematcher(tree)
-    revs = func(repo)
-    if ui.verbose:
-        ui.note(("* set:\n"), revset.prettyformatset(revs), "\n")
-    for c in revs:
-        ui.write("%s\n" % c)
-
-@command('debugsetparents', [], _('REV1 [REV2]'))
-def debugsetparents(ui, repo, rev1, rev2=None):
-    """manually set the parents of the current working directory
-
-    This is useful for writing repository conversion tools, but should
-    be used with care. For example, neither the working directory nor the
-    dirstate is updated, so file status may be incorrect after running this
-    command.
-
-    Returns 0 on success.
-    """
-
-    r1 = scmutil.revsingle(repo, rev1).node()
-    r2 = scmutil.revsingle(repo, rev2, 'null').node()
-
-    with repo.wlock():
-        repo.setparents(r1, r2)
-
-@command('debugdirstate|debugstate',
-    [('', 'nodates', None, _('do not display the saved mtime')),
-    ('', 'datesort', None, _('sort by saved mtime'))],
-    _('[OPTION]...'))
-def debugstate(ui, repo, **opts):
-    """show the contents of the current dirstate"""
-
-    nodates = opts.get('nodates')
-    datesort = opts.get('datesort')
-
-    timestr = ""
-    if datesort:
-        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
-    else:
-        keyfunc = None # sort by filename
-    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
-        if ent[3] == -1:
-            timestr = 'unset               '
-        elif nodates:
-            timestr = 'set                 '
-        else:
-            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
-                                    time.localtime(ent[3]))
-        if ent[1] & 0o20000:
-            mode = 'lnk'
-        else:
-            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
-        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
-    for f in repo.dirstate.copies():
-        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
-
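(Each dirstate entry above is printed as state, mode, size, and mtime in fixed columns, followed by any copy records; a hypothetical sample:

    n 644         12 2017-02-28 11:13:25 foo.txt
    r 644          0 unset                old.txt
    copy: foo.txt -> bar.txt
)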
-@command('debugsub',
-    [('r', 'rev', '',
-     _('revision to check'), _('REV'))],
-    _('[-r REV] [REV]'))
-def debugsub(ui, repo, rev=None):
-    ctx = scmutil.revsingle(repo, rev, None)
-    for k, v in sorted(ctx.substate.items()):
-        ui.write(('path %s\n') % k)
-        ui.write((' source   %s\n') % v[0])
-        ui.write((' revision %s\n') % v[1])
-
-@command('debugsuccessorssets',
-    [],
-    _('[REV]'))
-def debugsuccessorssets(ui, repo, *revs):
-    """show set of successors for revision
-
-    A successors set of changeset A is a consistent group of revisions that
-    succeed A. It contains non-obsolete changesets only.
-
-    In most cases a changeset A has a single successors set containing a single
-    successor (changeset A replaced by A').
-
-    A changeset that is made obsolete with no successors is called "pruned".
-    Such changesets have no successors sets at all.
-
-    A changeset that has been "split" will have a successors set containing
-    more than one successor.
-
-    A changeset that has been rewritten in multiple different ways is called
-    "divergent". Such changesets have multiple successor sets (each of which
-    may also be split, i.e. have multiple successors).
-
-    Results are displayed as follows::
-
-        <rev1>
-            <successors-1A>
-        <rev2>
-            <successors-2A>
-            <successors-2B1> <successors-2B2> <successors-2B3>
-
-    Here rev2 has two possible (i.e. divergent) successors sets. The first
-    holds one element, whereas the second holds three (i.e. the changeset has
-    been split).
-    """
-    # passed to successorssets caching computation from one call to another
-    cache = {}
-    ctx2str = str
-    node2str = short
-    if ui.debug():
-        def ctx2str(ctx):
-            return ctx.hex()
-        node2str = hex
-    for rev in scmutil.revrange(repo, revs):
-        ctx = repo[rev]
-        ui.write('%s\n'% ctx2str(ctx))
-        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
-            if succsset:
-                ui.write('    ')
-                ui.write(node2str(succsset[0]))
-                for node in succsset[1:]:
-                    ui.write(' ')
-                    ui.write(node2str(node))
-            ui.write('\n')
-
-@command('debugtemplate',
-    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
-     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
-    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
-    optionalrepo=True)
-def debugtemplate(ui, repo, tmpl, **opts):
-    """parse and apply a template
-
-    If -r/--rev is given, the template is processed as a log template and
-    applied to the given changesets. Otherwise, it is processed as a generic
-    template.
-
-    Use --verbose to print the parsed tree.
-    """
-    revs = None
-    if opts['rev']:
-        if repo is None:
-            raise error.RepoError(_('there is no Mercurial repository here '
-                                    '(.hg not found)'))
-        revs = scmutil.revrange(repo, opts['rev'])
-
-    props = {}
-    for d in opts['define']:
-        try:
-            k, v = (e.strip() for e in d.split('=', 1))
-            if not k:
-                raise ValueError
-            props[k] = v
-        except ValueError:
-            raise error.Abort(_('malformed keyword definition: %s') % d)
-
-    if ui.verbose:
-        aliases = ui.configitems('templatealias')
-        tree = templater.parse(tmpl)
-        ui.note(templater.prettyformat(tree), '\n')
-        newtree = templater.expandaliases(tree, aliases)
-        if newtree != tree:
-            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
-
-    mapfile = None
-    if revs is None:
-        k = 'debugtemplate'
-        t = formatter.maketemplater(ui, k, tmpl)
-        ui.write(templater.stringify(t(k, **props)))
-    else:
-        displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
-                                                mapfile, buffered=False)
-        for r in revs:
-            displayer.show(repo[r], **props)
-        displayer.close()
-
-@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'), inferrepo=True)
-def debugwalk(ui, repo, *pats, **opts):
-    """show how files match on given patterns"""
-    m = scmutil.match(repo[None], pats, opts)
-    items = list(repo.walk(m))
-    if not items:
-        return
-    f = lambda fn: fn
-    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
-        f = lambda fn: util.normpath(fn)
-    fmt = 'f  %%-%ds  %%-%ds  %%s' % (
-        max([len(abs) for abs in items]),
-        max([len(m.rel(abs)) for abs in items]))
-    for abs in items:
-        line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
-        ui.write("%s\n" % line.rstrip())
-
-@command('debugwireargs',
-    [('', 'three', '', 'three'),
-    ('', 'four', '', 'four'),
-    ('', 'five', '', 'five'),
-    ] + remoteopts,
-    _('REPO [OPTIONS]... [ONE [TWO]]'),
-    norepo=True)
-def debugwireargs(ui, repopath, *vals, **opts):
-    repo = hg.peer(ui, opts, repopath)
-    for opt in remoteopts:
-        del opts[opt[1]]
-    args = {}
-    for k, v in opts.iteritems():
-        if v:
-            args[k] = v
-    # run twice to check that we don't mess up the stream for the next command
-    res1 = repo.debugwireargs(*vals, **args)
-    res2 = repo.debugwireargs(*vals, **args)
-    ui.write("%s\n" % res1)
-    if res1 != res2:
-        ui.warn("%s\n" % res2)
-
 @command('^diff',
     [('r', 'rev', [], _('revision'), _('REV')),
     ('c', 'change', '', _('change made by revision'), _('REV'))
@@ -3119,6 +1945,7 @@
 
     diffopts = patch.diffallopts(ui, opts)
     m = scmutil.match(repo[node2], pats, opts)
+    ui.pager('diff')
     cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
                            listsubrepos=opts.get('subrepos'),
                            root=opts.get('root'))
@@ -3200,6 +2027,7 @@
         ui.note(_('exporting patches:\n'))
     else:
         ui.note(_('exporting patch:\n'))
+    ui.pager('export')
     cmdutil.export(repo, revs, template=opts.get('output'),
                  switch_parent=opts.get('switch_parent'),
                  opts=patch.diffallopts(ui, opts))
@@ -3261,6 +2089,7 @@
     fmt = '%s' + end
 
     m = scmutil.match(ctx, pats, opts)
+    ui.pager('files')
     with ui.formatter('files', opts) as fm:
         return cmdutil.files(ui, ctx, m, fm, fmt, opts.get('subrepos'))
 
@@ -3782,6 +2611,7 @@
                 except error.LookupError:
                     pass
 
+    ui.pager('grep')
     fm = ui.formatter('grep', opts)
     for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
         rev = ctx.rev()
@@ -3897,11 +2727,6 @@
     Returns 0 if successful.
     """
 
-    textwidth = ui.configint('ui', 'textwidth', 78)
-    termwidth = ui.termwidth() - 2
-    if textwidth <= 0 or termwidth < textwidth:
-        textwidth = termwidth
-
     keep = opts.get('system') or []
     if len(keep) == 0:
         if pycompat.sysplatform.startswith('win'):
@@ -3916,36 +2741,8 @@
     if ui.verbose:
         keep.append('verbose')
 
-    section = None
-    subtopic = None
-    if name and '.' in name:
-        name, remaining = name.split('.', 1)
-        remaining = encoding.lower(remaining)
-        if '.' in remaining:
-            subtopic, section = remaining.split('.', 1)
-        else:
-            if name in help.subtopics:
-                subtopic = remaining
-            else:
-                section = remaining
-
-    text = help.help_(ui, name, subtopic=subtopic, **opts)
-
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
-
-    # We could have been given a weird ".foo" section without a name
-    # to look for, or we could have simply failed to found "foo.bar"
-    # because bar isn't a section of foo
-    if section and not (formatted and name):
-        raise error.Abort(_("help section not found"))
-
-    if 'verbose' in pruned:
-        keep.append('omitted')
-    else:
-        keep.append('notomitted')
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
+    formatted = help.formattedhelp(ui, name, keep=keep, **opts)
+    ui.pager('help')
     ui.write(formatted)
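
The dotted-name parsing that the deleted block performed inline now lives in
help.formattedhelp(). A rough, self-contained sketch of that split
(hypothetical helper; the real code uses encoding.lower() and
help.subtopics)::

    def splithelpname(name, subtopics):
        # Split "topic", "topic.section", or "topic.subtopic.section".
        # 'subtopics' stands in for the set of topics known to have
        # subtopics, cf. help.subtopics in the deleted lines above.
        subtopic = section = None
        if name and '.' in name:
            name, remaining = name.split('.', 1)
            remaining = remaining.lower()
            if '.' in remaining:
                subtopic, section = remaining.split('.', 1)
            elif name in subtopics:
                subtopic = remaining
            else:
                section = remaining
        return name, subtopic, section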
 
 
@@ -4127,8 +2924,9 @@
     Import a list of patches and commit them individually (unless
     --no-commit is specified).
 
-    To read a patch from standard input, use "-" as the patch name. If
-    a URL is specified, the patch will be downloaded from there.
+    To read a patch from standard input (stdin), use "-" as the patch
+    name. If a URL is specified, the patch will be downloaded from
+    there.
 
     Import first applies changes to the working directory (unless
     --bypass is specified), import will abort if there are outstanding
@@ -4198,6 +2996,10 @@
 
           hg import incoming-patches.mbox
 
+      - import patches from stdin::
+
+          hg import -
+
       - attempt to exactly restore an exported changeset (not always
         possible)::
 
@@ -4392,6 +3194,7 @@
         if 'bookmarks' not in other.listkeys('namespaces'):
             ui.warn(_("remote doesn't support bookmarks\n"))
             return 0
+        ui.pager('incoming')
         ui.status(_('comparing with %s\n') % util.hidepassword(source))
         return bookmarks.incoming(ui, repo, other)
 
@@ -4458,6 +3261,7 @@
     m = scmutil.match(ctx, pats, opts, default='relglob',
                       badfn=lambda x, y: False)
 
+    ui.pager('locate')
     for abs in ctx.matches(m):
         if opts.get('fullpath'):
             ui.write(repo.wjoin(abs), end)
@@ -4589,7 +3393,7 @@
 
     """
     if opts.get('follow') and opts.get('rev'):
-        opts['rev'] = [revset.formatspec('reverse(::%lr)', opts.get('rev'))]
+        opts['rev'] = [revsetlang.formatspec('reverse(::%lr)', opts.get('rev'))]
         del opts['follow']
 
     if opts.get('graph'):
@@ -4606,6 +3410,7 @@
             endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
 
+    ui.pager('log')
     displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
     for rev in revs:
         if count == limit:
@@ -4648,7 +3453,6 @@
 
     Returns 0 on success.
     """
-
     fm = ui.formatter('manifest', opts)
 
     if opts.get('all'):
@@ -4664,6 +3468,7 @@
             for fn, b, size in repo.store.datafiles():
                 if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix:
                     res.append(fn[plen:-slen])
+        ui.pager('manifest')
         for f in res:
             fm.startitem()
             fm.write("path", '%s\n', f)
@@ -4680,6 +3485,7 @@
     mode = {'l': '644', 'x': '755', '': '644'}
     ctx = scmutil.revsingle(repo, node)
     mf = ctx.manifest()
+    ui.pager('manifest')
     for f in ctx:
         fm.startitem()
         fl = ctx[f].flags()
@@ -4812,6 +3618,7 @@
             return
 
         revdag = cmdutil.graphrevs(repo, o, opts)
+        ui.pager('outgoing')
         displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
         cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges)
         cmdutil.outgoinghooks(ui, repo, other, opts, o)
@@ -4825,6 +3632,7 @@
             ui.warn(_("remote doesn't support bookmarks\n"))
             return 0
         ui.status(_('comparing with %s\n') % util.hidepassword(dest))
+        ui.pager('outgoing')
         return bookmarks.outgoing(ui, repo, other)
 
     repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
@@ -4921,6 +3729,7 @@
 
     Returns 0 on success.
     """
+    ui.pager('paths')
     if search:
         pathitems = [(name, path) for name, path in ui.paths.iteritems()
                      if name == search]
@@ -5268,7 +4077,7 @@
     elif path.pushrev:
         # It doesn't make any sense to specify ancestor revisions. So limit
         # to DAG heads to make discovery simpler.
-        expr = revset.formatspec('heads(%r)', path.pushrev)
+        expr = revsetlang.formatspec('heads(%r)', path.pushrev)
         revs = scmutil.revrange(repo, [expr])
         revs = [repo[rev].node() for rev in revs]
         if not revs:
@@ -5434,6 +4243,8 @@
 
     - :hg:`resolve -l`: list files which had or still have conflicts.
       In the printed list, ``U`` = unresolved and ``R`` = resolved.
+      You can use ``set:unresolved()`` or ``set:resolved()`` to filter
+      the list. See :hg:`help filesets` for details.
 
     .. note::
 
@@ -5457,6 +4268,7 @@
                          hint=('use --all to re-merge all unresolved files'))
 
     if show:
+        ui.pager('resolve')
         fm = ui.formatter('resolve', opts)
         ms = mergemod.mergestate.read(repo)
         m = scmutil.match(repo[None], pats, opts)
@@ -5780,8 +4592,8 @@
     ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
      _('FILE')),
     ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
-    ('', 'stdio', None, _('for remote clients')),
-    ('', 'cmdserver', '', _('for remote clients'), _('MODE')),
+    ('', 'stdio', None, _('for remote clients (ADVANCED)')),
+    ('', 'cmdserver', '', _('for remote clients (ADVANCED)'), _('MODE')),
     ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
     ('', 'style', '', _('template style to use'), _('STYLE')),
     ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
@@ -5946,6 +4758,7 @@
         or ui.configbool('ui', 'statuscopies')) and not opts.get('no_status'):
         copy = copies.pathcopies(repo[node1], repo[node2], m)
 
+    ui.pager('status')
     fm = ui.formatter('status', opts)
     fmt = '%s' + end
     showchar = not opts.get('no_status')
@@ -5976,6 +4789,7 @@
     Returns 0 on success.
     """
 
+    ui.pager('summary')
     ctx = repo[None]
     parents = ctx.parents()
     pnode = parents[0].node()
@@ -6368,6 +5182,7 @@
     Returns 0 on success.
     """
 
+    ui.pager('tags')
     fm = ui.formatter('tags', opts)
     hexfunc = fm.hexfunc
     tagtype = ""
@@ -6467,7 +5282,7 @@
     ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
     ('r', 'rev', '', _('revision'), _('REV'))
      ] + mergetoolopts,
-    _('[-c] [-C] [-d DATE] [[-r] REV]'))
+    _('[-C|-c] [-d DATE] [[-r] REV]'))
 def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False,
            tool=None):
     """update working directory (or switch revisions)
@@ -6488,10 +5303,11 @@
 
     .. container:: verbose
 
-      The following rules apply when the working directory contains
-      uncommitted changes:
-
-      1. If neither -c/--check nor -C/--clean is specified, and if
+      The -C/--clean and -c/--check options control what happens if the
+      working directory contains uncommitted changes.
+      At most one of them can be specified.
+
+      1. If no option is specified, and if
          the requested changeset is an ancestor or descendant of
          the working directory's parent, the uncommitted changes
          are merged into the requested changeset and the merged
@@ -6541,9 +5357,6 @@
         brev = rev
         rev = scmutil.revsingle(repo, rev, rev).rev()
 
-        if check:
-            cmdutil.bailifchanged(repo, merge=False)
-
         repo.ui.setconfig('ui', 'forcemerge', tool, 'update')
 
         return hg.updatetotally(ui, repo, rev, brev, clean=clean, check=check)
@@ -6570,6 +5383,8 @@
 @command('version', [] + formatteropts, norepo=True)
 def version_(ui, **opts):
     """output version and copyright information"""
+    if ui.verbose:
+        ui.pager('version')
     fm = ui.formatter("version", opts)
     fm.startitem()
     fm.write("ver", _("Mercurial Distributed SCM (version %s)\n"),
--- a/mercurial/commandserver.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/commandserver.py	Tue Feb 28 11:13:25 2017 -0800
@@ -304,8 +304,8 @@
     ui.flush()
     newfiles = []
     nullfd = os.open(os.devnull, os.O_RDWR)
-    for f, sysf, mode in [(ui.fin, util.stdin, 'rb'),
-                          (ui.fout, util.stdout, 'wb')]:
+    for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')),
+                          (ui.fout, util.stdout, pycompat.sysstr('wb'))]:
         if f is sysf:
             newfd = os.dup(f.fileno())
             os.dup2(nullfd, f.fileno())
@@ -447,6 +447,7 @@
         self._sock = None
         self._oldsigchldhandler = None
         self._workerpids = set()  # updated by signal handler; do not iterate
+        self._socketunlinked = None
 
     def init(self):
         self._sock = socket.socket(socket.AF_UNIX)
@@ -455,11 +456,17 @@
         o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
         self._oldsigchldhandler = o
         self._servicehandler.printbanner(self.address)
+        self._socketunlinked = False
+
+    def _unlinksocket(self):
+        if not self._socketunlinked:
+            self._servicehandler.unlinksocket(self.address)
+            self._socketunlinked = True
 
     def _cleanup(self):
         signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
         self._sock.close()
-        self._servicehandler.unlinksocket(self.address)
+        self._unlinksocket()
         # don't kill child processes as they have active clients, just wait
         self._reapworkers(0)
 
@@ -470,11 +477,23 @@
             self._cleanup()
 
     def _mainloop(self):
+        exiting = False
         h = self._servicehandler
-        while not h.shouldexit():
+        while True:
+            if not exiting and h.shouldexit():
+                # clients can no longer connect() to the domain socket, so
+                # we stop queuing new requests.
+                # for requests that are queued (connect()-ed, but haven't been
+                # accept()-ed), handle them before exit. otherwise, clients
+                # waiting for recv() will receive ECONNRESET.
+                self._unlinksocket()
+                exiting = True
             try:
                 ready = select.select([self._sock], [], [], h.pollinterval)[0]
                 if not ready:
+                    # only exit if we completed all queued requests
+                    if exiting:
+                        break
                     continue
                 conn, _addr = self._sock.accept()
             except (select.error, socket.error) as inst:
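
The comment above spells out the shutdown ordering this loop depends on:
unlink the socket first so no new client can connect(), then keep
accept()ing until the listen backlog is empty. A minimal standalone sketch
of the same drain-before-exit pattern (invented names, not the
commandserver class itself)::

    import os
    import select
    import socket

    def drainloop(sockpath, handleconn, shouldexit, pollinterval=1.0):
        sock = socket.socket(socket.AF_UNIX)
        sock.bind(sockpath)
        sock.listen(5)
        exiting = False
        while True:
            if not exiting and shouldexit():
                os.unlink(sockpath)  # stop new connect()s; queued ones stay
                exiting = True
            ready = select.select([sock], [], [], pollinterval)[0]
            if not ready:
                if exiting:  # backlog fully drained, safe to stop
                    break
                continue
            conn, _addr = sock.accept()
            handleconn(conn)
            conn.close()
        sock.close()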
--- a/mercurial/context.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/context.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1166,7 +1166,7 @@
     diffinrange = any(stype == '!' for _, stype in filteredblocks)
     return diffinrange, linerange1
 
-def blockancestors(fctx, fromline, toline):
+def blockancestors(fctx, fromline, toline, followfirst=False):
     """Yield ancestors of `fctx` with respect to the block of lines within
     `fromline`-`toline` range.
     """
@@ -1175,9 +1175,11 @@
     while visit:
         c, linerange2 = visit.pop(max(visit))
         pl = c.parents()
+        if followfirst:
+            pl = pl[:1]
         if not pl:
             # The block originates from the initial revision.
-            yield c
+            yield c, linerange2
             continue
         inrange = False
         for p in pl:
@@ -1190,7 +1192,7 @@
                 continue
             visit[p.linkrev(), p.filenode()] = p, linerange1
         if inrange:
-            yield c
+            yield c, linerange2
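
blockancestors() now yields (filectx, linerange) pairs instead of bare
contexts, and followfirst=True limits the walk to first parents. A
hypothetical caller (names invented for illustration), assuming linerange
is the usual (fromline, toline) pair::

    def printblockhistory(fctx, fromline, toline):
        for c, (first, last) in blockancestors(fctx, fromline, toline,
                                               followfirst=True):
            print('%s@%d: lines %d-%d' % (c.path(), c.rev(), first, last))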
 
 class committablectx(basectx):
     """A committablectx object provides common functionality for a context that
--- a/mercurial/crecord.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/crecord.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1375,7 +1375,8 @@
             pass
         helpwin.refresh()
         try:
-            helpwin.getkey()
+            with self.ui.timeblockedsection('crecord'):
+                helpwin.getkey()
         except curses.error:
             pass
 
@@ -1392,7 +1393,8 @@
         self.stdscr.refresh()
         confirmwin.refresh()
         try:
-            response = chr(self.stdscr.getch())
+            with self.ui.timeblockedsection('crecord'):
+                response = chr(self.stdscr.getch())
         except ValueError:
             response = None
 
@@ -1412,7 +1414,8 @@
 
 are you sure you want to review/edit and confirm the selected changes [yn]?
 """)
-        response = self.confirmationwindow(confirmtext)
+        with self.ui.timeblockedsection('crecord'):
+            response = self.confirmationwindow(confirmtext)
         if response is None:
             response = "n"
         if response.lower().startswith("y"):
@@ -1655,7 +1658,8 @@
         while True:
             self.updatescreen()
             try:
-                keypressed = self.statuswin.getkey()
+                with self.ui.timeblockedsection('crecord'):
+                    keypressed = self.statuswin.getkey()
                 if self.errorstr is not None:
                     self.errorstr = None
                     continue
--- a/mercurial/debugcommands.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/debugcommands.py	Tue Feb 28 11:13:25 2017 -0800
@@ -7,15 +7,24 @@
 
 from __future__ import absolute_import
 
+import difflib
+import errno
 import operator
 import os
 import random
+import socket
+import string
+import sys
+import tempfile
+import time
 
 from .i18n import _
 from .node import (
     bin,
     hex,
+    nullhex,
     nullid,
+    nullrev,
     short,
 )
 from . import (
@@ -26,20 +35,31 @@
     context,
     dagparser,
     dagutil,
+    encoding,
     error,
     exchange,
     extensions,
     fileset,
+    formatter,
     hg,
     localrepo,
     lock as lockmod,
+    merge as mergemod,
+    obsolete,
+    policy,
+    pvec,
     pycompat,
     repair,
     revlog,
+    revset,
+    revsetlang,
     scmutil,
     setdiscovery,
     simplemerge,
+    smartset,
+    sslutil,
     streamclone,
+    templater,
     treediscovery,
     util,
 )
@@ -567,6 +587,37 @@
 
     fm.end()
 
+@command('debugdirstate|debugstate',
+    [('', 'nodates', None, _('do not display the saved mtime')),
+    ('', 'datesort', None, _('sort by saved mtime'))],
+    _('[OPTION]...'))
+def debugstate(ui, repo, **opts):
+    """show the contents of the current dirstate"""
+
+    nodates = opts.get('nodates')
+    datesort = opts.get('datesort')
+
+    timestr = ""
+    if datesort:
+        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
+    else:
+        keyfunc = None # sort by filename
+    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
+        if ent[3] == -1:
+            timestr = 'unset               '
+        elif nodates:
+            timestr = 'set                 '
+        else:
+            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
+                                    time.localtime(ent[3]))
+        if ent[1] & 0o20000:
+            mode = 'lnk'
+        else:
+            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
+        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
+    for f in repo.dirstate.copies():
+        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
+
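
The symlink test above works because the dirstate keeps the raw st_mode:
S_IFLNK is 0o120000 and S_IFREG is 0o100000, so the 0o20000 bit alone
separates the two. The same decoding in isolation (umask made an explicit
parameter here; the code above uses util.umask)::

    def describemode(rawmode, umask=0o022):
        if rawmode & 0o20000:  # set for S_IFLNK, clear for S_IFREG
            return 'lnk'
        return '%3o' % (rawmode & 0o777 & ~umask)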
 @command('debugdiscovery',
     [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
@@ -641,7 +692,7 @@
     fm = ui.formatter('debugextensions', opts)
     for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
         isinternal = extensions.ismoduleinternal(extmod)
-        extsource = extmod.__file__
+        extsource = pycompat.fsencode(extmod.__file__)
         if isinternal:
             exttestedwith = []  # never expose magic string to users
         else:
@@ -851,6 +902,1106 @@
             ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write("}\n")
 
+@command('debuginstall', [] + commands.formatteropts, '', norepo=True)
+def debuginstall(ui, **opts):
+    '''test Mercurial installation
+
+    Returns 0 on success.
+    '''
+
+    def writetemp(contents):
+        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
+        f = os.fdopen(fd, pycompat.sysstr("wb"))
+        f.write(contents)
+        f.close()
+        return name
+
+    problems = 0
+
+    fm = ui.formatter('debuginstall', opts)
+    fm.startitem()
+
+    # encoding
+    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
+    err = None
+    try:
+        encoding.fromlocal("test")
+    except error.Abort as inst:
+        err = inst
+        problems += 1
+    fm.condwrite(err, 'encodingerror', _(" %s\n"
+                 " (check that your locale is properly set)\n"), err)
+
+    # Python
+    fm.write('pythonexe', _("checking Python executable (%s)\n"),
+             pycompat.sysexecutable)
+    fm.write('pythonver', _("checking Python version (%s)\n"),
+             ("%d.%d.%d" % sys.version_info[:3]))
+    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
+             os.path.dirname(pycompat.fsencode(os.__file__)))
+
+    security = set(sslutil.supportedprotocols)
+    if sslutil.hassni:
+        security.add('sni')
+
+    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
+             fm.formatlist(sorted(security), name='protocol',
+                           fmt='%s', sep=','))
+
+    # These are warnings, not errors. So don't increment problem count. This
+    # may change in the future.
+    if 'tls1.2' not in security:
+        fm.plain(_('  TLS 1.2 not supported by Python install; '
+                   'network connections lack modern security\n'))
+    if 'sni' not in security:
+        fm.plain(_('  SNI not supported by Python install; may have '
+                   'connectivity issues with some servers\n'))
+
+    # TODO print CA cert info
+
+    # hg version
+    hgver = util.version()
+    fm.write('hgver', _("checking Mercurial version (%s)\n"),
+             hgver.split('+')[0])
+    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
+             '+'.join(hgver.split('+')[1:]))
+
+    # compiled modules
+    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
+             policy.policy)
+    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
+             os.path.dirname(pycompat.fsencode(__file__)))
+
+    err = None
+    try:
+        from . import (
+            base85,
+            bdiff,
+            mpatch,
+            osutil,
+        )
+        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
+    except Exception as inst:
+        err = inst
+        problems += 1
+    fm.condwrite(err, 'extensionserror', " %s\n", err)
+
+    compengines = util.compengines._engines.values()
+    fm.write('compengines', _('checking registered compression engines (%s)\n'),
+             fm.formatlist(sorted(e.name() for e in compengines),
+                           name='compengine', fmt='%s', sep=', '))
+    fm.write('compenginesavail', _('checking available compression engines '
+                                   '(%s)\n'),
+             fm.formatlist(sorted(e.name() for e in compengines
+                                  if e.available()),
+                           name='compengine', fmt='%s', sep=', '))
+    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
+    fm.write('compenginesserver', _('checking available compression engines '
+                                    'for wire protocol (%s)\n'),
+             fm.formatlist([e.name() for e in wirecompengines
+                            if e.wireprotosupport()],
+                           name='compengine', fmt='%s', sep=', '))
+
+    # templates
+    p = templater.templatepaths()
+    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
+    fm.condwrite(not p, '', _(" no template directories found\n"))
+    if p:
+        m = templater.templatepath("map-cmdline.default")
+        if m:
+            # template found, check if it is working
+            err = None
+            try:
+                templater.templater.frommapfile(m)
+            except Exception as inst:
+                err = inst
+                p = None
+            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
+        else:
+            p = None
+        fm.condwrite(p, 'defaulttemplate',
+                     _("checking default template (%s)\n"), m)
+        fm.condwrite(not m, 'defaulttemplatenotfound',
+                     _(" template '%s' not found\n"), "default")
+    if not p:
+        problems += 1
+    fm.condwrite(not p, '',
+                 _(" (templates seem to have been installed incorrectly)\n"))
+
+    # editor
+    editor = ui.geteditor()
+    editor = util.expandpath(editor)
+    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
+    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
+    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
+                 _(" No commit editor set and can't find %s in PATH\n"
+                   " (specify a commit editor in your configuration"
+                   " file)\n"), not cmdpath and editor == 'vi' and editor)
+    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
+                 _(" Can't find editor '%s' in PATH\n"
+                   " (specify a commit editor in your configuration"
+                   " file)\n"), not cmdpath and editor)
+    if not cmdpath and editor != 'vi':
+        problems += 1
+
+    # check username
+    username = None
+    err = None
+    try:
+        username = ui.username()
+    except error.Abort as e:
+        err = e
+        problems += 1
+
+    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
+    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
+        " (specify a username in your configuration file)\n"), err)
+
+    fm.condwrite(not problems, '',
+                 _("no problems detected\n"))
+    if not problems:
+        fm.data(problems=problems)
+    fm.condwrite(problems, 'problems',
+                 _("%d problems detected,"
+                   " please check your install!\n"), problems)
+    fm.end()
+
+    return problems
+
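
Each probe in debuginstall follows one shape: run the check, remember any
exception, and let the formatter's condwrite() pick which line to emit. A
reduced sketch of that shape (hypothetical helper, not part of the
formatter API)::

    def runcheck(fm, probe, okfield, okmsg, errfield, errmsg):
        err = None
        try:
            probe()
        except Exception as inst:
            err = inst
        # condwrite() writes only when its first argument is true.
        fm.condwrite(not err, okfield, okmsg)
        fm.condwrite(err, errfield, errmsg, err)
        return 1 if err else 0  # caller adds this to the problem count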
+@command('debugknown', [], _('REPO ID...'), norepo=True)
+def debugknown(ui, repopath, *ids, **opts):
+    """test whether node ids are known to a repo
+
+    Every ID must be a full-length hex node id string. Returns a list of 0s
+    and 1s indicating unknown/known.
+    """
+    repo = hg.peer(ui, opts, repopath)
+    if not repo.capable('known'):
+        raise error.Abort("known() not supported by target repository")
+    flags = repo.known([bin(s) for s in ids])
+    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
+
+@command('debuglabelcomplete', [], _('LABEL...'))
+def debuglabelcomplete(ui, repo, *args):
+    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
+    commands.debugnamecomplete(ui, repo, *args)
+
+@command('debuglocks',
+         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
+          ('W', 'force-wlock', None,
+           _('free the working state lock (DANGEROUS)'))],
+         _('[OPTION]...'))
+def debuglocks(ui, repo, **opts):
+    """show or modify state of locks
+
+    By default, this command will show which locks are held. This
+    includes the user and process holding the lock, the amount of time
+    the lock has been held, and the machine name where the process is
+    running if it's not local.
+
+    Locks protect the integrity of Mercurial's data, so should be
+    treated with care. System crashes or other interruptions may cause
+    locks to not be properly released, though Mercurial will usually
+    detect and remove such stale locks automatically.
+
+    However, detecting stale locks may not always be possible (for
+    instance, on a shared filesystem). Removing locks may also be
+    blocked by filesystem permissions.
+
+    Returns 0 if no locks are held.
+
+    """
+
+    if opts.get('force_lock'):
+        repo.svfs.unlink('lock')
+    if opts.get('force_wlock'):
+        repo.vfs.unlink('wlock')
+    if opts.get('force_lock') or opts.get('force_wlock'):
+        return 0
+
+    now = time.time()
+    held = 0
+
+    def report(vfs, name, method):
+        # this causes stale locks to get reaped for more accurate reporting
+        try:
+            l = method(False)
+        except error.LockHeld:
+            l = None
+
+        if l:
+            l.release()
+        else:
+            try:
+                stat = vfs.lstat(name)
+                age = now - stat.st_mtime
+                user = util.username(stat.st_uid)
+                locker = vfs.readlock(name)
+                if ":" in locker:
+                    host, pid = locker.split(':')
+                    if host == socket.gethostname():
+                        locker = 'user %s, process %s' % (user, pid)
+                    else:
+                        locker = 'user %s, process %s, host %s' \
+                                 % (user, pid, host)
+                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
+                return 1
+            except OSError as e:
+                if e.errno != errno.ENOENT:
+                    raise
+
+        ui.write(("%-6s free\n") % (name + ":"))
+        return 0
+
+    held += report(repo.svfs, "lock", repo.lock)
+    held += report(repo.vfs, "wlock", repo.wlock)
+
+    return held
+
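
report() above classifies a lock by attempting a nonblocking acquire:
success means the lock was free (and taking it reaps any stale lockfile),
while LockHeld means a live holder whose details can then be read back.
The probe in isolation, assuming a lock method with the same nonblocking
calling convention::

    def probelock(trylock):
        # trylock(False) is assumed to attempt a nonblocking acquire and
        # raise error.LockHeld when another process holds the lock.
        try:
            l = trylock(False)
        except error.LockHeld:
            return 'held'
        l.release()
        return 'free'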
+@command('debugmergestate', [], '')
+def debugmergestate(ui, repo, *args):
+    """print merge state
+
+    Use --verbose to print out information about whether v1 or v2 merge state
+    was chosen."""
+    def _hashornull(h):
+        if h == nullhex:
+            return 'null'
+        else:
+            return h
+
+    def printrecords(version):
+        ui.write(('* version %s records\n') % version)
+        if version == 1:
+            records = v1records
+        else:
+            records = v2records
+
+        for rtype, record in records:
+            # pretty print some record types
+            if rtype == 'L':
+                ui.write(('local: %s\n') % record)
+            elif rtype == 'O':
+                ui.write(('other: %s\n') % record)
+            elif rtype == 'm':
+                driver, mdstate = record.split('\0', 1)
+                ui.write(('merge driver: %s (state "%s")\n')
+                         % (driver, mdstate))
+            elif rtype in 'FDC':
+                r = record.split('\0')
+                f, state, hash, lfile, afile, anode, ofile = r[0:7]
+                if version == 1:
+                    onode = 'not stored in v1 format'
+                    flags = r[7]
+                else:
+                    onode, flags = r[7:9]
+                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
+                         % (f, rtype, state, _hashornull(hash)))
+                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
+                ui.write(('  ancestor path: %s (node %s)\n')
+                         % (afile, _hashornull(anode)))
+                ui.write(('  other path: %s (node %s)\n')
+                         % (ofile, _hashornull(onode)))
+            elif rtype == 'f':
+                filename, rawextras = record.split('\0', 1)
+                extras = rawextras.split('\0')
+                i = 0
+                extrastrings = []
+                while i < len(extras):
+                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
+                    i += 2
+
+                ui.write(('file extras: %s (%s)\n')
+                         % (filename, ', '.join(extrastrings)))
+            elif rtype == 'l':
+                labels = record.split('\0', 2)
+                labels = [l for l in labels if len(l) > 0]
+                ui.write(('labels:\n'))
+                ui.write(('  local: %s\n' % labels[0]))
+                ui.write(('  other: %s\n' % labels[1]))
+                if len(labels) > 2:
+                    ui.write(('  base:  %s\n' % labels[2]))
+            else:
+                ui.write(('unrecognized entry: %s\t%s\n')
+                         % (rtype, record.replace('\0', '\t')))
+
+    # Avoid mergestate.read() since it may raise an exception for unsupported
+    # merge state records. We shouldn't be doing this, but this is OK since this
+    # command is pretty low-level.
+    ms = mergemod.mergestate(repo)
+
+    # sort so that reasonable information is on top
+    v1records = ms._readrecordsv1()
+    v2records = ms._readrecordsv2()
+    order = 'LOml'
+    def key(r):
+        idx = order.find(r[0])
+        if idx == -1:
+            return (1, r[1])
+        else:
+            return (0, idx)
+    v1records.sort(key=key)
+    v2records.sort(key=key)
+
+    if not v1records and not v2records:
+        ui.write(('no merge state found\n'))
+    elif not v2records:
+        ui.note(('no version 2 merge state\n'))
+        printrecords(1)
+    elif ms._v1v2match(v1records, v2records):
+        ui.note(('v1 and v2 states match: using v2\n'))
+        printrecords(2)
+    else:
+        ui.note(('v1 and v2 states mismatch: using v1\n'))
+        printrecords(1)
+        if ui.verbose:
+            printrecords(2)
+
+@command('debugnamecomplete', [], _('NAME...'))
+def debugnamecomplete(ui, repo, *args):
+    '''complete "names" - tags, open branch names, bookmark names'''
+
+    names = set()
+    # since we previously only listed open branches, we will handle that
+    # specially (after this for loop)
+    for name, ns in repo.names.iteritems():
+        if name != 'branches':
+            names.update(ns.listnames(repo))
+    names.update(tag for (tag, heads, tip, closed)
+                 in repo.branchmap().iterbranches() if not closed)
+    completions = set()
+    if not args:
+        args = ['']
+    for a in args:
+        completions.update(n for n in names if n.startswith(a))
+    ui.write('\n'.join(sorted(completions)))
+    ui.write('\n')
+
+@command('debugobsolete',
+        [('', 'flags', 0, _('markers flag')),
+         ('', 'record-parents', False,
+          _('record parent information for the precursor')),
+         ('r', 'rev', [], _('display markers relevant to REV')),
+         ('', 'index', False, _('display index of the marker')),
+         ('', 'delete', [], _('delete markers specified by indices')),
+        ] + commands.commitopts2 + commands.formatteropts,
+         _('[OBSOLETED [REPLACEMENT ...]]'))
+def debugobsolete(ui, repo, precursor=None, *successors, **opts):
+    """create arbitrary obsolete marker
+
+    With no arguments, displays the list of obsolescence markers."""
+
+    def parsenodeid(s):
+        try:
+            # We do not use revsingle/revrange functions here to accept
+            # arbitrary node identifiers, possibly not present in the
+            # local repository.
+            n = bin(s)
+            if len(n) != len(nullid):
+                raise TypeError()
+            return n
+        except TypeError:
+            raise error.Abort('changeset references must be full hexadecimal '
+                             'node identifiers')
+
+    if opts.get('delete'):
+        indices = []
+        for v in opts.get('delete'):
+            try:
+                indices.append(int(v))
+            except ValueError:
+                raise error.Abort(_('invalid index value: %r') % v,
+                                  hint=_('use integers for indices'))
+
+        if repo.currenttransaction():
+            raise error.Abort(_('cannot delete obsmarkers in the middle '
+                                'of transaction.'))
+
+        with repo.lock():
+            n = repair.deleteobsmarkers(repo.obsstore, indices)
+            ui.write(_('deleted %i obsolescence markers\n') % n)
+
+        return
+
+    if precursor is not None:
+        if opts['rev']:
+            raise error.Abort('cannot select revision when creating marker')
+        metadata = {}
+        metadata['user'] = opts['user'] or ui.username()
+        succs = tuple(parsenodeid(succ) for succ in successors)
+        l = repo.lock()
+        try:
+            tr = repo.transaction('debugobsolete')
+            try:
+                date = opts.get('date')
+                if date:
+                    date = util.parsedate(date)
+                else:
+                    date = None
+                prec = parsenodeid(precursor)
+                parents = None
+                if opts['record_parents']:
+                    if prec not in repo.unfiltered():
+                        raise error.Abort('cannot use --record-parents on '
+                                         'unknown changesets')
+                    parents = repo.unfiltered()[prec].parents()
+                    parents = tuple(p.node() for p in parents)
+                repo.obsstore.create(tr, prec, succs, opts['flags'],
+                                     parents=parents, date=date,
+                                     metadata=metadata)
+                tr.close()
+            except ValueError as exc:
+                raise error.Abort(_('bad obsmarker input: %s') % exc)
+            finally:
+                tr.release()
+        finally:
+            l.release()
+    else:
+        if opts['rev']:
+            revs = scmutil.revrange(repo, opts['rev'])
+            nodes = [repo[r].node() for r in revs]
+            markers = list(obsolete.getmarkers(repo, nodes=nodes))
+            markers.sort(key=lambda x: x._data)
+        else:
+            markers = obsolete.getmarkers(repo)
+
+        markerstoiter = markers
+        isrelevant = lambda m: True
+        if opts.get('rev') and opts.get('index'):
+            markerstoiter = obsolete.getmarkers(repo)
+            markerset = set(markers)
+            isrelevant = lambda m: m in markerset
+
+        fm = ui.formatter('debugobsolete', opts)
+        for i, m in enumerate(markerstoiter):
+            if not isrelevant(m):
+                # marker can be irrelevant when we're iterating over a set
+                # of markers (markerstoiter) which is bigger than the set
+                # of markers we want to display (markers)
+                # this can happen if both --index and --rev options are
+                # provided and thus we need to iterate over all of the markers
+                # to get the correct indices, but only display the ones that
+                # are relevant to --rev value
+                continue
+            fm.startitem()
+            ind = i if opts.get('index') else None
+            cmdutil.showmarker(fm, m, index=ind)
+        fm.end()
+
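
parsenodeid() deliberately avoids revsingle()/revrange() so markers can
reference changesets the local repository has never seen; the only
remaining constraint is a full 40-digit hex node. The equivalent check
with just the standard library (hypothetical helper)::

    import binascii

    def parsefullnode(s, nodelen=20):  # 20 bytes, i.e. len(nullid)
        try:
            n = binascii.unhexlify(s)
        except (TypeError, ValueError):  # odd length / non-hex input
            raise ValueError('not a hexadecimal node id: %r' % s)
        if len(n) != nodelen:
            raise ValueError('node id must be full-length: %r' % s)
        return n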
+@command('debugpathcomplete',
+         [('f', 'full', None, _('complete an entire path')),
+          ('n', 'normal', None, _('show only normal files')),
+          ('a', 'added', None, _('show only added files')),
+          ('r', 'removed', None, _('show only removed files'))],
+         _('FILESPEC...'))
+def debugpathcomplete(ui, repo, *specs, **opts):
+    '''complete part or all of a tracked path
+
+    This command supports shells that offer path name completion. It
+    currently completes only files already known to the dirstate.
+
+    Completion extends only to the next path segment unless
+    --full is specified, in which case entire paths are used.'''
+
+    def complete(path, acceptable):
+        dirstate = repo.dirstate
+        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
+        rootdir = repo.root + pycompat.ossep
+        if spec != repo.root and not spec.startswith(rootdir):
+            return [], []
+        if os.path.isdir(spec):
+            spec += '/'
+        spec = spec[len(rootdir):]
+        fixpaths = pycompat.ossep != '/'
+        if fixpaths:
+            spec = spec.replace(pycompat.ossep, '/')
+        speclen = len(spec)
+        fullpaths = opts['full']
+        files, dirs = set(), set()
+        adddir, addfile = dirs.add, files.add
+        for f, st in dirstate.iteritems():
+            if f.startswith(spec) and st[0] in acceptable:
+                if fixpaths:
+                    f = f.replace('/', pycompat.ossep)
+                if fullpaths:
+                    addfile(f)
+                    continue
+                s = f.find(pycompat.ossep, speclen)
+                if s >= 0:
+                    adddir(f[:s])
+                else:
+                    addfile(f)
+        return files, dirs
+
+    acceptable = ''
+    if opts['normal']:
+        acceptable += 'nm'
+    if opts['added']:
+        acceptable += 'a'
+    if opts['removed']:
+        acceptable += 'r'
+    cwd = repo.getcwd()
+    if not specs:
+        specs = ['.']
+
+    files, dirs = set(), set()
+    for spec in specs:
+        f, d = complete(spec, acceptable or 'nmar')
+        files.update(f)
+        dirs.update(d)
+    files.update(dirs)
+    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
+    ui.write('\n')
+
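
The completion rule from the docstring, extend only to the next path
separator unless --full is given, reduces to a single find() per
candidate, as in this minimal sketch over plain strings (invented names)::

    def nextsegments(paths, spec, sep='/', full=False):
        out = set()
        for f in paths:
            if not f.startswith(spec):
                continue
            if full:
                out.add(f)
                continue
            s = f.find(sep, len(spec))
            out.add(f if s < 0 else f[:s])
        return sorted(out)

    # nextsegments(['src/a.c', 'src/b/c.h', 'doc'], 'src/')
    # -> ['src/a.c', 'src/b']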
+@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
+def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
+    '''access the pushkey key/value protocol
+
+    With two args, list the keys in the given namespace.
+
+    With five args, set a key to new if it currently is set to old.
+    Reports success or failure.
+    '''
+
+    target = hg.peer(ui, {}, repopath)
+    if keyinfo:
+        key, old, new = keyinfo
+        r = target.pushkey(namespace, key, old, new)
+        ui.status(str(r) + '\n')
+        return not r
+    else:
+        for k, v in sorted(target.listkeys(namespace).iteritems()):
+            ui.write("%s\t%s\n" % (k.encode('string-escape'),
+                                   v.encode('string-escape')))
+
+@command('debugpvec', [], _('A B'))
+def debugpvec(ui, repo, a, b=None):
+    ca = scmutil.revsingle(repo, a)
+    cb = scmutil.revsingle(repo, b)
+    pa = pvec.ctxpvec(ca)
+    pb = pvec.ctxpvec(cb)
+    if pa == pb:
+        rel = "="
+    elif pa > pb:
+        rel = ">"
+    elif pa < pb:
+        rel = "<"
+    elif pa | pb:
+        rel = "|"
+    else:
+        rel = "?"  # should not happen: pvecs always compare or conflict
+    ui.write(_("a: %s\n") % pa)
+    ui.write(_("b: %s\n") % pb)
+    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
+    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
+             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
+              pa.distance(pb), rel))
+
+@command('debugrebuilddirstate|debugrebuildstate',
+    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
+     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
+                             'the working copy parent')),
+    ],
+    _('[-r REV]'))
+def debugrebuilddirstate(ui, repo, rev, **opts):
+    """rebuild the dirstate as it would look like for the given revision
+
+    If no revision is specified, the first current parent will be used.
+
+    The dirstate will be set to the files of the given revision.
+    The actual working directory content or existing dirstate
+    information such as adds or removes is not considered.
+
+    ``minimal`` will only rebuild the dirstate status for files that claim to be
+    tracked but are not in the parent manifest, or that exist in the parent
+    manifest but are not in the dirstate. It will not change adds, removes, or
+    modified files that are in the working copy parent.
+
+    One use of this command is to make the next :hg:`status` invocation
+    check the actual file content.
+    """
+    ctx = scmutil.revsingle(repo, rev)
+    with repo.wlock():
+        dirstate = repo.dirstate
+        changedfiles = None
+        # See command doc for what minimal does.
+        if opts.get('minimal'):
+            manifestfiles = set(ctx.manifest().keys())
+            dirstatefiles = set(dirstate)
+            manifestonly = manifestfiles - dirstatefiles
+            dsonly = dirstatefiles - manifestfiles
+            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
+            changedfiles = manifestonly | dsnotadded
+
+        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
+
+@command('debugrebuildfncache', [], '')
+def debugrebuildfncache(ui, repo):
+    """rebuild the fncache file"""
+    repair.rebuildfncache(ui, repo)
+
+@command('debugrename',
+    [('r', 'rev', '', _('revision to debug'), _('REV'))],
+    _('[-r REV] FILE'))
+def debugrename(ui, repo, file1, *pats, **opts):
+    """dump rename information"""
+
+    ctx = scmutil.revsingle(repo, opts.get('rev'))
+    m = scmutil.match(ctx, (file1,) + pats, opts)
+    for abs in ctx.walk(m):
+        fctx = ctx[abs]
+        o = fctx.filelog().renamed(fctx.filenode())
+        rel = m.rel(abs)
+        if o:
+            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
+        else:
+            ui.write(_("%s not renamed\n") % rel)
+
+@command('debugrevlog', commands.debugrevlogopts +
+    [('d', 'dump', False, _('dump index data'))],
+    _('-c|-m|FILE'),
+    optionalrepo=True)
+def debugrevlog(ui, repo, file_=None, **opts):
+    """show data and statistics about a revlog"""
+    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
+
+    if opts.get("dump"):
+        numrevs = len(r)
+        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
+                 " rawsize totalsize compression heads chainlen\n"))
+        ts = 0
+        heads = set()
+
+        for rev in xrange(numrevs):
+            dbase = r.deltaparent(rev)
+            if dbase == -1:
+                dbase = rev
+            cbase = r.chainbase(rev)
+            clen = r.chainlen(rev)
+            p1, p2 = r.parentrevs(rev)
+            rs = r.rawsize(rev)
+            ts = ts + rs
+            heads -= set(r.parentrevs(rev))
+            heads.add(rev)
+            try:
+                compression = ts / r.end(rev)
+            except ZeroDivisionError:
+                compression = 0
+            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
+                     "%11d %5d %8d\n" %
+                     (rev, p1, p2, r.start(rev), r.end(rev),
+                      r.start(dbase), r.start(cbase),
+                      r.start(p1), r.start(p2),
+                      rs, ts, compression, len(heads), clen))
+        return 0
+
+    v = r.version
+    format = v & 0xFFFF
+    flags = []
+    gdelta = False
+    if v & revlog.REVLOGNGINLINEDATA:
+        flags.append('inline')
+    if v & revlog.REVLOGGENERALDELTA:
+        gdelta = True
+        flags.append('generaldelta')
+    if not flags:
+        flags = ['(none)']
+
+    nummerges = 0
+    numfull = 0
+    numprev = 0
+    nump1 = 0
+    nump2 = 0
+    numother = 0
+    nump1prev = 0
+    nump2prev = 0
+    chainlengths = []
+
+    datasize = [None, 0, 0]
+    fullsize = [None, 0, 0]
+    deltasize = [None, 0, 0]
+    chunktypecounts = {}
+    chunktypesizes = {}
+
+    def addsize(size, l):
+        if l[0] is None or size < l[0]:
+            l[0] = size
+        if size > l[1]:
+            l[1] = size
+        l[2] += size
+
+    numrevs = len(r)
+    for rev in xrange(numrevs):
+        p1, p2 = r.parentrevs(rev)
+        delta = r.deltaparent(rev)
+        if format > 0:
+            addsize(r.rawsize(rev), datasize)
+        if p2 != nullrev:
+            nummerges += 1
+        size = r.length(rev)
+        if delta == nullrev:
+            chainlengths.append(0)
+            numfull += 1
+            addsize(size, fullsize)
+        else:
+            chainlengths.append(chainlengths[delta] + 1)
+            addsize(size, deltasize)
+            if delta == rev - 1:
+                numprev += 1
+                if delta == p1:
+                    nump1prev += 1
+                elif delta == p2:
+                    nump2prev += 1
+            elif delta == p1:
+                nump1 += 1
+            elif delta == p2:
+                nump2 += 1
+            elif delta != nullrev:
+                numother += 1
+
+        # Obtain data on the raw chunks in the revlog.
+        chunk = r._chunkraw(rev, rev)[1]
+        if chunk:
+            chunktype = chunk[0]
+        else:
+            chunktype = 'empty'
+
+        if chunktype not in chunktypecounts:
+            chunktypecounts[chunktype] = 0
+            chunktypesizes[chunktype] = 0
+
+        chunktypecounts[chunktype] += 1
+        chunktypesizes[chunktype] += size
+
+    # Adjust size min value for empty cases
+    for size in (datasize, fullsize, deltasize):
+        if size[0] is None:
+            size[0] = 0
+
+    numdeltas = numrevs - numfull
+    numoprev = numprev - nump1prev - nump2prev
+    totalrawsize = datasize[2]
+    datasize[2] /= numrevs
+    fulltotal = fullsize[2]
+    fullsize[2] /= numfull
+    deltatotal = deltasize[2]
+    if numrevs - numfull > 0:
+        deltasize[2] /= numrevs - numfull
+    totalsize = fulltotal + deltatotal
+    avgchainlen = sum(chainlengths) / numrevs
+    maxchainlen = max(chainlengths)
+    compratio = 1
+    if totalsize:
+        compratio = totalrawsize / totalsize
+
+    basedfmtstr = '%%%dd\n'
+    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
+
+    def dfmtstr(max):
+        return basedfmtstr % len(str(max))
+    def pcfmtstr(max, padding=0):
+        return basepcfmtstr % (len(str(max)), ' ' * padding)
+
+    def pcfmt(value, total):
+        if total:
+            return (value, 100 * float(value) / total)
+        else:
+            return value, 100.0
+
+    ui.write(('format : %d\n') % format)
+    ui.write(('flags  : %s\n') % ', '.join(flags))
+
+    ui.write('\n')
+    fmt = pcfmtstr(totalsize)
+    fmt2 = dfmtstr(totalsize)
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
+    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
+    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
+    ui.write(('revision size : ') + fmt2 % totalsize)
+    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
+    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
+
+    def fmtchunktype(chunktype):
+        if chunktype == 'empty':
+            return '    %s     : ' % chunktype
+        elif chunktype in string.ascii_letters:
+            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
+        else:
+            return '    0x%s      : ' % hex(chunktype)
+
+    ui.write('\n')
+    ui.write(('chunks        : ') + fmt2 % numrevs)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
+    ui.write(('chunks size   : ') + fmt2 % totalsize)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
+
+    ui.write('\n')
+    fmt = dfmtstr(max(avgchainlen, compratio))
+    ui.write(('avg chain length  : ') + fmt % avgchainlen)
+    ui.write(('max chain length  : ') + fmt % maxchainlen)
+    ui.write(('compression ratio : ') + fmt % compratio)
+
+    if format > 0:
+        ui.write('\n')
+        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
+                 % tuple(datasize))
+    ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
+             % tuple(fullsize))
+    ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
+             % tuple(deltasize))
+
+    if numdeltas > 0:
+        ui.write('\n')
+        fmt = pcfmtstr(numdeltas)
+        fmt2 = pcfmtstr(numdeltas, 4)
+        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
+        if numprev > 0:
+            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
+                                                              numprev))
+            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
+                                                              numprev))
+            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
+                                                              numprev))
+        if gdelta:
+            ui.write(('deltas against p1    : ')
+                     + fmt % pcfmt(nump1, numdeltas))
+            ui.write(('deltas against p2    : ')
+                     + fmt % pcfmt(nump2, numdeltas))
+            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
+                                                             numdeltas))
+
+@command('debugrevspec',
+    [('', 'optimize', None,
+      _('print parsed tree after optimizing (DEPRECATED)')),
+     ('p', 'show-stage', [],
+      _('print parsed tree at the given stage'), _('NAME')),
+     ('', 'no-optimized', False, _('evaluate tree without optimization')),
+     ('', 'verify-optimized', False, _('verify optimized result')),
+     ],
+    ('REVSPEC'))
+def debugrevspec(ui, repo, expr, **opts):
+    """parse and apply a revision specification
+
+    Use the -p/--show-stage option to print the parsed tree at the given
+    stages. Use '-p all' to print the tree at every stage.
+
+    Use --verify-optimized to compare the optimized result with the unoptimized
+    one. Returns 1 if the optimized result differs.
+    """
+    stages = [
+        ('parsed', lambda tree: tree),
+        ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
+        ('concatenated', revsetlang.foldconcat),
+        ('analyzed', revsetlang.analyze),
+        ('optimized', revsetlang.optimize),
+    ]
+    if opts['no_optimized']:
+        stages = stages[:-1]
+    if opts['verify_optimized'] and opts['no_optimized']:
+        raise error.Abort(_('cannot use --verify-optimized with '
+                            '--no-optimized'))
+    stagenames = set(n for n, f in stages)
+
+    showalways = set()
+    showchanged = set()
+    if ui.verbose and not opts['show_stage']:
+        # show parsed tree by --verbose (deprecated)
+        showalways.add('parsed')
+        showchanged.update(['expanded', 'concatenated'])
+        if opts['optimize']:
+            showalways.add('optimized')
+    if opts['show_stage'] and opts['optimize']:
+        raise error.Abort(_('cannot use --optimize with --show-stage'))
+    if opts['show_stage'] == ['all']:
+        showalways.update(stagenames)
+    else:
+        for n in opts['show_stage']:
+            if n not in stagenames:
+                raise error.Abort(_('invalid stage name: %s') % n)
+        showalways.update(opts['show_stage'])
+
+    treebystage = {}
+    printedtree = None
+    tree = revsetlang.parse(expr, lookup=repo.__contains__)
+    for n, f in stages:
+        treebystage[n] = tree = f(tree)
+        if n in showalways or (n in showchanged and tree != printedtree):
+            if opts['show_stage'] or n != 'parsed':
+                ui.write(("* %s:\n") % n)
+            ui.write(revsetlang.prettyformat(tree), "\n")
+            printedtree = tree
+
+    if opts['verify_optimized']:
+        arevs = revset.makematcher(treebystage['analyzed'])(repo)
+        brevs = revset.makematcher(treebystage['optimized'])(repo)
+        if ui.verbose:
+            ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
+            ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
+        arevs = list(arevs)
+        brevs = list(brevs)
+        if arevs == brevs:
+            return 0
+        ui.write(('--- analyzed\n'), label='diff.file_a')
+        ui.write(('+++ optimized\n'), label='diff.file_b')
+        sm = difflib.SequenceMatcher(None, arevs, brevs)
+        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
+            if tag in ('delete', 'replace'):
+                for c in arevs[alo:ahi]:
+                    ui.write('-%s\n' % c, label='diff.deleted')
+            if tag in ('insert', 'replace'):
+                for c in brevs[blo:bhi]:
+                    ui.write('+%s\n' % c, label='diff.inserted')
+            if tag == 'equal':
+                for c in arevs[alo:ahi]:
+                    ui.write(' %s\n' % c)
+        return 1
+
+    func = revset.makematcher(tree)
+    revs = func(repo)
+    if ui.verbose:
+        ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
+    for c in revs:
+        ui.write("%s\n" % c)
+
+@command('debugsetparents', [], _('REV1 [REV2]'))
+def debugsetparents(ui, repo, rev1, rev2=None):
+    """manually set the parents of the current working directory
+
+    This is useful for writing repository conversion tools, but should
+    be used with care. For example, neither the working directory nor the
+    dirstate is updated, so file status may be incorrect after running this
+    command.
+
+    Returns 0 on success.
+    """
+
+    r1 = scmutil.revsingle(repo, rev1).node()
+    r2 = scmutil.revsingle(repo, rev2, 'null').node()
+
+    with repo.wlock():
+        repo.setparents(r1, r2)
+
+@command('debugsub',
+    [('r', 'rev', '',
+     _('revision to check'), _('REV'))],
+    _('[-r REV] [REV]'))
+def debugsub(ui, repo, rev=None):
+    ctx = scmutil.revsingle(repo, rev, None)
+    for k, v in sorted(ctx.substate.items()):
+        ui.write(('path %s\n') % k)
+        ui.write((' source   %s\n') % v[0])
+        ui.write((' revision %s\n') % v[1])
+
+@command('debugsuccessorssets',
+    [],
+    _('[REV]'))
+def debugsuccessorssets(ui, repo, *revs):
+    """show set of successors for revision
+
+    A successors set of changeset A is a consistent group of revisions that
+    succeed A. It contains non-obsolete changesets only.
+
+    In most cases a changeset A has a single successors set containing a single
+    successor (changeset A replaced by A').
+
+    A changeset that is made obsolete with no successors is called "pruned".
+    Such changesets have no successors sets at all.
+
+    A changeset that has been "split" will have a successors set containing
+    more than one successor.
+
+    A changeset that has been rewritten in multiple different ways is called
+    "divergent". Such changesets have multiple successor sets (each of which
+    may also be split, i.e. have multiple successors).
+
+    Results are displayed as follows::
+
+        <rev1>
+            <successors-1A>
+        <rev2>
+            <successors-2A>
+            <successors-2B1> <successors-2B2> <successors-2B3>
+
+    Here rev2 has two possible (i.e. divergent) successors sets. The first
+    holds one element, whereas the second holds three (i.e. the changeset has
+    been split).
+    """
+    # passed to successorssets caching computation from one call to another
+    cache = {}
+    ctx2str = str
+    node2str = short
+    if ui.debug():
+        def ctx2str(ctx):
+            return ctx.hex()
+        node2str = hex
+    for rev in scmutil.revrange(repo, revs):
+        ctx = repo[rev]
+        ui.write('%s\n' % ctx2str(ctx))
+        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
+            if succsset:
+                ui.write('    ')
+                ui.write(node2str(succsset[0]))
+                for node in succsset[1:]:
+                    ui.write(' ')
+                    ui.write(node2str(node))
+            ui.write('\n')
+
+@command('debugtemplate',
+    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
+     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
+    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
+    optionalrepo=True)
+def debugtemplate(ui, repo, tmpl, **opts):
+    """parse and apply a template
+
+    If -r/--rev is given, the template is processed as a log template and
+    applied to the given changesets. Otherwise, it is processed as a generic
+    template.
+
+    Use --verbose to print the parsed tree.
+    """
+    revs = None
+    if opts['rev']:
+        if repo is None:
+            raise error.RepoError(_('there is no Mercurial repository here '
+                                    '(.hg not found)'))
+        revs = scmutil.revrange(repo, opts['rev'])
+
+    props = {}
+    for d in opts['define']:
+        try:
+            k, v = (e.strip() for e in d.split('=', 1))
+            if not k:
+                raise ValueError
+            props[k] = v
+        except ValueError:
+            raise error.Abort(_('malformed keyword definition: %s') % d)
+
+    if ui.verbose:
+        aliases = ui.configitems('templatealias')
+        tree = templater.parse(tmpl)
+        ui.note(templater.prettyformat(tree), '\n')
+        newtree = templater.expandaliases(tree, aliases)
+        if newtree != tree:
+            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
+
+    mapfile = None
+    if revs is None:
+        k = 'debugtemplate'
+        t = formatter.maketemplater(ui, k, tmpl)
+        ui.write(templater.stringify(t(k, **props)))
+    else:
+        displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
+                                                mapfile, buffered=False)
+        for r in revs:
+            displayer.show(repo[r], **props)
+        displayer.close()
+
 @command('debugupgraderepo', [
     ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
     ('', 'run', False, _('performs an upgrade')),
@@ -875,3 +2026,43 @@
     unable to access the repository should be low.
     """
     return repair.upgraderepo(ui, repo, run=run, optimize=optimize)
+
+@command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
+         inferrepo=True)
+def debugwalk(ui, repo, *pats, **opts):
+    """show how files match on given patterns"""
+    m = scmutil.match(repo[None], pats, opts)
+    items = list(repo.walk(m))
+    if not items:
+        return
+    f = lambda fn: fn
+    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
+        f = lambda fn: util.normpath(fn)
+    fmt = 'f  %%-%ds  %%-%ds  %%s' % (
+        max([len(abs) for abs in items]),
+        max([len(m.rel(abs)) for abs in items]))
+    for abs in items:
+        line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
+        ui.write("%s\n" % line.rstrip())
+
+@command('debugwireargs',
+    [('', 'three', '', 'three'),
+    ('', 'four', '', 'four'),
+    ('', 'five', '', 'five'),
+    ] + commands.remoteopts,
+    _('REPO [OPTIONS]... [ONE [TWO]]'),
+    norepo=True)
+def debugwireargs(ui, repopath, *vals, **opts):
+    repo = hg.peer(ui, opts, repopath)
+    for opt in commands.remoteopts:
+        del opts[opt[1]]
+    args = {}
+    for k, v in opts.iteritems():
+        if v:
+            args[k] = v
+    # run twice to check that we don't mess up the stream for the next command
+    res1 = repo.debugwireargs(*vals, **args)
+    res2 = repo.debugwireargs(*vals, **args)
+    ui.write("%s\n" % res1)
+    if res1 != res2:
+        ui.warn("%s\n" % res2)
--- a/mercurial/destutil.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/destutil.py	Tue Feb 28 11:13:25 2017 -0800
@@ -12,37 +12,10 @@
     bookmarks,
     error,
     obsolete,
+    scmutil,
 )
 
-def _destupdatevalidate(repo, rev, clean, check):
-    """validate that the destination comply to various rules
-
-    This exists as its own function to help wrapping from extensions."""
-    wc = repo[None]
-    p1 = wc.p1()
-    if not clean:
-        # Check that the update is linear.
-        #
-        # Mercurial do not allow update-merge for non linear pattern
-        # (that would be technically possible but was considered too confusing
-        # for user a long time ago)
-        #
-        # See mercurial.merge.update for details
-        if p1.rev() not in repo.changelog.ancestors([rev], inclusive=True):
-            dirty = wc.dirty(missing=True)
-            foreground = obsolete.foreground(repo, [p1.node()])
-            if not repo[rev].node() in foreground:
-                if dirty:
-                    msg = _("uncommitted changes")
-                    hint = _("commit and merge, or update --clean to"
-                             " discard changes")
-                    raise error.UpdateAbort(msg, hint=hint)
-                elif not check:  # destination is not a descendant.
-                    msg = _("not a linear update")
-                    hint = _("merge or update --check to force update")
-                    raise error.UpdateAbort(msg, hint=hint)
-
-def _destupdateobs(repo, clean, check):
+def _destupdateobs(repo, clean):
     """decide of an update destination from obsolescence markers"""
     node = None
     wc = repo[None]
@@ -78,7 +51,7 @@
                 movemark = repo['.'].node()
     return node, movemark, None
 
-def _destupdatebook(repo, clean, check):
+def _destupdatebook(repo, clean):
     """decide on an update destination from active bookmark"""
     # we also move the active bookmark, if any
     activemark = None
@@ -87,7 +60,7 @@
         activemark = node
     return node, movemark, activemark
 
-def _destupdatebranch(repo, clean, check):
+def _destupdatebranch(repo, clean):
     """decide on an update destination from current branch
 
     This ignores closed branch heads.
@@ -113,7 +86,7 @@
         node = repo['.'].node()
     return node, movemark, None
 
-def _destupdatebranchfallback(repo, clean, check):
+def _destupdatebranchfallback(repo, clean):
     """decide on an update destination from closed heads in current branch"""
     wc = repo[None]
     currentbranch = wc.branch()
@@ -143,7 +116,7 @@
                      'branchfallback': _destupdatebranchfallback,
                      }
 
-def destupdate(repo, clean=False, check=False):
+def destupdate(repo, clean=False):
     """destination for bare update operation
 
     return (rev, movemark, activemark)
@@ -156,13 +129,11 @@
     node = movemark = activemark = None
 
     for step in destupdatesteps:
-        node, movemark, activemark = destupdatestepmap[step](repo, clean, check)
+        node, movemark, activemark = destupdatestepmap[step](repo, clean)
         if node is not None:
             break
     rev = repo[node].rev()
 
-    _destupdatevalidate(repo, rev, clean, check)
-
     return rev, movemark, activemark
 
 msgdestmerge = {
@@ -372,9 +343,6 @@
 
 def desthistedit(ui, repo):
     """Default base revision to edit for `hg histedit`."""
-    # Avoid cycle: scmutil -> revset -> destutil
-    from . import scmutil
-
     default = ui.config('histedit', 'defaultrev', histeditdefaultrevset)
     if default:
         revs = scmutil.revrange(repo, [default])
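
With the validation hook gone, destupdate() above reduces to a chain of step
functions tried in order until one yields a destination node. A minimal
sketch of that dispatch pattern; the step names and stub return values here
are illustrative, not Mercurial's actual obsolescence/bookmark/branch logic::

    def _byobsolescence(repo, clean):
        return None, None, None  # stub: no successor to move to

    def _bybookmark(repo, clean):
        return 'feature-tip', 'old-node', 'feature'  # stub: bookmark wins

    def _bybranch(repo, clean):
        return 'branch-head', None, None

    steps = ['obsolescence', 'bookmark', 'branch']
    stepmap = {'obsolescence': _byobsolescence,
               'bookmark': _bybookmark,
               'branch': _bybranch}

    def destupdate(repo, clean=False):
        node = movemark = activemark = None
        for step in steps:
            node, movemark, activemark = stepmap[step](repo, clean)
            if node is not None:
                break  # first step with an answer decides the destination
        return node, movemark, activemark

    print(destupdate(None))  # ('feature-tip', 'old-node', 'feature')
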
--- a/mercurial/dirstate.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/dirstate.py	Tue Feb 28 11:13:25 2017 -0800
@@ -23,6 +23,7 @@
     pathutil,
     pycompat,
     scmutil,
+    txnutil,
     util,
 )
 
@@ -59,22 +60,6 @@
         return set(fname for fname, e in dmap.iteritems()
                    if e[0] != 'n' or e[3] == -1)
 
-def _trypending(root, vfs, filename):
-    '''Open  file to be read according to HG_PENDING environment variable
-
-    This opens '.pending' of specified 'filename' only when HG_PENDING
-    is equal to 'root'.
-
-    This returns '(fp, is_pending_opened)' tuple.
-    '''
-    if root == encoding.environ.get('HG_PENDING'):
-        try:
-            return (vfs('%s.pending' % filename), True)
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-    return (vfs(filename), False)
-
 class dirstate(object):
 
     def __init__(self, opener, ui, root, validate):
@@ -385,7 +370,7 @@
             raise
 
     def _opendirstatefile(self):
-        fp, mode = _trypending(self._root, self._opener, self._filename)
+        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
         if self._pendingmode is not None and self._pendingmode != mode:
             fp.close()
             raise error.Abort(_('working directory state may be '
--- a/mercurial/dispatch.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/dispatch.py	Tue Feb 28 11:13:25 2017 -0800
@@ -33,6 +33,7 @@
     extensions,
     fancyopts,
     fileset,
+    help,
     hg,
     hook,
     profiling,
@@ -123,7 +124,7 @@
         return -1
 
     msg = ' '.join(' ' in a and repr(a) or a for a in req.args)
-    starttime = time.time()
+    starttime = util.timer()
     ret = None
     try:
         ret = _runcatch(req)
@@ -135,8 +136,11 @@
                 raise
         ret = -1
     finally:
-        duration = time.time() - starttime
+        duration = util.timer() - starttime
         req.ui.flush()
+        if req.ui.logblockedtimes:
+            req.ui._blockedtimes['command_duration'] = duration * 1000
+            req.ui.log('uiblocked', 'ui blocked ms', **req.ui._blockedtimes)
         req.ui.log("commandfinish", "%s exited %s after %0.2f seconds\n",
                    msg, ret or 0, duration)
     return ret
@@ -239,19 +243,24 @@
         _formatparse(ui.warn, inst)
         return -1
     except error.UnknownCommand as inst:
-        ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
+        nocmdmsg = _("hg: unknown command '%s'\n") % inst.args[0]
         try:
             # check if the command is in a disabled extension
             # (but don't check for extensions themselves)
-            commands.help_(ui, inst.args[0], unknowncmd=True)
+            formatted = help.formattedhelp(ui, inst.args[0], unknowncmd=True)
+            ui.warn(nocmdmsg)
+            ui.write(formatted)
         except (error.UnknownCommand, error.Abort):
             suggested = False
             if len(inst.args) == 2:
                 sim = _getsimilar(inst.args[1], inst.args[0])
                 if sim:
+                    ui.warn(nocmdmsg)
                     _reportsimilar(ui.warn, sim)
                     suggested = True
             if not suggested:
+                ui.pager('help')
+                ui.warn(nocmdmsg)
                 commands.help_(ui, 'shortlist')
     except IOError:
         raise
@@ -655,107 +664,120 @@
     rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
     path, lui = _getlocal(ui, rpath)
 
-    # Configure extensions in phases: uisetup, extsetup, cmdtable, and
-    # reposetup. Programs like TortoiseHg will call _dispatch several
-    # times so we keep track of configured extensions in _loaded.
-    extensions.loadall(lui)
-    exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
-    # Propagate any changes to lui.__class__ by extensions
-    ui.__class__ = lui.__class__
-
-    # (uisetup and extsetup are handled in extensions.loadall)
-
-    for name, module in exts:
-        for objname, loadermod, loadername in extraloaders:
-            extraobj = getattr(module, objname, None)
-            if extraobj is not None:
-                getattr(loadermod, loadername)(ui, name, extraobj)
-        _loaded.add(name)
-
-    # (reposetup is handled in hg.repository)
-
     # Side-effect of accessing is that the debugcommands module is guaranteed
     # to be imported and commands.table is populated.
     debugcommands.command
 
-    addaliases(lui, commands.table)
-
-    # All aliases and commands are completely defined, now.
-    # Check abbreviation/ambiguity of shell alias.
-    shellaliasfn = _checkshellalias(lui, ui, args)
-    if shellaliasfn:
-        with profiling.maybeprofile(lui):
-            return shellaliasfn()
-
-    # check for fallback encoding
-    fallback = lui.config('ui', 'fallbackencoding')
-    if fallback:
-        encoding.fallbackencoding = fallback
-
-    fullargs = args
-    cmd, func, args, options, cmdoptions = _parse(lui, args)
-
-    if options["config"]:
-        raise error.Abort(_("option --config may not be abbreviated!"))
-    if options["cwd"]:
-        raise error.Abort(_("option --cwd may not be abbreviated!"))
-    if options["repository"]:
-        raise error.Abort(_(
-            "option -R has to be separated from other options (e.g. not -qR) "
-            "and --repository may only be abbreviated as --repo!"))
-
-    if options["encoding"]:
-        encoding.encoding = options["encoding"]
-    if options["encodingmode"]:
-        encoding.encodingmode = options["encodingmode"]
-    if options["time"]:
-        def get_times():
-            t = os.times()
-            if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
-                t = (t[0], t[1], t[2], t[3], time.clock())
-            return t
-        s = get_times()
-        def print_time():
-            t = get_times()
-            ui.warn(_("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
-                (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
-        atexit.register(print_time)
-
     uis = set([ui, lui])
 
     if req.repo:
         uis.add(req.repo.ui)
 
-    if options['verbose'] or options['debug'] or options['quiet']:
-        for opt in ('verbose', 'debug', 'quiet'):
-            val = str(bool(options[opt]))
-            for ui_ in uis:
-                ui_.setconfig('ui', opt, val, '--' + opt)
-
-    if options['profile']:
+    if '--profile' in args:
         for ui_ in uis:
             ui_.setconfig('profiling', 'enabled', 'true', '--profile')
 
-    if options['traceback']:
-        for ui_ in uis:
-            ui_.setconfig('ui', 'traceback', 'on', '--traceback')
+    with profiling.maybeprofile(lui):
+        # Configure extensions in phases: uisetup, extsetup, cmdtable, and
+        # reposetup. Programs like TortoiseHg will call _dispatch several
+        # times so we keep track of configured extensions in _loaded.
+        extensions.loadall(lui)
+        exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
+        # Propagate any changes to lui.__class__ by extensions
+        ui.__class__ = lui.__class__
+
+        # (uisetup and extsetup are handled in extensions.loadall)
+
+        for name, module in exts:
+            for objname, loadermod, loadername in extraloaders:
+                extraobj = getattr(module, objname, None)
+                if extraobj is not None:
+                    getattr(loadermod, loadername)(ui, name, extraobj)
+            _loaded.add(name)
+
+        # (reposetup is handled in hg.repository)
+
+        addaliases(lui, commands.table)
+
+        # All aliases and commands are completely defined, now.
+        # Check abbreviation/ambiguity of shell alias.
+        shellaliasfn = _checkshellalias(lui, ui, args)
+        if shellaliasfn:
+            return shellaliasfn()
+
+        # check for fallback encoding
+        fallback = lui.config('ui', 'fallbackencoding')
+        if fallback:
+            encoding.fallbackencoding = fallback
+
+        fullargs = args
+        cmd, func, args, options, cmdoptions = _parse(lui, args)
+
+        if options["config"]:
+            raise error.Abort(_("option --config may not be abbreviated!"))
+        if options["cwd"]:
+            raise error.Abort(_("option --cwd may not be abbreviated!"))
+        if options["repository"]:
+            raise error.Abort(_(
+                "option -R has to be separated from other options (e.g. not "
+                "-qR) and --repository may only be abbreviated as --repo!"))
 
-    if options['noninteractive']:
-        for ui_ in uis:
-            ui_.setconfig('ui', 'interactive', 'off', '-y')
+        if options["encoding"]:
+            encoding.encoding = options["encoding"]
+        if options["encodingmode"]:
+            encoding.encodingmode = options["encodingmode"]
+        if options["time"]:
+            def get_times():
+                t = os.times()
+                if t[4] == 0.0:
+                    # Windows leaves this as zero, so use time.clock()
+                    t = (t[0], t[1], t[2], t[3], time.clock())
+                return t
+            s = get_times()
+            def print_time():
+                t = get_times()
+                ui.warn(
+                    _("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
+                    (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
+            atexit.register(print_time)
 
-    if cmdoptions.get('insecure', False):
+        if options['verbose'] or options['debug'] or options['quiet']:
+            for opt in ('verbose', 'debug', 'quiet'):
+                val = str(bool(options[opt]))
+                for ui_ in uis:
+                    ui_.setconfig('ui', opt, val, '--' + opt)
+
+        if options['traceback']:
+            for ui_ in uis:
+                ui_.setconfig('ui', 'traceback', 'on', '--traceback')
+
+        if options['noninteractive']:
+            for ui_ in uis:
+                ui_.setconfig('ui', 'interactive', 'off', '-y')
+
+        if util.parsebool(options['pager']):
+            ui.pager('internal-always-' + cmd)
+        elif options['pager'] != 'auto':
+            ui.disablepager()
+
+        if cmdoptions.get('insecure', False):
+            for ui_ in uis:
+                ui_.insecureconnections = True
+
+        # setup color handling
+        coloropt = options['color']
         for ui_ in uis:
-            ui_.insecureconnections = True
+            if coloropt:
+                ui_.setconfig('ui', 'color', coloropt, '--color')
+            color.setup(ui_)
 
-    if options['version']:
-        return commands.version_(ui)
-    if options['help']:
-        return commands.help_(ui, cmd, command=cmd is not None)
-    elif not cmd:
-        return commands.help_(ui, 'shortlist')
+        if options['version']:
+            return commands.version_(ui)
+        if options['help']:
+            return commands.help_(ui, cmd, command=cmd is not None)
+        elif not cmd:
+            return commands.help_(ui, 'shortlist')
 
-    with profiling.maybeprofile(lui):
         repo = None
         cmdpats = args[:]
         if not func.norepo:
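
The dispatch changes above swap time.time() for util.timer() when measuring
command duration and, if ui.logblockedtimes is set, report the accumulated
blocked times in milliseconds. A small sketch of the same bookkeeping, using
timeit.default_timer() as a stand-in for util.timer()::

    import time
    import timeit

    def runcommand(func):
        """Run func and log its duration, as dispatch.run() does."""
        starttime = timeit.default_timer()  # stand-in for util.timer()
        blockedtimes = {}
        try:
            return func()
        finally:
            duration = timeit.default_timer() - starttime
            blockedtimes['command_duration'] = duration * 1000  # ms
            print('command finished after %0.2f seconds' % duration)
            print('blocked times (ms): %r' % blockedtimes)

    runcommand(lambda: time.sleep(0.1))
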
--- a/mercurial/exchange.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/exchange.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1724,9 +1724,15 @@
     if url.startswith('remote:http:') or url.startswith('remote:https:'):
         captureoutput = True
     try:
+        # note: outside bundle1, 'heads' is expected to be empty and this
+        # 'check_heads' call will be a no-op
         check_heads(repo, heads, 'uploading changes')
         # push can proceed
-        if util.safehasattr(cg, 'params'):
+        if not util.safehasattr(cg, 'params'):
+            # legacy case: bundle1 (changegroup 01)
+            lockandtr[1] = repo.lock()
+            r = cg.apply(repo, source, url)
+        else:
             r = None
             try:
                 def gettransaction():
@@ -1765,9 +1771,6 @@
                                                   mandatory=False)
                         parts.append(part)
                 raise
-        else:
-            lockandtr[1] = repo.lock()
-            r = cg.apply(repo, source, url)
     finally:
         lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
         if recordout is not None:
--- a/mercurial/extensions.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/extensions.py	Tue Feb 28 11:13:25 2017 -0800
@@ -362,7 +362,8 @@
     '''find paths of disabled extensions. returns a dict of {name: path}
     removes /__init__.py from packages if strip_init is True'''
     import hgext
-    extpath = os.path.dirname(os.path.abspath(hgext.__file__))
+    extpath = os.path.dirname(
+        os.path.abspath(pycompat.fsencode(hgext.__file__)))
     try: # might not be a filesystem path
         files = os.listdir(extpath)
     except OSError:
--- a/mercurial/filemerge.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/filemerge.py	Tue Feb 28 11:13:25 2017 -0800
@@ -489,6 +489,9 @@
     args = util.interpolate(r'\$', replace, args,
                             lambda s: util.shellquote(util.localpath(s)))
     cmd = toolpath + ' ' + args
+    if _toolbool(ui, tool, "gui"):
+        repo.ui.status(_('running merge tool %s for file %s\n') %
+                       (tool, fcd.path()))
     repo.ui.debug('launching merge tool: %s\n' % cmd)
     r = ui.system(cmd, cwd=repo.root, environ=env)
     repo.ui.debug('merge tool returned: %s\n' % r)
@@ -582,7 +585,7 @@
         pre = "%s~%s." % (os.path.basename(fullbase), prefix)
         (fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext)
         data = repo.wwritedata(ctx.path(), ctx.data())
-        f = os.fdopen(fd, "wb")
+        f = os.fdopen(fd, pycompat.sysstr("wb"))
         f.write(data)
         f.close()
         return name
--- a/mercurial/graphmod.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/graphmod.py	Tue Feb 28 11:13:25 2017 -0800
@@ -22,6 +22,7 @@
 from .node import nullrev
 from . import (
     revset,
+    smartset,
     util,
 )
 
@@ -67,8 +68,8 @@
             if gp is None:
                 # precompute slow query as we know reachableroots() goes
                 # through all revs (issue4782)
-                if not isinstance(revs, revset.baseset):
-                    revs = revset.baseset(revs)
+                if not isinstance(revs, smartset.baseset):
+                    revs = smartset.baseset(revs)
                 gp = gpcache[mpar] = sorted(set(revset.reachableroots(
                     repo, revs, [mpar])))
             if not gp:
--- a/mercurial/help.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/help.py	Tue Feb 28 11:13:25 2017 -0800
@@ -33,14 +33,17 @@
     webcommands,
 )
 
-_exclkeywords = [
+_exclkeywords = set([
+    "(ADVANCED)",
     "(DEPRECATED)",
     "(EXPERIMENTAL)",
+    # i18n: "(ADVANCED)" is a keyword, must be translated consistently
+    _("(ADVANCED)"),
     # i18n: "(DEPRECATED)" is a keyword, must be translated consistently
     _("(DEPRECATED)"),
     # i18n: "(EXPERIMENTAL)" is a keyword, must be translated consistently
     _("(EXPERIMENTAL)"),
-    ]
+    ])
 
 def listexts(header, exts, indent=1, showdeprecated=False):
     '''return a text listing of the given extensions'''
@@ -230,6 +233,7 @@
      loaddoc('scripting')),
     (['internals'], _("Technical implementation topics"),
      internalshelp),
+    (['pager'], _("Pager Support"), loaddoc('pager')),
 ])
 
 # Maps topics with sub-topics to a list of their sub-topics.
@@ -605,3 +609,47 @@
         rst.extend(helplist(None, **opts))
 
     return ''.join(rst)
+
+def formattedhelp(ui, name, keep=None, unknowncmd=False, full=True, **opts):
+    """get help for a given topic (as a dotted name) as rendered rst
+
+    Either returns the rendered help text or raises an exception.
+    """
+    if keep is None:
+        keep = []
+    fullname = name
+    section = None
+    subtopic = None
+    if name and '.' in name:
+        name, remaining = name.split('.', 1)
+        remaining = encoding.lower(remaining)
+        if '.' in remaining:
+            subtopic, section = remaining.split('.', 1)
+        else:
+            if name in subtopics:
+                subtopic = remaining
+            else:
+                section = remaining
+    textwidth = ui.configint('ui', 'textwidth', 78)
+    termwidth = ui.termwidth() - 2
+    if textwidth <= 0 or termwidth < textwidth:
+        textwidth = termwidth
+    text = help_(ui, name,
+                 subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
+
+    formatted, pruned = minirst.format(text, textwidth, keep=keep,
+                                       section=section)
+
+    # We could have been given a weird ".foo" section without a name
+    # to look for, or we could have simply failed to find "foo.bar"
+    # because bar isn't a section of foo
+    if section and not (formatted and name):
+        raise error.Abort(_("help section not found: %s") % fullname)
+
+    if 'verbose' in pruned:
+        keep.append('omitted')
+    else:
+        keep.append('notomitted')
+    formatted, pruned = minirst.format(text, textwidth, keep=keep,
+                                       section=section)
+    return formatted
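
formattedhelp() above splits a dotted help name into topic, optional
subtopic, and optional section, treating the middle component as a subtopic
only when the topic is known to have subtopics. The parse in isolation, with
a hypothetical subtopics table::

    subtopics = {'internals': ['bundles', 'changegroups']}  # hypothetical

    def splithelpname(name):
        """Split 'topic[.subtopic][.section]' as formattedhelp() does."""
        subtopic = section = None
        if name and '.' in name:
            name, remaining = name.split('.', 1)
            remaining = remaining.lower()
            if '.' in remaining:
                subtopic, section = remaining.split('.', 1)
            elif name in subtopics:
                subtopic = remaining
            else:
                section = remaining
        return name, subtopic, section

    print(splithelpname('config.paths'))       # ('config', None, 'paths')
    print(splithelpname('internals.bundles'))  # ('internals', 'bundles', None)
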
--- a/mercurial/help/config.txt	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/help/config.txt	Tue Feb 28 11:13:25 2017 -0800
@@ -56,6 +56,7 @@
 
   - ``<repo>/.hg/hgrc`` (per-repository)
   - ``$HOME/.hgrc`` (per-user)
+  - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
   - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
   - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
   - ``/etc/mercurial/hgrc`` (per-system)
@@ -276,7 +277,7 @@
 will let you do ``hg echo foo`` to have ``foo`` printed in your
 terminal. A better example might be::
 
-   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm
+   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
 
 which will make ``hg purge`` delete all unknown files in the
 repository in the same manner as the purge extension.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/pager.txt	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,35 @@
+Some Mercurial commands produce a lot of output, and Mercurial will
+attempt to use a pager to make those commands more pleasant.
+
+To set the pager that should be used, set the configuration option::
+
+  [pager]
+  pager = less -FRX
+
+If no pager is set, the pager extension uses the environment variable
+$PAGER. If neither pager.pager nor $PAGER is set, a default pager
+will be used, typically `more`.
+
+You can disable the pager for certain commands by adding them to the
+pager.ignore list::
+
+  [pager]
+  ignore = version, help, update
+
+To ignore global commands like :hg:`version` or :hg:`help`, you have
+to specify them in your user configuration file.
+
+To control whether the pager is used at all for an individual command,
+you can use --pager=<value>::
+
+  - use as needed: `auto`.
+  - require the pager: `yes` or `on`.
+  - suppress the pager: `no` or `off` (any unrecognized value
+    will also work).
+
+To globally turn off all attempts to use a pager, set::
+
+  [pager]
+  enable = false
+
+which will prevent the pager from running.
--- a/mercurial/help/patterns.txt	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/help/patterns.txt	Tue Feb 28 11:13:25 2017 -0800
@@ -13,7 +13,10 @@
 
 To use a plain path name without any pattern matching, start it with
 ``path:``. These path names must completely match starting at the
-current repository root.
+current repository root, and when the path points to a directory, it is matched
+recursively. To match all files in a directory non-recursively (not including
+any files in subdirectories), ``rootfilesin:`` can be used, specifying a
+path relative to the repository root.
 
 To use an extended glob, start a name with ``glob:``. Globs are rooted
 at the current directory; a glob such as ``*.c`` will only match files
@@ -39,12 +42,15 @@
 All patterns, except for ``glob:`` specified in command line (not for
 ``-I`` or ``-X`` options), can match also against directories: files
 under matched directories are treated as matched.
+For ``-I`` and ``-X`` options, ``glob:`` will match directories recursively.
 
 Plain examples::
 
-  path:foo/bar   a name bar in a directory named foo in the root
-                 of the repository
-  path:path:name a file or directory named "path:name"
+  path:foo/bar        a name bar in a directory named foo in the root
+                      of the repository
+  path:path:name      a file or directory named "path:name"
+  rootfilesin:foo/bar the files in a directory called foo/bar, but not any
+                      files in its subdirectories, and not a file named bar
+                      in the directory foo
 
 Glob examples::
 
@@ -52,6 +58,8 @@
   *.c            any name ending in ".c" in the current directory
   **.c           any name ending in ".c" in any subdirectory of the
                  current directory including itself.
+  foo/*          any file in directory foo plus all its subdirectories,
+                 recursively
   foo/*.c        any name ending in ".c" in the directory foo
   foo/**.c       any name ending in ".c" in any subdirectory of foo
                  including itself.
--- a/mercurial/hg.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/hg.py	Tue Feb 28 11:13:25 2017 -0800
@@ -732,13 +732,15 @@
         movemarkfrom = None
         warndest = False
         if checkout is None:
-            updata = destutil.destupdate(repo, clean=clean, check=check)
+            updata = destutil.destupdate(repo, clean=clean)
             checkout, movemarkfrom, brev = updata
             warndest = True
 
         if clean:
             ret = _clean(repo, checkout)
         else:
+            if check:
+                cmdutil.bailifchanged(repo, merge=False)
             ret = _update(repo, checkout)
 
         if not ret and movemarkfrom:
@@ -802,7 +804,7 @@
         if not chlist:
             ui.status(_("no changes found\n"))
             return subreporecurse()
-
+        ui.pager('incoming')
         displayer = cmdutil.show_changeset(ui, other, opts, buffered)
         displaychlist(other, chlist, displayer)
         displayer.close()
@@ -870,6 +872,7 @@
 
     if opts.get('newest_first'):
         o.reverse()
+    ui.pager('outgoing')
     displayer = cmdutil.show_changeset(ui, repo, opts)
     count = 0
     for n in o:
--- a/mercurial/hgweb/webcommands.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/hgweb/webcommands.py	Tue Feb 28 11:13:25 2017 -0800
@@ -32,7 +32,9 @@
     error,
     graphmod,
     revset,
+    revsetlang,
     scmutil,
+    smartset,
     templatefilters,
     templater,
     util,
@@ -238,20 +240,20 @@
 
         revdef = 'reverse(%s)' % query
         try:
-            tree = revset.parse(revdef)
+            tree = revsetlang.parse(revdef)
         except error.ParseError:
             # can't parse to a revset tree
             return MODE_KEYWORD, query
 
-        if revset.depth(tree) <= 2:
+        if revsetlang.depth(tree) <= 2:
             # no revset syntax used
             return MODE_KEYWORD, query
 
         if any((token, (value or '')[:3]) == ('string', 're:')
-                    for token, value, pos in revset.tokenize(revdef)):
+               for token, value, pos in revsetlang.tokenize(revdef)):
             return MODE_KEYWORD, query
 
-        funcsused = revset.funcsused(tree)
+        funcsused = revsetlang.funcsused(tree)
         if not funcsused.issubset(revset.safesymbols):
             return MODE_KEYWORD, query
 
@@ -752,13 +754,14 @@
     if fctx is not None:
         path = fctx.path()
         ctx = fctx.changectx()
+    basectx = ctx.p1()
 
     parity = paritygen(web.stripecount)
     style = web.config('web', 'style', 'paper')
     if 'style' in req.form:
         style = req.form['style'][0]
 
-    diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
+    diffs = webutil.diffs(web.repo, tmpl, ctx, basectx, [path], parity, style)
     if fctx is not None:
         rename = webutil.renamelink(fctx)
         ctx = fctx
@@ -1148,7 +1151,7 @@
         # We have to feed a baseset to dagwalker as it is expecting smartset
         # object. This does not have a big impact on hgweb performance itself
         # since hgweb graphing code is not itself lazy yet.
-        dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
+        dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
         # As we said one line above... not lazy.
         tree = list(graphmod.colored(dag, web.repo))
 
--- a/mercurial/hgweb/webutil.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/hgweb/webutil.py	Tue Feb 28 11:13:25 2017 -0800
@@ -442,14 +442,7 @@
         m = match.always(repo.root, repo.getcwd())
 
     diffopts = patch.diffopts(repo.ui, untrusted=True)
-    if basectx is None:
-        parents = ctx.parents()
-        if parents:
-            node1 = parents[0].node()
-        else:
-            node1 = nullid
-    else:
-        node1 = basectx.node()
+    node1 = basectx.node()
     node2 = ctx.node()
 
     block = []
--- a/mercurial/hook.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/hook.py	Tue Feb 28 11:13:25 2017 -0800
@@ -9,7 +9,6 @@
 
 import os
 import sys
-import time
 
 from .i18n import _
 from . import (
@@ -88,7 +87,7 @@
                 % (hname, funcname))
 
     ui.note(_("calling hook %s: %s\n") % (hname, funcname))
-    starttime = time.time()
+    starttime = util.timer()
 
     try:
         r = obj(ui=ui, repo=repo, hooktype=name, **args)
@@ -106,7 +105,7 @@
         ui.traceback()
         return True, True
     finally:
-        duration = time.time() - starttime
+        duration = util.timer() - starttime
         ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
                name, funcname, duration)
     if r:
@@ -118,7 +117,7 @@
 def _exthook(ui, repo, name, cmd, args, throw):
     ui.note(_("running hook %s: %s\n") % (name, cmd))
 
-    starttime = time.time()
+    starttime = util.timer()
     env = {}
 
     # make in-memory changes visible to external process
@@ -145,7 +144,7 @@
         cwd = pycompat.getcwd()
     r = ui.system(cmd, environ=env, cwd=cwd)
 
-    duration = time.time() - starttime
+    duration = util.timer() - starttime
     ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n',
            name, cmd, duration)
     if r:
--- a/mercurial/httppeer.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/httppeer.py	Tue Feb 28 11:13:25 2017 -0800
@@ -20,6 +20,7 @@
     bundle2,
     error,
     httpconnection,
+    pycompat,
     statichttprepo,
     url,
     util,
@@ -327,7 +328,7 @@
         try:
             # dump bundle to disk
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
-            fh = os.fdopen(fd, "wb")
+            fh = os.fdopen(fd, pycompat.sysstr("wb"))
             d = fp.read(4096)
             while d:
                 fh.write(d)
--- a/mercurial/i18n.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/i18n.py	Tue Feb 28 11:13:25 2017 -0800
@@ -21,7 +21,7 @@
 if getattr(sys, 'frozen', None) is not None:
     module = pycompat.sysexecutable
 else:
-    module = __file__
+    module = pycompat.fsencode(__file__)
 
 try:
     unicode
--- a/mercurial/keepalive.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/keepalive.py	Tue Feb 28 11:13:25 2017 -0800
@@ -310,14 +310,16 @@
         try:
             if req.has_data():
                 data = req.get_data()
-                h.putrequest('POST', req.get_selector(), **skipheaders)
+                h.putrequest(
+                    req.get_method(), req.get_selector(), **skipheaders)
                 if 'content-type' not in headers:
                     h.putheader('Content-type',
                                 'application/x-www-form-urlencoded')
                 if 'content-length' not in headers:
                     h.putheader('Content-length', '%d' % len(data))
             else:
-                h.putrequest('GET', req.get_selector(), **skipheaders)
+                h.putrequest(
+                    req.get_method(), req.get_selector(), **skipheaders)
         except socket.error as err:
             raise urlerr.urlerror(err)
         for k, v in headers.items():
--- a/mercurial/localrepo.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/localrepo.py	Tue Feb 28 11:13:25 2017 -0800
@@ -28,6 +28,7 @@
     bundle2,
     changegroup,
     changelog,
+    color,
     context,
     dirstate,
     dirstateguard,
@@ -50,11 +51,13 @@
     pushkey,
     repoview,
     revset,
+    revsetlang,
     scmutil,
     store,
     subrepo,
     tags as tagsmod,
     transaction,
+    txnutil,
     util,
 )
 
@@ -270,7 +273,7 @@
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
-            extensions.loadall(self.ui)
+            self._loadextensions()
         except IOError:
             pass
 
@@ -283,6 +286,7 @@
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
+        color.setup(self.ui)
 
         # Add compression engines.
         for name in util.compengines:
@@ -371,6 +375,9 @@
     def close(self):
         self._writecaches()
 
+    def _loadextensions(self):
+        extensions.loadall(self.ui)
+
     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()
@@ -509,10 +516,8 @@
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.svfs)
-        if 'HG_PENDING' in encoding.environ:
-            p = encoding.environ['HG_PENDING']
-            if p.startswith(self.root):
-                c.readpending('00changelog.i.a')
+        if txnutil.mayhavepending(self.root):
+            c.readpending('00changelog.i.a')
         return c
 
     def _constructmanifest(self):
@@ -570,15 +575,16 @@
         '''Find revisions matching a revset.
 
         The revset is specified as a string ``expr`` that may contain
-        %-formatting to escape certain types. See ``revset.formatspec``.
+        %-formatting to escape certain types. See ``revsetlang.formatspec``.
 
         Revset aliases from the configuration are not expanded. To expand
-        user aliases, consider calling ``scmutil.revrange()``.
+        user aliases, consider calling ``scmutil.revrange()`` or
+        ``repo.anyrevs([expr], user=True)``.
 
         Returns a revset.abstractsmartset, which is a list-like interface
         that contains integer revisions.
         '''
-        expr = revset.formatspec(expr, *args)
+        expr = revsetlang.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self)
 
@@ -594,6 +600,18 @@
         for r in self.revs(expr, *args):
             yield self[r]
 
+    def anyrevs(self, specs, user=False):
+        '''Find revisions matching one of the given revsets.
+
+        Revset aliases from the configuration are not expanded by default. To
+        expand user aliases, specify ``user=True``.
+        '''
+        if user:
+            m = revset.matchany(self.ui, specs, repo=self)
+        else:
+            m = revset.matchany(None, specs)
+        return m(self)
+
     def url(self):
         return 'file:' + self.root
 
@@ -1852,6 +1870,11 @@
                                   listsubrepos)
 
     def heads(self, start=None):
+        if start is None:
+            cl = self.changelog
+            headrevs = reversed(cl.headrevs())
+            return [cl.node(rev) for rev in headrevs]
+
         heads = self.changelog.heads(start)
         # sort the output in rev descending order
         return sorted(heads, key=self.changelog.rev, reverse=True)
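
The new anyrevs() above ORs several revset specs together via
revset.matchany(), expanding user [revsetalias] definitions only when
user=True. A hedged usage sketch; the repository path is hypothetical::

    from mercurial import hg, ui as uimod

    u = uimod.ui()                            # plain ui, default config
    repo = hg.repository(u, '/path/to/repo')  # hypothetical local repo

    # matchany() ORs the specs; user=True also expands [revsetalias] entries
    for r in repo.anyrevs(['draft()', 'head()'], user=True):
        print(r)
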
--- a/mercurial/lock.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/lock.py	Tue Feb 28 11:13:25 2017 -0800
@@ -9,15 +9,33 @@
 
 import contextlib
 import errno
+import os
 import socket
 import time
 import warnings
 
 from . import (
     error,
+    pycompat,
     util,
 )
 
+def _getlockprefix():
+    """Return a string which is used to differentiate pid namespaces
+
+    It's useful to detect "dead" processes and remove stale locks with
+    confidence. Typically it's just hostname. On modern linux, we include an
+    extra Linux-specific pid namespace identifier.
+    """
+    result = socket.gethostname()
+    if pycompat.sysplatform.startswith('linux'):
+        try:
+            result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
+        except OSError as ex:
+            if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
+                raise
+    return result
+
 class lock(object):
     '''An advisory lock held by one process to control access to a set
     of files.  Non-cooperating processes or incorrectly written scripts
@@ -99,7 +117,7 @@
             self.held += 1
             return
         if lock._host is None:
-            lock._host = socket.gethostname()
+            lock._host = _getlockprefix()
         lockname = '%s:%s' % (lock._host, self.pid)
         retry = 5
         while not self.held and retry:
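
_getlockprefix() above tells apart identically numbered pids in different
Linux pid namespaces by appending the inode of /proc/self/ns/pid to the
hostname. The probe in isolation::

    import errno
    import os
    import socket
    import sys

    def lockprefix():
        """Hostname, plus a pid-namespace id on Linux (sketch)."""
        result = socket.gethostname()
        if sys.platform.startswith('linux'):
            try:
                # the namespace inode differs between pid namespaces, so
                # equal pids in different containers yield different names
                result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
            except OSError as ex:
                if ex.errno not in (errno.ENOENT, errno.EACCES,
                                    errno.ENOTDIR):
                    raise
        return result

    print('%s:%d' % (lockprefix(), os.getpid()))  # e.g. 'host/ef4c:12345'
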
--- a/mercurial/manifest.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/manifest.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1386,7 +1386,7 @@
         return self._revlog().parents(self._node)
 
     def read(self):
-        if not self._data:
+        if self._data is None:
             if self._node == revlog.nullid:
                 self._data = manifestdict()
             else:
@@ -1484,7 +1484,7 @@
         return self._repo.manifestlog._revlog.dirlog(self._dir)
 
     def read(self):
-        if not self._data:
+        if self._data is None:
             rl = self._revlog()
             if self._node == revlog.nullid:
                 self._data = treemanifest()
--- a/mercurial/match.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/match.py	Tue Feb 28 11:13:25 2017 -0800
@@ -104,7 +104,10 @@
         a pattern is one of:
         'glob:<glob>' - a glob relative to cwd
         're:<regexp>' - a regular expression
-        'path:<path>' - a path relative to repository root
+        'path:<path>' - a path relative to repository root, which is matched
+                        recursively
+        'rootfilesin:<path>' - a path relative to repository root, which is
+                        matched non-recursively (will not match subdirectories)
         'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
         'relpath:<path>' - a path relative to cwd
         'relre:<regexp>' - a regexp that needn't match the start of a name
@@ -122,9 +125,12 @@
         self._always = False
         self._pathrestricted = bool(include or exclude or patterns)
         self._warn = warn
+
+        # roots are directories which are recursively included/excluded.
         self._includeroots = set()
+        self._excluderoots = set()
+        # dirs are directories which are non-recursively included.
         self._includedirs = set(['.'])
-        self._excluderoots = set()
 
         if badfn is not None:
             self.bad = badfn
@@ -134,14 +140,20 @@
             kindpats = self._normalize(include, 'glob', root, cwd, auditor)
             self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
                                               listsubrepos, root)
-            self._includeroots.update(_roots(kindpats))
-            self._includedirs.update(util.dirs(self._includeroots))
+            roots, dirs = _rootsanddirs(kindpats)
+            self._includeroots.update(roots)
+            self._includedirs.update(dirs)
             matchfns.append(im)
         if exclude:
             kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
             self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
                                               listsubrepos, root)
             if not _anypats(kindpats):
+                # Only consider recursive excludes as such - if a non-recursive
+                # exclude is used, we must still recurse into the excluded
+                # directory, at least to find subdirectories. In such a case,
+                # the regex still won't match the non-recursively-excluded
+                # files.
                 self._excluderoots.update(_roots(kindpats))
             matchfns.append(lambda f: not em(f))
         if exact:
@@ -153,7 +165,7 @@
         elif patterns:
             kindpats = self._normalize(patterns, default, root, cwd, auditor)
             if not _kindpatsalwaysmatch(kindpats):
-                self._files = _roots(kindpats)
+                self._files = _explicitfiles(kindpats)
                 self._anypats = self._anypats or _anypats(kindpats)
                 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
                                                    listsubrepos, root)
@@ -238,7 +250,7 @@
             return 'all'
         if dir in self._excluderoots:
             return False
-        if (self._includeroots and
+        if ((self._includeroots or self._includedirs != set(['.'])) and
             '.' not in self._includeroots and
             dir not in self._includeroots and
             dir not in self._includedirs and
@@ -286,7 +298,7 @@
         for kind, pat in [_patsplit(p, default) for p in patterns]:
             if kind in ('glob', 'relpath'):
                 pat = pathutil.canonpath(root, cwd, pat, auditor)
-            elif kind in ('relglob', 'path'):
+            elif kind in ('relglob', 'path', 'rootfilesin'):
                 pat = util.normpath(pat)
             elif kind in ('listfile', 'listfile0'):
                 try:
@@ -419,7 +431,9 @@
         # m.exact(file) must be based off of the actual user input, otherwise
         # inexact case matches are treated as exact, and not noted without -v.
         if self._files:
-            self._fileroots = set(_roots(self._kp))
+            roots, dirs = _rootsanddirs(self._kp)
+            self._fileroots = set(roots)
+            self._fileroots.update(dirs)
 
     def _normalize(self, patterns, default, root, cwd, auditor):
         self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
@@ -447,7 +461,8 @@
     if ':' in pattern:
         kind, pat = pattern.split(':', 1)
         if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
-                    'listfile', 'listfile0', 'set', 'include', 'subinclude'):
+                    'listfile', 'listfile0', 'set', 'include', 'subinclude',
+                    'rootfilesin'):
             return kind, pat
     return default, pattern
 
@@ -540,6 +555,14 @@
         if pat == '.':
             return ''
         return '^' + util.re.escape(pat) + '(?:/|$)'
+    if kind == 'rootfilesin':
+        if pat == '.':
+            escaped = ''
+        else:
+            # Pattern is a directory name.
+            escaped = util.re.escape(pat) + '/'
+        # Anything after the pattern must be a non-directory.
+        return '^' + escaped + '[^/]+$'
     if kind == 'relglob':
         return '(?:|.*/)' + _globre(pat) + globsuffix
     if kind == 'relpath':
@@ -609,17 +632,16 @@
                     raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
         raise error.Abort(_("invalid pattern"))
 
-def _roots(kindpats):
-    '''return roots and exact explicitly listed files from patterns
+def _patternrootsanddirs(kindpats):
+    '''Returns roots and directories corresponding to each pattern.
 
-    >>> _roots([('glob', 'g/*', ''), ('glob', 'g', ''), ('glob', 'g*', '')])
-    ['g', 'g', '.']
-    >>> _roots([('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
-    ['r', 'p/p', '.']
-    >>> _roots([('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
-    ['.', '.', '.']
+    This calculates the roots and directories exactly matching the patterns and
+    returns a tuple of (roots, dirs) for each. It does not return other
+    directories which may also need to be considered, like the parent
+    directories.
     '''
     r = []
+    d = []
     for kind, pat, source in kindpats:
         if kind == 'glob': # find the non-glob prefix
             root = []
@@ -630,13 +652,63 @@
             r.append('/'.join(root) or '.')
         elif kind in ('relpath', 'path'):
             r.append(pat or '.')
+        elif kind in ('rootfilesin',):
+            d.append(pat or '.')
         else: # relglob, re, relre
             r.append('.')
-    return r
+    return r, d
+
+def _roots(kindpats):
+    '''Returns root directories to match recursively from the given patterns.'''
+    roots, dirs = _patternrootsanddirs(kindpats)
+    return roots
+
+def _rootsanddirs(kindpats):
+    '''Returns roots and exact directories from patterns.
+
+    roots are directories to match recursively, whereas exact directories should
+    be matched non-recursively. The returned (roots, dirs) tuple will also
+    include directories that need to be implicitly considered as either, such as
+    parent directories.
+
+    >>> _rootsanddirs(\
+        [('glob', 'g/h/*', ''), ('glob', 'g/h', ''), ('glob', 'g*', '')])
+    (['g/h', 'g/h', '.'], ['g'])
+    >>> _rootsanddirs(\
+        [('rootfilesin', 'g/h', ''), ('rootfilesin', '', '')])
+    ([], ['g/h', '.', 'g'])
+    >>> _rootsanddirs(\
+        [('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
+    (['r', 'p/p', '.'], ['p'])
+    >>> _rootsanddirs(\
+        [('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
+    (['.', '.', '.'], [])
+    '''
+    r, d = _patternrootsanddirs(kindpats)
+
+    # Append the parents as non-recursive/exact directories, since they must be
+    # scanned to get to either the roots or the other exact directories.
+    d.extend(util.dirs(d))
+    d.extend(util.dirs(r))
+
+    return r, d
+
+def _explicitfiles(kindpats):
+    '''Returns the potential explicit filenames from the patterns.
+
+    >>> _explicitfiles([('path', 'foo/bar', '')])
+    ['foo/bar']
+    >>> _explicitfiles([('rootfilesin', 'foo/bar', '')])
+    []
+    '''
+    # Keep only the pattern kinds where one can specify filenames (vs only
+    # directory names).
+    filable = [kp for kp in kindpats if kp[0] not in ('rootfilesin',)]
+    return _roots(filable)
 
 def _anypats(kindpats):
     for kind, pat, source in kindpats:
-        if kind in ('glob', 're', 'relglob', 'relre', 'set'):
+        if kind in ('glob', 're', 'relglob', 'relre', 'set', 'rootfilesin'):
             return True
 
 _commentre = None
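
The new 'rootfilesin' kind compiles to a regex that escapes the directory
prefix and then requires the remainder to contain no slash, so only files
directly inside the directory match. A quick standalone check of that
translation, mirroring the branch added to _regex() above::

    import re

    def rootfilesinregex(pat):
        """Build the regex used for 'rootfilesin:<pat>' (sketch)."""
        escaped = '' if pat == '.' else re.escape(pat) + '/'
        # anything after the directory prefix must be a non-directory
        return '^' + escaped + '[^/]+$'

    rx = re.compile(rootfilesinregex('foo/bar'))
    print(bool(rx.match('foo/bar/baz.c')))      # True: direct child
    print(bool(rx.match('foo/bar/sub/baz.c')))  # False: in a subdirectory
    print(bool(rx.match('foo/barbaz.c')))       # False: 'bar' is only a prefix
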
--- a/mercurial/merge.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/merge.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1448,7 +1448,7 @@
     """
     Perform a merge between the working directory and the given node
 
-    node = the node to update to, or None if unspecified
+    node = the node to update to
     branchmerge = whether to merge between branches
     force = whether to force branch merging or file overwriting
     matcher = a matcher to filter file lists (dirstate not updated)
@@ -1491,7 +1491,9 @@
     Return the same tuple as applyupdates().
     """
 
-    onode = node
+    # This function used to find the default destination if node was None, but
+    # that's now in destutil.py.
+    assert node is not None
     # If we're doing a partial update, we need to skip updating
     # the dirstate, so make a note of any partial-ness to the
     # update here.
@@ -1550,37 +1552,30 @@
 
             if pas not in ([p1], [p2]):  # nonlinear
                 dirty = wc.dirty(missing=True)
-                if dirty or onode is None:
+                if dirty:
                     # Branching is a bit strange to ensure we do the minimal
-                    # amount of call to obsolete.background.
+                    # number of calls to obsolete.foreground.
                     foreground = obsolete.foreground(repo, [p1.node()])
                     # note: the <node> variable contains a random identifier
                     if repo[node].node() in foreground:
-                        pas = [p1]  # allow updating to successors
-                    elif dirty:
+                        pass # allow updating to successors
+                    else:
                         msg = _("uncommitted changes")
-                        if onode is None:
-                            hint = _("commit and merge, or update --clean to"
-                                     " discard changes")
-                        else:
-                            hint = _("commit or update --clean to discard"
-                                     " changes")
-                        raise error.Abort(msg, hint=hint)
-                    else:  # node is none
-                        msg = _("not a linear update")
-                        hint = _("merge or update --check to force update")
-                        raise error.Abort(msg, hint=hint)
+                        hint = _("commit or update --clean to discard changes")
+                        raise error.UpdateAbort(msg, hint=hint)
                 else:
                     # Allow jumping branches if clean and specific rev given
-                    pas = [p1]
+                    pass
+
+        if overwrite:
+            pas = [wc]
+        elif not branchmerge:
+            pas = [p1]
 
         # deprecated config: merge.followcopies
         followcopies = repo.ui.configbool('merge', 'followcopies', True)
         if overwrite:
-            pas = [wc]
             followcopies = False
-        elif pas == [p2]: # backwards
-            pas = [p1]
         elif not pas[0]:
             followcopies = False
         if not branchmerge and not wc.dirty(missing=True):
--- a/mercurial/obsolete.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/obsolete.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1120,7 +1120,7 @@
     """the set of obsolete revisions"""
     obs = set()
     getnode = repo.changelog.node
-    notpublic = repo.revs("not public()")
+    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
     for r in notpublic:
         if getnode(r) in repo.obsstore.successors:
             obs.add(r)
--- a/mercurial/patch.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/patch.py	Tue Feb 28 11:13:25 2017 -0800
@@ -34,6 +34,7 @@
     mail,
     mdiff,
     pathutil,
+    pycompat,
     scmutil,
     similar,
     util,
@@ -209,7 +210,7 @@
 
     data = {}
     fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
-    tmpfp = os.fdopen(fd, 'w')
+    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
     try:
         msg = email.Parser.Parser().parse(fileobj)
 
@@ -1055,7 +1056,7 @@
                 ncpatchfp = None
                 try:
                     # Write the initial patch
-                    f = os.fdopen(patchfd, "w")
+                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                     chunk.header.write(f)
                     chunk.write(f)
                     f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
--- a/mercurial/phases.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/phases.py	Tue Feb 28 11:13:25 2017 -0800
@@ -113,8 +113,9 @@
     short,
 )
 from . import (
-    encoding,
     error,
+    smartset,
+    txnutil,
 )
 
 allphases = public, draft, secret = range(3)
@@ -136,15 +137,7 @@
     dirty = False
     roots = [set() for i in allphases]
     try:
-        f = None
-        if 'HG_PENDING' in encoding.environ:
-            try:
-                f = repo.svfs('phaseroots.pending')
-            except IOError as inst:
-                if inst.errno != errno.ENOENT:
-                    raise
-        if f is None:
-            f = repo.svfs('phaseroots')
+        f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots')
         try:
             for line in f:
                 phase, nh = line.split()
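
The inline HG_PENDING dance is factored out into txnutil.trypending(), which prefers the '.pending' variant of a file while a transaction is pending. A minimal sketch of such a helper, inferred from the removed lines rather than copied from txnutil (the real version differs in detail):

    # Minimal trypending()-style helper (behavior inferred from the removed code).
    import errno
    import os

    def trypending(root, opener, name):
        """Return (fileobj, is_pending)."""
        if 'HG_PENDING' in os.environ:
            try:
                return opener('%s.pending' % name), True
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
        return opener(name), False
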
@@ -170,6 +163,27 @@
             self.filterunknown(repo)
             self.opener = repo.svfs
 
+    def getrevset(self, repo, phases):
+        """return a smartset for the given phases"""
+        self.loadphaserevs(repo) # ensure phase sets are loaded
+
+        if self._phasesets and all(self._phasesets[p] is not None
+                                   for p in phases):
+            # fast path - use _phasesets
+            revs = self._phasesets[phases[0]]
+            if len(phases) > 1:
+                revs = revs.copy() # only copy when needed
+                for p in phases[1:]:
+                    revs.update(self._phasesets[p])
+            if repo.changelog.filteredrevs:
+                revs = revs - repo.changelog.filteredrevs
+            return smartset.baseset(revs)
+        else:
+            # slow path - enumerate all revisions
+            phase = self.phase
+            revs = (r for r in repo if phase(repo, r) in phases)
+            return smartset.generatorset(revs, iterasc=True)
+
     def copy(self):
         # Shallow copy meant to ensure isolation in
         # advance/retractboundary(), nothing more.
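
getrevset()'s fast path unions the cached per-phase sets, copying only when more than one phase is requested, so the common single-phase case avoids an extra allocation. The copy-only-when-needed union pattern in isolation (plain sets, not the phasecache):

    # The only-copy-when-needed union from the fast path, standalone.
    def unionphasesets(phasesets, phases):
        revs = phasesets[phases[0]]
        if len(phases) > 1:
            revs = revs.copy()              # never mutate the cached set
            for p in phases[1:]:
                revs.update(phasesets[p])
        return revs

    cache = {1: {10, 11}, 2: {12}}
    assert unionphasesets(cache, (1,)) is cache[1]    # no copy for one phase
    assert unionphasesets(cache, (1, 2)) == {10, 11, 12}
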
--- a/mercurial/profiling.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/profiling.py	Tue Feb 28 11:13:25 2017 -0800
@@ -8,7 +8,6 @@
 from __future__ import absolute_import, print_function
 
 import contextlib
-import time
 
 from .i18n import _
 from . import (
@@ -66,7 +65,7 @@
     collapse_recursion = True
     thread = flamegraph.ProfileThread(fp, 1.0 / freq,
                                       filter_, collapse_recursion)
-    start_time = time.clock()
+    start_time = util.timer()
     try:
         thread.start()
         yield
@@ -74,7 +73,7 @@
         thread.stop()
         thread.join()
         print('Collected %d stack frames (%d unique) in %2.2f seconds.' % (
-            time.clock() - start_time, thread.num_frames(),
+            util.timer() - start_time, thread.num_frames(),
             thread.num_frames(unique=True)))
 
 @contextlib.contextmanager
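
time.clock() measures CPU time on Unix and wall time only on Windows, which makes it a poor interval timer for a sampling profiler; util.timer is Mercurial's pick of the best available wall-clock timer. A sketch of such a selection (an assumption for illustration; the real helper in mercurial/util.py is more involved):

    # Sketch of a util.timer-style selection: prefer a high-resolution clock.
    import os
    import time

    if hasattr(time, 'perf_counter'):   # Python >= 3.3
        timer = time.perf_counter
    elif os.name == 'nt':
        timer = time.clock              # wall time on Windows under Python 2
    else:
        timer = time.time

    start = timer()
    elapsed = timer() - start           # interval, in seconds
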
@@ -103,6 +102,7 @@
             'bymethod': statprof.DisplayFormats.ByMethod,
             'hotpath': statprof.DisplayFormats.Hotpath,
             'json': statprof.DisplayFormats.Json,
+            'chrome': statprof.DisplayFormats.Chrome,
         }
 
         if profformat in formats:
@@ -111,7 +111,23 @@
             ui.warn(_('unknown profiler output format: %s\n') % profformat)
             displayformat = statprof.DisplayFormats.Hotpath
 
-        statprof.display(fp, data=data, format=displayformat)
+        kwargs = {}
+
+        def fraction(s):
+            if s.endswith('%'):
+                v = float(s[:-1]) / 100
+            else:
+                v = float(s)
+            if 0 <= v <= 1:
+                return v
+            raise ValueError(s)
+
+        if profformat == 'chrome':
+            showmin = ui.configwith(fraction, 'profiling', 'showmin', 0.005)
+            showmax = ui.configwith(fraction, 'profiling', 'showmax', 0.999)
+            kwargs.update(minthreshold=showmin, maxthreshold=showmax)
+
+        statprof.display(fp, data=data, format=displayformat, **kwargs)
 
 @contextlib.contextmanager
 def profile(ui):
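
fraction() lets profiling.showmin/showmax be written either as a bare fraction ('0.005') or as a percentage ('0.5%'), rejecting anything outside [0, 1]. The parser copied out of the hunk above, standalone, with a few checks:

    # fraction() from the hunk above, standalone.
    def fraction(s):
        if s.endswith('%'):
            v = float(s[:-1]) / 100
        else:
            v = float(s)
        if 0 <= v <= 1:
            return v
        raise ValueError(s)

    assert fraction('5%') == 0.05
    assert fraction('0.999') == 0.999
    try:
        fraction('150%')
    except ValueError:
        pass                            # out-of-range values are rejected
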
--- a/mercurial/pure/osutil.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/pure/osutil.py	Tue Feb 28 11:13:25 2017 -0800
@@ -338,7 +338,7 @@
                 _kernel32.CloseHandle(fh)
                 _raiseioerror(name)
 
-            f = os.fdopen(fd, mode, bufsize)
+            f = os.fdopen(fd, pycompat.sysstr(mode), bufsize)
             # unfortunately, f.name is '<fdopen>' at this point -- so we store
             # the name on this wrapper. We cannot just assign to f.name,
             # because that attribute is read-only.
--- a/mercurial/repair.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/repair.py	Tue Feb 28 11:13:25 2017 -0800
@@ -12,7 +12,6 @@
 import hashlib
 import stat
 import tempfile
-import time
 
 from .i18n import _
 from .node import short
@@ -905,10 +904,10 @@
     # the operation nearly instantaneous and atomic (at least in well-behaved
     # environments).
     ui.write(_('replacing store...\n'))
-    tstart = time.time()
+    tstart = util.timer()
     util.rename(srcrepo.spath, backupvfs.join('store'))
     util.rename(dstrepo.spath, srcrepo.spath)
-    elapsed = time.time() - tstart
+    elapsed = util.timer() - tstart
     ui.write(_('store replacement complete; repository was inconsistent for '
                '%0.1fs\n') % elapsed)
 
--- a/mercurial/repoview.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/repoview.py	Tue Feb 28 11:13:25 2017 -0800
@@ -139,15 +139,13 @@
         if wlock:
             wlock.release()
 
-def tryreadcache(repo, hideable):
-    """read a cache if the cache exists and is valid, otherwise returns None."""
+def _readhiddencache(repo, cachefilename, newhash):
     hidden = fh = None
     try:
         if repo.vfs.exists(cachefile):
             fh = repo.vfs.open(cachefile, 'rb')
             version, = struct.unpack(">H", fh.read(2))
             oldhash = fh.read(20)
-            newhash = cachehash(repo, hideable)
             if (cacheversion, oldhash) == (version, newhash):
                 # cache is valid, so we can start reading the hidden revs
                 data = fh.read()
@@ -165,6 +163,11 @@
         if fh:
             fh.close()
 
+def tryreadcache(repo, hideable):
+    """read a cache if the cache exists and is valid, otherwise returns None."""
+    newhash = cachehash(repo, hideable)
+    return _readhiddencache(repo, cachefile, newhash)
+
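
For reference, the cache layout the extracted reader parses is: a 2-byte big-endian version, a 20-byte hash of the hideable set, then the serialized hidden revisions; the cache is trusted only when both version and hash match. A simplified reader under those assumptions (the version constant below is hypothetical; the real one lives in repoview.py):

    # Simplified reader for the layout parsed above.
    import struct

    cacheversion = 2                    # hypothetical stand-in

    def readhiddencache(fh, newhash):
        version, = struct.unpack(">H", fh.read(2))
        oldhash = fh.read(20)
        if (cacheversion, oldhash) != (version, newhash):
            return None                 # stale or unknown cache
        return fh.read()                # serialized hidden revisions
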
 def computehidden(repo):
     """compute the set of hidden revision to filter
 
--- a/mercurial/revset.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/revset.py	Tue Feb 28 11:13:25 2017 -0800
@@ -9,7 +9,6 @@
 
 import heapq
 import re
-import string
 
 from .i18n import _
 from . import (
@@ -20,15 +19,34 @@
     match as matchmod,
     node,
     obsolete as obsmod,
-    parser,
     pathutil,
     phases,
-    pycompat,
     registrar,
     repoview,
+    revsetlang,
+    smartset,
     util,
 )
 
+# helpers for processing parsed tree
+getsymbol = revsetlang.getsymbol
+getstring = revsetlang.getstring
+getinteger = revsetlang.getinteger
+getlist = revsetlang.getlist
+getrange = revsetlang.getrange
+getargs = revsetlang.getargs
+getargsdict = revsetlang.getargsdict
+
+# constants used as an argument of match() and matchany()
+anyorder = revsetlang.anyorder
+defineorder = revsetlang.defineorder
+followorder = revsetlang.followorder
+
+baseset = smartset.baseset
+generatorset = smartset.generatorset
+spanset = smartset.spanset
+fullreposet = smartset.fullreposet
+
 def _revancestors(repo, revs, followfirst):
     """Like revlog.ancestors(), but supports followfirst."""
     if followfirst:
@@ -146,213 +164,8 @@
     revs.sort()
     return revs
 
-elements = {
-    # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    "##": (20, None, None, ("_concat", 20), None),
-    "~": (18, None, None, ("ancestor", 18), None),
-    "^": (18, None, None, ("parent", 18), "parentpost"),
-    "-": (5, None, ("negate", 19), ("minus", 5), None),
-    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
-    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
-    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
-    "not": (10, None, ("not", 10), None, None),
-    "!": (10, None, ("not", 10), None, None),
-    "and": (5, None, None, ("and", 5), None),
-    "&": (5, None, None, ("and", 5), None),
-    "%": (5, None, None, ("only", 5), "onlypost"),
-    "or": (4, None, None, ("or", 4), None),
-    "|": (4, None, None, ("or", 4), None),
-    "+": (4, None, None, ("or", 4), None),
-    "=": (3, None, None, ("keyvalue", 3), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "end": (0, None, None, None, None),
-}
-
-keywords = set(['and', 'or', 'not'])
-
-# default set of valid characters for the initial letter of symbols
-_syminitletters = set(
-    string.ascii_letters +
-    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
-
-# default set of valid characters for non-initial letters of symbols
-_symletters = _syminitletters | set(pycompat.sysstr('-/'))
-
-def tokenize(program, lookup=None, syminitletters=None, symletters=None):
-    '''
-    Parse a revset statement into a stream of tokens
-
-    ``syminitletters`` is the set of valid characters for the initial
-    letter of symbols.
-
-    By default, character ``c`` is recognized as valid for initial
-    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
-
-    ``symletters`` is the set of valid characters for non-initial
-    letters of symbols.
-
-    By default, character ``c`` is recognized as valid for non-initial
-    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
-
-    Check that @ is a valid unquoted token character (issue3686):
-    >>> list(tokenize("@::"))
-    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
-
-    '''
-    if syminitletters is None:
-        syminitletters = _syminitletters
-    if symletters is None:
-        symletters = _symletters
-
-    if program and lookup:
-        # attempt to parse old-style ranges first to deal with
-        # things like old-tag which contain query metacharacters
-        parts = program.split(':', 1)
-        if all(lookup(sym) for sym in parts if sym):
-            if parts[0]:
-                yield ('symbol', parts[0], 0)
-            if len(parts) > 1:
-                s = len(parts[0])
-                yield (':', None, s)
-                if parts[1]:
-                    yield ('symbol', parts[1], s + 1)
-            yield ('end', None, len(program))
-            return
-
-    pos, l = 0, len(program)
-    while pos < l:
-        c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
-            pass
-        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
-            yield ('::', None, pos)
-            pos += 1 # skip ahead
-        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
-            yield ('..', None, pos)
-            pos += 1 # skip ahead
-        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
-            yield ('##', None, pos)
-            pos += 1 # skip ahead
-        elif c in "():=,-|&+!~^%": # handle simple operators
-            yield (c, None, pos)
-        elif (c in '"\'' or c == 'r' and
-              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
-            if c == 'r':
-                pos += 1
-                c = program[pos]
-                decode = lambda x: x
-            else:
-                decode = parser.unescapestr
-            pos += 1
-            s = pos
-            while pos < l: # find closing quote
-                d = program[pos]
-                if d == '\\': # skip over escaped characters
-                    pos += 2
-                    continue
-                if d == c:
-                    yield ('string', decode(program[s:pos]), s)
-                    break
-                pos += 1
-            else:
-                raise error.ParseError(_("unterminated string"), s)
-        # gather up a symbol/keyword
-        elif c in syminitletters:
-            s = pos
-            pos += 1
-            while pos < l: # find end of symbol
-                d = program[pos]
-                if d not in symletters:
-                    break
-                if d == '.' and program[pos - 1] == '.': # special case for ..
-                    pos -= 1
-                    break
-                pos += 1
-            sym = program[s:pos]
-            if sym in keywords: # operator keywords
-                yield (sym, None, s)
-            elif '-' in sym:
-                # some jerk gave us foo-bar-baz, try to check if it's a symbol
-                if lookup and lookup(sym):
-                    # looks like a real symbol
-                    yield ('symbol', sym, s)
-                else:
-                    # looks like an expression
-                    parts = sym.split('-')
-                    for p in parts[:-1]:
-                        if p: # possible consecutive -
-                            yield ('symbol', p, s)
-                        s += len(p)
-                        yield ('-', None, pos)
-                        s += 1
-                    if parts[-1]: # possible trailing -
-                        yield ('symbol', parts[-1], s)
-            else:
-                yield ('symbol', sym, s)
-            pos -= 1
-        else:
-            raise error.ParseError(_("syntax error in revset '%s'") %
-                                   program, pos)
-        pos += 1
-    yield ('end', None, pos)
-
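
The tokenizer (together with the rest of the parsing layer removed below) moves verbatim into the new revsetlang module, so the doctest above keeps passing through the new import path (assumes a checkout with this changeset applied):

    # Same behavior, new home.
    from mercurial import revsetlang

    tokens = list(revsetlang.tokenize("@::"))
    assert tokens == [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
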
 # helpers
 
-_notset = object()
-
-def getsymbol(x):
-    if x and x[0] == 'symbol':
-        return x[1]
-    raise error.ParseError(_('not a symbol'))
-
-def getstring(x, err):
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
-        return x[1]
-    raise error.ParseError(err)
-
-def getinteger(x, err, default=_notset):
-    if not x and default is not _notset:
-        return default
-    try:
-        return int(getstring(x, err))
-    except ValueError:
-        raise error.ParseError(err)
-
-def getlist(x):
-    if not x:
-        return []
-    if x[0] == 'list':
-        return list(x[1:])
-    return [x]
-
-def getrange(x, err):
-    if not x:
-        raise error.ParseError(err)
-    op = x[0]
-    if op == 'range':
-        return x[1], x[2]
-    elif op == 'rangepre':
-        return None, x[1]
-    elif op == 'rangepost':
-        return x[1], None
-    elif op == 'rangeall':
-        return None, None
-    raise error.ParseError(err)
-
-def getargs(x, min, max, err):
-    l = getlist(x)
-    if len(l) < min or (max >= 0 and len(l) > max):
-        raise error.ParseError(err)
-    return l
-
-def getargsdict(x, funcname, keys):
-    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
-                                keyvaluenode='keyvalue', keynode='symbol')
-
 def getset(repo, subset, x):
     if not x:
         raise error.ParseError(_("missing argument"))
@@ -501,7 +314,7 @@
 @predicate('_destupdate')
 def _destupdate(repo, subset, x):
     # experimental revset for update destination
-    args = getargsdict(x, 'limit', 'clean check')
+    args = getargsdict(x, 'limit', 'clean')
     return subset & baseset([destutil.destupdate(repo, **args)[0]])
 
 @predicate('_destmerge')
@@ -1139,7 +952,8 @@
     fromline -= 1
 
     fctx = repo[rev].filectx(fname)
-    revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
+    revs = (c.rev() for c, _linerange
+            in context.blockancestors(fctx, fromline, toline))
     return subset & generatorset(revs, iterasc=False)
 
 @predicate('all()', safe=True)
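
context.blockancestors() now yields (filectx, linerange) pairs rather than bare contexts, so followlines unpacks and discards the range. The adaptation pattern in isolation (toy stand-in generator, not the real API):

    # When a generator starts yielding (item, extra) pairs, existing
    # consumers unpack and ignore the extra part.
    def blockancestors():
        yield 'fctx-a', (0, 3)          # hypothetical (fctx, linerange) pairs
        yield 'fctx-b', (1, 4)

    assert [c for c, _linerange in blockancestors()] == ['fctx-a', 'fctx-b']
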
@@ -1638,19 +1452,10 @@
     ps -= set([node.nullrev])
     return subset & ps
 
-def _phase(repo, subset, target):
-    """helper to select all rev in phase <target>"""
-    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
-    if repo._phasecache._phasesets:
-        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
-        s = baseset(s)
-        s.sort() # set are non ordered, so we enforce ascending
-        return subset & s
-    else:
-        phase = repo._phasecache.phase
-        condition = lambda r: phase(repo, r) == target
-        return subset.filter(condition, condrepr=('<phase %r>', target),
-                             cache=False)
+def _phase(repo, subset, *targets):
+    """helper to select all rev in <targets> phases"""
+    s = repo._phasecache.getrevset(repo, targets)
+    return subset & s
 
 @predicate('draft()', safe=True)
 def draft(repo, subset, x):
@@ -1711,20 +1516,7 @@
 @predicate('_notpublic', safe=True)
 def _notpublic(repo, subset, x):
     getargs(x, 0, 0, "_notpublic takes no arguments")
-    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
-    if repo._phasecache._phasesets:
-        s = set()
-        for u in repo._phasecache._phasesets[1:]:
-            s.update(u)
-        s = baseset(s - repo.changelog.filteredrevs)
-        s.sort()
-        return subset & s
-    else:
-        phase = repo._phasecache.phase
-        target = phases.public
-        condition = lambda r: phase(repo, r) != target
-        return subset.filter(condition, condrepr=('<phase %r>', target),
-                             cache=False)
+    return _phase(repo, subset, phases.draft, phases.secret)
 
 @predicate('public()', safe=True)
 def public(repo, subset, x):
@@ -2428,350 +2220,6 @@
     "parentpost": parentpost,
 }
 
-# Constants for ordering requirement, used in _analyze():
-#
-# If 'define', any nested functions and operations can change the ordering of
-# the entries in the set. If 'follow', any nested functions and operations
-# should take the ordering specified by the first operand to the '&' operator.
-#
-# For instance,
-#
-#   X & (Y | Z)
-#   ^   ^^^^^^^
-#   |   follow
-#   define
-#
-# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
-# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
-#
-# 'any' means the order doesn't matter. For instance,
-#
-#   X & !Y
-#        ^
-#        any
-#
-# 'y()' can either enforce its ordering requirement or take the ordering
-# specified by 'x()' because 'not()' doesn't care the order.
-#
-# Transition of ordering requirement:
-#
-# 1. starts with 'define'
-# 2. shifts to 'follow' by 'x & y'
-# 3. changes back to 'define' on function call 'f(x)' or function-like
-#    operation 'x (f) y' because 'f' may have its own ordering requirement
-#    for 'x' and 'y' (e.g. 'first(x)')
-#
-anyorder = 'any'        # don't care about the order
-defineorder = 'define'  # should define the order
-followorder = 'follow'  # must follow the current order
-
-# transition table for 'x & y', from the current expression 'x' to 'y'
-_tofolloworder = {
-    anyorder: anyorder,
-    defineorder: followorder,
-    followorder: followorder,
-}
-
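
The transition table above encodes that the right operand of 'x & y' must follow the order established on the left, and that 'follow' is absorbing: once an expression is evaluated under 'follow', everything nested under further '&' stays 'follow'. Checking that fixed point with the table copied out:

    # The '&' transition table, standalone: 'follow' is a fixed point.
    anyorder, defineorder, followorder = 'any', 'define', 'follow'
    _tofolloworder = {
        anyorder: anyorder,
        defineorder: followorder,
        followorder: followorder,
    }

    order = defineorder
    for _ in range(3):                  # descend through chained '&'
        order = _tofolloworder[order]
    assert order == followorder
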
-def _matchonly(revs, bases):
-    """
-    >>> f = lambda *args: _matchonly(*map(parse, args))
-    >>> f('ancestors(A)', 'not ancestors(B)')
-    ('list', ('symbol', 'A'), ('symbol', 'B'))
-    """
-    if (revs is not None
-        and revs[0] == 'func'
-        and getsymbol(revs[1]) == 'ancestors'
-        and bases is not None
-        and bases[0] == 'not'
-        and bases[1][0] == 'func'
-        and getsymbol(bases[1][1]) == 'ancestors'):
-        return ('list', revs[2], bases[1][2])
-
-def _fixops(x):
-    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
-    handled well by our simple top-down parser"""
-    if not isinstance(x, tuple):
-        return x
-
-    op = x[0]
-    if op == 'parent':
-        # x^:y means (x^) : y, not x ^ (:y)
-        # x^:  means (x^) :,   not x ^ (:)
-        post = ('parentpost', x[1])
-        if x[2][0] == 'dagrangepre':
-            return _fixops(('dagrange', post, x[2][1]))
-        elif x[2][0] == 'rangepre':
-            return _fixops(('range', post, x[2][1]))
-        elif x[2][0] == 'rangeall':
-            return _fixops(('rangepost', post))
-    elif op == 'or':
-        # make number of arguments deterministic:
-        # x + y + z -> (or x y z) -> (or (list x y z))
-        return (op, _fixops(('list',) + x[1:]))
-
-    return (op,) + tuple(_fixops(y) for y in x[1:])
-
-def _analyze(x, order):
-    if x is None:
-        return x
-
-    op = x[0]
-    if op == 'minus':
-        return _analyze(('and', x[1], ('not', x[2])), order)
-    elif op == 'only':
-        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
-        return _analyze(t, order)
-    elif op == 'onlypost':
-        return _analyze(('func', ('symbol', 'only'), x[1]), order)
-    elif op == 'dagrangepre':
-        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
-    elif op == 'dagrangepost':
-        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
-    elif op == 'negate':
-        s = getstring(x[1], _("can't negate that"))
-        return _analyze(('string', '-' + s), order)
-    elif op in ('string', 'symbol'):
-        return x
-    elif op == 'and':
-        ta = _analyze(x[1], order)
-        tb = _analyze(x[2], _tofolloworder[order])
-        return (op, ta, tb, order)
-    elif op == 'or':
-        return (op, _analyze(x[1], order), order)
-    elif op == 'not':
-        return (op, _analyze(x[1], anyorder), order)
-    elif op == 'rangeall':
-        return (op, None, order)
-    elif op in ('rangepre', 'rangepost', 'parentpost'):
-        return (op, _analyze(x[1], defineorder), order)
-    elif op == 'group':
-        return _analyze(x[1], order)
-    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
-        ta = _analyze(x[1], defineorder)
-        tb = _analyze(x[2], defineorder)
-        return (op, ta, tb, order)
-    elif op == 'list':
-        return (op,) + tuple(_analyze(y, order) for y in x[1:])
-    elif op == 'keyvalue':
-        return (op, x[1], _analyze(x[2], order))
-    elif op == 'func':
-        f = getsymbol(x[1])
-        d = defineorder
-        if f == 'present':
-            # 'present(set)' is known to return the argument set with no
-            # modification, so forward the current order to its argument
-            d = order
-        return (op, x[1], _analyze(x[2], d), order)
-    raise ValueError('invalid operator %r' % op)
-
-def analyze(x, order=defineorder):
-    """Transform raw parsed tree to evaluatable tree which can be fed to
-    optimize() or getset()
-
-    All pseudo operations should be mapped to real operations or functions
-    defined in methods or symbols table respectively.
-
-    'order' specifies how the current expression 'x' is ordered (see the
-    constants defined above.)
-    """
-    return _analyze(x, order)
-
-def _optimize(x, small):
-    if x is None:
-        return 0, x
-
-    smallbonus = 1
-    if small:
-        smallbonus = .5
-
-    op = x[0]
-    if op in ('string', 'symbol'):
-        return smallbonus, x # single revisions are small
-    elif op == 'and':
-        wa, ta = _optimize(x[1], True)
-        wb, tb = _optimize(x[2], True)
-        order = x[3]
-        w = min(wa, wb)
-
-        # (::x and not ::y)/(not ::y and ::x) have a fast path
-        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
-        if tm:
-            return w, ('func', ('symbol', 'only'), tm, order)
-
-        if tb is not None and tb[0] == 'not':
-            return wa, ('difference', ta, tb[1], order)
-
-        if wa > wb:
-            return w, (op, tb, ta, order)
-        return w, (op, ta, tb, order)
-    elif op == 'or':
-        # fast path for machine-generated expression, that is likely to have
-        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
-        order = x[2]
-        ws, ts, ss = [], [], []
-        def flushss():
-            if not ss:
-                return
-            if len(ss) == 1:
-                w, t = ss[0]
-            else:
-                s = '\0'.join(t[1] for w, t in ss)
-                y = ('func', ('symbol', '_list'), ('string', s), order)
-                w, t = _optimize(y, False)
-            ws.append(w)
-            ts.append(t)
-            del ss[:]
-        for y in getlist(x[1]):
-            w, t = _optimize(y, False)
-            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
-                ss.append((w, t))
-                continue
-            flushss()
-            ws.append(w)
-            ts.append(t)
-        flushss()
-        if len(ts) == 1:
-            return ws[0], ts[0] # 'or' operation is fully optimized out
-        # we can't reorder trees by weight because it would change the order.
-        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
-        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
-        return max(ws), (op, ('list',) + tuple(ts), order)
-    elif op == 'not':
-        # Optimize not public() to _notpublic() because we have a fast version
-        if x[1][:3] == ('func', ('symbol', 'public'), None):
-            order = x[1][3]
-            newsym = ('func', ('symbol', '_notpublic'), None, order)
-            o = _optimize(newsym, not small)
-            return o[0], o[1]
-        else:
-            o = _optimize(x[1], not small)
-            order = x[2]
-            return o[0], (op, o[1], order)
-    elif op == 'rangeall':
-        return smallbonus, x
-    elif op in ('rangepre', 'rangepost', 'parentpost'):
-        o = _optimize(x[1], small)
-        order = x[2]
-        return o[0], (op, o[1], order)
-    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
-        wa, ta = _optimize(x[1], small)
-        wb, tb = _optimize(x[2], small)
-        order = x[3]
-        return wa + wb, (op, ta, tb, order)
-    elif op == 'list':
-        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
-        return sum(ws), (op,) + ts
-    elif op == 'keyvalue':
-        w, t = _optimize(x[2], small)
-        return w, (op, x[1], t)
-    elif op == 'func':
-        f = getsymbol(x[1])
-        wa, ta = _optimize(x[2], small)
-        if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
-                 'keyword', 'outgoing', 'user', 'destination'):
-            w = 10 # slow
-        elif f in ('modifies', 'adds', 'removes'):
-            w = 30 # slower
-        elif f == "contains":
-            w = 100 # very slow
-        elif f == "ancestor":
-            w = 1 * smallbonus
-        elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
-            w = 0
-        elif f == "sort":
-            w = 10 # assume most sorts look at changelog
-        else:
-            w = 1
-        order = x[3]
-        return w + wa, (op, x[1], ta, order)
-    raise ValueError('invalid operator %r' % op)
-
-def optimize(tree):
-    """Optimize evaluatable tree
-
-    All pseudo operations should be transformed beforehand.
-    """
-    _weight, newtree = _optimize(tree, small=True)
-    return newtree
-
-# the set of valid characters for the initial letter of symbols in
-# alias declarations and definitions
-_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
-
-def _parsewith(spec, lookup=None, syminitletters=None):
-    """Generate a parse tree of given spec with given tokenizing options
-
-    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
-    ('func', ('symbol', 'foo'), ('symbol', '$1'))
-    >>> _parsewith('$1')
-    Traceback (most recent call last):
-      ...
-    ParseError: ("syntax error in revset '$1'", 0)
-    >>> _parsewith('foo bar')
-    Traceback (most recent call last):
-      ...
-    ParseError: ('invalid token', 4)
-    """
-    p = parser.parser(elements)
-    tree, pos = p.parse(tokenize(spec, lookup=lookup,
-                                 syminitletters=syminitletters))
-    if pos != len(spec):
-        raise error.ParseError(_('invalid token'), pos)
-    return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
-
-class _aliasrules(parser.basealiasrules):
-    """Parsing and expansion rule set of revset aliases"""
-    _section = _('revset alias')
-
-    @staticmethod
-    def _parse(spec):
-        """Parse alias declaration/definition ``spec``
-
-        This allows symbol names to use also ``$`` as an initial letter
-        (for backward compatibility), and callers of this function should
-        examine whether ``$`` is used also for unexpected symbols or not.
-        """
-        return _parsewith(spec, syminitletters=_aliassyminitletters)
-
-    @staticmethod
-    def _trygetfunc(tree):
-        if tree[0] == 'func' and tree[1][0] == 'symbol':
-            return tree[1][1], getlist(tree[2])
-
-def expandaliases(ui, tree):
-    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
-    tree = _aliasrules.expand(aliases, tree)
-    # warn about problematic (but not referred) aliases
-    for name, alias in sorted(aliases.iteritems()):
-        if alias.error and not alias.warned:
-            ui.warn(_('warning: %s\n') % (alias.error))
-            alias.warned = True
-    return tree
-
-def foldconcat(tree):
-    """Fold elements to be concatenated by `##`
-    """
-    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
-        return tree
-    if tree[0] == '_concat':
-        pending = [tree]
-        l = []
-        while pending:
-            e = pending.pop()
-            if e[0] == '_concat':
-                pending.extend(reversed(e[1:]))
-            elif e[0] in ('string', 'symbol'):
-                l.append(e[1])
-            else:
-                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
-                raise error.ParseError(msg)
-        return ('string', ''.join(l))
-    else:
-        return tuple(foldconcat(t) for t in tree)
-
-def parse(spec, lookup=None):
-    return _parsewith(spec, lookup=lookup)
-
 def posttreebuilthook(tree, repo):
     # hook for extensions to execute code on the optimized tree
     pass
@@ -2801,15 +2249,16 @@
     if repo:
         lookup = repo.__contains__
     if len(specs) == 1:
-        tree = parse(specs[0], lookup)
+        tree = revsetlang.parse(specs[0], lookup)
     else:
-        tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
+        tree = ('or',
+                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
 
     if ui:
-        tree = expandaliases(ui, tree)
-    tree = foldconcat(tree)
-    tree = analyze(tree, order)
-    tree = optimize(tree)
+        tree = revsetlang.expandaliases(ui, tree)
+    tree = revsetlang.foldconcat(tree)
+    tree = revsetlang.analyze(tree, order)
+    tree = revsetlang.optimize(tree)
     posttreebuilthook(tree, repo)
     return makematcher(tree)
 
@@ -2825,1082 +2274,6 @@
         return result
     return mfunc
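
After this split, matchany() drives the whole language pipeline (parse, alias expansion, concat folding, analysis, optimization) through revsetlang, and revset.py keeps only evaluation. A hedged usage sketch of the entry point, with the call shape taken from this hunk (passing ui=None skips alias expansion, per the `if ui:` guard above; assumes a checkout with this changeset applied):

    from mercurial import revset

    def revsmatching(repo, *specs):
        m = revset.matchany(None, specs, repo=repo)   # ui=None: no aliases
        return m(repo)                                # evaluates to a smartset
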
 
-def formatspec(expr, *args):
-    '''
-    This is a convenience function for using revsets internally, and
-    escapes arguments appropriately. Aliases are intentionally ignored
-    so that intended expression behavior isn't accidentally subverted.
-
-    Supported arguments:
-
-    %r = revset expression, parenthesized
-    %d = int(arg), no quoting
-    %s = string(arg), escaped and single-quoted
-    %b = arg.branch(), escaped and single-quoted
-    %n = hex(arg), single-quoted
-    %% = a literal '%'
-
-    Prefixing the type with 'l' specifies a parenthesized list of that type.
-
-    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
-    '(10 or 11):: and ((this()) or (that()))'
-    >>> formatspec('%d:: and not %d::', 10, 20)
-    '10:: and not 20::'
-    >>> formatspec('%ld or %ld', [], [1])
-    "_list('') or 1"
-    >>> formatspec('keyword(%s)', 'foo\\xe9')
-    "keyword('foo\\\\xe9')"
-    >>> b = lambda: 'default'
-    >>> b.branch = b
-    >>> formatspec('branch(%b)', b)
-    "branch('default')"
-    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
-    "root(_list('a\\x00b\\x00c\\x00d'))"
-    '''
-
-    def quote(s):
-        return repr(str(s))
-
-    def argtype(c, arg):
-        if c == 'd':
-            return str(int(arg))
-        elif c == 's':
-            return quote(arg)
-        elif c == 'r':
-            parse(arg) # make sure syntax errors are confined
-            return '(%s)' % arg
-        elif c == 'n':
-            return quote(node.hex(arg))
-        elif c == 'b':
-            return quote(arg.branch())
-
-    def listexp(s, t):
-        l = len(s)
-        if l == 0:
-            return "_list('')"
-        elif l == 1:
-            return argtype(t, s[0])
-        elif t == 'd':
-            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
-        elif t == 's':
-            return "_list('%s')" % "\0".join(s)
-        elif t == 'n':
-            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
-        elif t == 'b':
-            return "_list('%s')" % "\0".join(a.branch() for a in s)
-
-        m = l // 2
-        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
-
-    ret = ''
-    pos = 0
-    arg = 0
-    while pos < len(expr):
-        c = expr[pos]
-        if c == '%':
-            pos += 1
-            d = expr[pos]
-            if d == '%':
-                ret += d
-            elif d in 'dsnbr':
-                ret += argtype(d, args[arg])
-                arg += 1
-            elif d == 'l':
-                # a list of some type
-                pos += 1
-                d = expr[pos]
-                ret += listexp(list(args[arg]), d)
-                arg += 1
-            else:
-                raise error.Abort(_('unexpected revspec format character %s')
-                                  % d)
-        else:
-            ret += c
-        pos += 1
-
-    return ret
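
One detail worth noting in the removed formatspec(): for list types without a bulk `_list` form, listexp() recursively halves the list and joins the halves with 'or', keeping the generated expression's nesting logarithmic rather than linear. That fallback in isolation:

    # listexp()'s divide-and-conquer fallback, standalone.
    def listexp(items):
        l = len(items)
        if l == 0:
            return "_list('')"
        if l == 1:
            return items[0]
        m = l // 2
        return '(%s or %s)' % (listexp(items[:m]), listexp(items[m:]))

    assert listexp(['a', 'b', 'c']) == '(a or (b or c))'
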
-
-def prettyformat(tree):
-    return parser.prettyformat(tree, ('string', 'symbol'))
-
-def depth(tree):
-    if isinstance(tree, tuple):
-        return max(map(depth, tree)) + 1
-    else:
-        return 0
-
-def funcsused(tree):
-    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
-        return set()
-    else:
-        funcs = set()
-        for s in tree[1:]:
-            funcs |= funcsused(s)
-        if tree[0] == 'func':
-            funcs.add(tree[1][1])
-        return funcs
-
-def _formatsetrepr(r):
-    """Format an optional printable representation of a set
-
-    ========  =================================
-    type(r)   example
-    ========  =================================
-    tuple     ('<not %r>', other)
-    str       '<branch closed>'
-    callable  lambda: '<branch %r>' % sorted(b)
-    object    other
-    ========  =================================
-    """
-    if r is None:
-        return ''
-    elif isinstance(r, tuple):
-        return r[0] % r[1:]
-    elif isinstance(r, str):
-        return r
-    elif callable(r):
-        return r()
-    else:
-        return repr(r)
-
-class abstractsmartset(object):
-
-    def __nonzero__(self):
-        """True if the smartset is not empty"""
-        raise NotImplementedError()
-
-    def __contains__(self, rev):
-        """provide fast membership testing"""
-        raise NotImplementedError()
-
-    def __iter__(self):
-        """iterate the set in the order it is supposed to be iterated"""
-        raise NotImplementedError()
-
-    # Attributes containing a function to perform a fast iteration in a given
-    # direction. A smartset can have none, one, or both defined.
-    #
-    # Default value is None instead of a function returning None to avoid
-    # initializing an iterator just for testing if a fast method exists.
-    fastasc = None
-    fastdesc = None
-
-    def isascending(self):
-        """True if the set will iterate in ascending order"""
-        raise NotImplementedError()
-
-    def isdescending(self):
-        """True if the set will iterate in descending order"""
-        raise NotImplementedError()
-
-    def istopo(self):
-        """True if the set will iterate in topographical order"""
-        raise NotImplementedError()
-
-    def min(self):
-        """return the minimum element in the set"""
-        if self.fastasc is None:
-            v = min(self)
-        else:
-            for v in self.fastasc():
-                break
-            else:
-                raise ValueError('arg is an empty sequence')
-        self.min = lambda: v
-        return v
-
-    def max(self):
-        """return the maximum element in the set"""
-        if self.fastdesc is None:
-            return max(self)
-        else:
-            for v in self.fastdesc():
-                break
-            else:
-                raise ValueError('arg is an empty sequence')
-        self.max = lambda: v
-        return v
-
-    def first(self):
-        """return the first element in the set (user iteration perspective)
-
-        Return None if the set is empty"""
-        raise NotImplementedError()
-
-    def last(self):
-        """return the last element in the set (user iteration perspective)
-
-        Return None if the set is empty"""
-        raise NotImplementedError()
-
-    def __len__(self):
-        """return the length of the smartsets
-
-        This can be expensive on smartset that could be lazy otherwise."""
-        raise NotImplementedError()
-
-    def reverse(self):
-        """reverse the expected iteration order"""
-        raise NotImplementedError()
-
-    def sort(self, reverse=True):
-        """get the set to iterate in an ascending or descending order"""
-        raise NotImplementedError()
-
-    def __and__(self, other):
-        """Returns a new object with the intersection of the two collections.
-
-        This is part of the mandatory API for smartset."""
-        if isinstance(other, fullreposet):
-            return self
-        return self.filter(other.__contains__, condrepr=other, cache=False)
-
-    def __add__(self, other):
-        """Returns a new object with the union of the two collections.
-
-        This is part of the mandatory API for smartset."""
-        return addset(self, other)
-
-    def __sub__(self, other):
-        """Returns a new object with the substraction of the two collections.
-
-        This is part of the mandatory API for smartset."""
-        c = other.__contains__
-        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
-                           cache=False)
-
-    def filter(self, condition, condrepr=None, cache=True):
-        """Returns this smartset filtered by condition as a new smartset.
-
-        `condition` is a callable which takes a revision number and returns a
-        boolean. Optional `condrepr` provides a printable representation of
-        the given `condition`.
-
-        This is part of the mandatory API for smartset."""
-        # builtins cannot be cached, but they do not need to be
-        if cache and util.safehasattr(condition, 'func_code'):
-            condition = util.cachefunc(condition)
-        return filteredset(self, condition, condrepr)
-
-class baseset(abstractsmartset):
-    """Basic data structure that represents a revset and contains the basic
-    operations that it should be able to perform.
-
-    Every method in this class should be implemented by any smartset class.
-    """
-    def __init__(self, data=(), datarepr=None, istopo=False):
-        """
-        datarepr: a tuple of (format, obj, ...), a function or an object that
-                  provides a printable representation of the given data.
-        """
-        self._ascending = None
-        self._istopo = istopo
-        if not isinstance(data, list):
-            if isinstance(data, set):
-                self._set = data
-                # sets have no order; we pick one for stability purposes
-                self._ascending = True
-            data = list(data)
-        self._list = data
-        self._datarepr = datarepr
-
-    @util.propertycache
-    def _set(self):
-        return set(self._list)
-
-    @util.propertycache
-    def _asclist(self):
-        asclist = self._list[:]
-        asclist.sort()
-        return asclist
-
-    def __iter__(self):
-        if self._ascending is None:
-            return iter(self._list)
-        elif self._ascending:
-            return iter(self._asclist)
-        else:
-            return reversed(self._asclist)
-
-    def fastasc(self):
-        return iter(self._asclist)
-
-    def fastdesc(self):
-        return reversed(self._asclist)
-
-    @util.propertycache
-    def __contains__(self):
-        return self._set.__contains__
-
-    def __nonzero__(self):
-        return bool(self._list)
-
-    def sort(self, reverse=False):
-        self._ascending = not bool(reverse)
-        self._istopo = False
-
-    def reverse(self):
-        if self._ascending is None:
-            self._list.reverse()
-        else:
-            self._ascending = not self._ascending
-        self._istopo = False
-
-    def __len__(self):
-        return len(self._list)
-
-    def isascending(self):
-        """Returns True if the collection is ascending order, False if not.
-
-        This is part of the mandatory API for smartset."""
-        if len(self) <= 1:
-            return True
-        return self._ascending is not None and self._ascending
-
-    def isdescending(self):
-        """Returns True if the collection is descending order, False if not.
-
-        This is part of the mandatory API for smartset."""
-        if len(self) <= 1:
-            return True
-        return self._ascending is not None and not self._ascending
-
-    def istopo(self):
-        """Is the collection is in topographical order or not.
-
-        This is part of the mandatory API for smartset."""
-        if len(self) <= 1:
-            return True
-        return self._istopo
-
-    def first(self):
-        if self:
-            if self._ascending is None:
-                return self._list[0]
-            elif self._ascending:
-                return self._asclist[0]
-            else:
-                return self._asclist[-1]
-        return None
-
-    def last(self):
-        if self:
-            if self._ascending is None:
-                return self._list[-1]
-            elif self._ascending:
-                return self._asclist[-1]
-            else:
-                return self._asclist[0]
-        return None
-
-    def __repr__(self):
-        d = {None: '', False: '-', True: '+'}[self._ascending]
-        s = _formatsetrepr(self._datarepr)
-        if not s:
-            l = self._list
-            # if _list has been built from a set, it might have a different
-            # order from one python implementation to another.
-            # We fallback to the sorted version for a stable output.
-            if self._ascending is not None:
-                l = self._asclist
-            s = repr(l)
-        return '<%s%s %s>' % (type(self).__name__, d, s)
-
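
baseset and the other smartset classes removed here move unchanged into the new mercurial.smartset module, so their documented semantics carry over. A quick check against the behavior described in the class above (assumes a checkout with this changeset applied):

    # Same semantics, new module.
    from mercurial import smartset

    s = smartset.baseset([3, 1, 2])
    s.sort()
    assert list(s) == [1, 2, 3]         # ascending after sort()
    assert 2 in s and 5 not in s        # fast membership via the cached set
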
-class filteredset(abstractsmartset):
-    """Duck type for baseset class which iterates lazily over the revisions in
-    the subset and contains a function which tests for membership in the
-    revset
-    """
-    def __init__(self, subset, condition=lambda x: True, condrepr=None):
-        """
-        condition: a function that decides whether a revision in the subset
-                   belongs to the revset or not.
-        condrepr: a tuple of (format, obj, ...), a function or an object that
-                  provides a printable representation of the given condition.
-        """
-        self._subset = subset
-        self._condition = condition
-        self._condrepr = condrepr
-
-    def __contains__(self, x):
-        return x in self._subset and self._condition(x)
-
-    def __iter__(self):
-        return self._iterfilter(self._subset)
-
-    def _iterfilter(self, it):
-        cond = self._condition
-        for x in it:
-            if cond(x):
-                yield x
-
-    @property
-    def fastasc(self):
-        it = self._subset.fastasc
-        if it is None:
-            return None
-        return lambda: self._iterfilter(it())
-
-    @property
-    def fastdesc(self):
-        it = self._subset.fastdesc
-        if it is None:
-            return None
-        return lambda: self._iterfilter(it())
-
-    def __nonzero__(self):
-        fast = None
-        candidates = [self.fastasc if self.isascending() else None,
-                      self.fastdesc if self.isdescending() else None,
-                      self.fastasc,
-                      self.fastdesc]
-        for candidate in candidates:
-            if candidate is not None:
-                fast = candidate
-                break
-
-        if fast is not None:
-            it = fast()
-        else:
-            it = self
-
-        for r in it:
-            return True
-        return False
-
-    def __len__(self):
-        # Basic implementation to be changed in future patches.
-        # until this gets improved, we use generator expression
-        # here, since list comprehensions are free to call __len__ again
-        # causing infinite recursion
-        l = baseset(r for r in self)
-        return len(l)
-
-    def sort(self, reverse=False):
-        self._subset.sort(reverse=reverse)
-
-    def reverse(self):
-        self._subset.reverse()
-
-    def isascending(self):
-        return self._subset.isascending()
-
-    def isdescending(self):
-        return self._subset.isdescending()
-
-    def istopo(self):
-        return self._subset.istopo()
-
-    def first(self):
-        for x in self:
-            return x
-        return None
-
-    def last(self):
-        it = None
-        if self.isascending():
-            it = self.fastdesc
-        elif self.isdescending():
-            it = self.fastasc
-        if it is not None:
-            for x in it():
-                return x
-            return None #empty case
-        else:
-            x = None
-            for x in self:
-                pass
-            return x
-
-    def __repr__(self):
-        xs = [repr(self._subset)]
-        s = _formatsetrepr(self._condrepr)
-        if s:
-            xs.append(s)
-        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
-
-def _iterordered(ascending, iter1, iter2):
-    """produce an ordered iteration from two iterators with the same order
-
-    The ascending argument indicates the iteration direction.
-    """
-    choice = max
-    if ascending:
-        choice = min
-
-    val1 = None
-    val2 = None
-    try:
-        # Consume both iterators in an ordered way until one is empty
-        while True:
-            if val1 is None:
-                val1 = next(iter1)
-            if val2 is None:
-                val2 = next(iter2)
-            n = choice(val1, val2)
-            yield n
-            if val1 == n:
-                val1 = None
-            if val2 == n:
-                val2 = None
-    except StopIteration:
-        # Flush any remaining values and consume the other one
-        it = iter2
-        if val1 is not None:
-            yield val1
-            it = iter1
-        elif val2 is not None:
-            # might have been equality and both are empty
-            yield val2
-        for val in it:
-            yield val
-
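
_iterordered() is a two-way merge of same-direction iterators that emits equal heads only once, which is what makes addset behave like a union. The function lightly condensed into standalone form, with a sanity check:

    def _iterordered(ascending, iter1, iter2):
        choice = min if ascending else max
        val1 = val2 = None
        try:
            # consume both iterators in an ordered way until one is empty
            while True:
                if val1 is None:
                    val1 = next(iter1)
                if val2 is None:
                    val2 = next(iter2)
                n = choice(val1, val2)
                yield n
                if val1 == n:           # equal heads: both are cleared,
                    val1 = None         # so duplicates are emitted once
                if val2 == n:
                    val2 = None
        except StopIteration:
            # flush the pending value, then drain the surviving iterator
            it = iter2
            if val1 is not None:
                yield val1
                it = iter1
            elif val2 is not None:
                yield val2
            for val in it:
                yield val

    assert list(_iterordered(True, iter([1, 3, 5]), iter([1, 2, 5]))) == [1, 2, 3, 5]
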
-class addset(abstractsmartset):
-    """Represent the addition of two sets
-
-    Wrapper structure for lazily adding two structures without losing much
-    performance on the __contains__ method
-
-    If the ascending attribute is set, that means the two structures are
-    ordered in either an ascending or descending way. Therefore, we can add
-    them maintaining the order by iterating over both at the same time
-
-    >>> xs = baseset([0, 3, 2])
-    >>> ys = baseset([5, 2, 4])
-
-    >>> rs = addset(xs, ys)
-    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
-    (True, True, False, True, 0, 4)
-    >>> rs = addset(xs, baseset([]))
-    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
-    (True, True, False, 0, 2)
-    >>> rs = addset(baseset([]), baseset([]))
-    >>> bool(rs), 0 in rs, rs.first(), rs.last()
-    (False, False, None, None)
-
-    iterate unsorted:
-    >>> rs = addset(xs, ys)
-    >>> # (use generator because pypy could call len())
-    >>> list(x for x in rs)  # without _genlist
-    [0, 3, 2, 5, 4]
-    >>> assert not rs._genlist
-    >>> len(rs)
-    5
-    >>> [x for x in rs]  # with _genlist
-    [0, 3, 2, 5, 4]
-    >>> assert rs._genlist
-
-    iterate ascending:
-    >>> rs = addset(xs, ys, ascending=True)
-    >>> # (use generator because pypy could call len())
-    >>> list(x for x in rs), list(x for x in rs.fastasc())  # without _asclist
-    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
-    >>> assert not rs._asclist
-    >>> len(rs)
-    5
-    >>> [x for x in rs], [x for x in rs.fastasc()]
-    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
-    >>> assert rs._asclist
-
-    iterate descending:
-    >>> rs = addset(xs, ys, ascending=False)
-    >>> # (use generator because pypy could call len())
-    >>> list(x for x in rs), list(x for x in rs.fastdesc())  # without _asclist
-    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
-    >>> assert not rs._asclist
-    >>> len(rs)
-    5
-    >>> [x for x in rs], [x for x in rs.fastdesc()]
-    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
-    >>> assert rs._asclist
-
-    iterate ascending without fastasc:
-    >>> rs = addset(xs, generatorset(ys), ascending=True)
-    >>> assert rs.fastasc is None
-    >>> [x for x in rs]
-    [0, 2, 3, 4, 5]
-
-    iterate descending without fastdesc:
-    >>> rs = addset(generatorset(xs), ys, ascending=False)
-    >>> assert rs.fastdesc is None
-    >>> [x for x in rs]
-    [5, 4, 3, 2, 0]
-    """
-    def __init__(self, revs1, revs2, ascending=None):
-        self._r1 = revs1
-        self._r2 = revs2
-        self._iter = None
-        self._ascending = ascending
-        self._genlist = None
-        self._asclist = None
-
-    def __len__(self):
-        return len(self._list)
-
-    def __nonzero__(self):
-        return bool(self._r1) or bool(self._r2)
-
-    @util.propertycache
-    def _list(self):
-        if not self._genlist:
-            self._genlist = baseset(iter(self))
-        return self._genlist
-
-    def __iter__(self):
-        """Iterate over both collections without repeating elements
-
-        If the ascending attribute is not set, iterate over the first one and
-        then over the second one checking for membership on the first one so we
-        don't yield any duplicates.
-
-        If the ascending attribute is set, iterate over both collections at the
-        same time, yielding only one value at a time in the given order.
-        """
-        if self._ascending is None:
-            if self._genlist:
-                return iter(self._genlist)
-            def arbitraryordergen():
-                for r in self._r1:
-                    yield r
-                inr1 = self._r1.__contains__
-                for r in self._r2:
-                    if not inr1(r):
-                        yield r
-            return arbitraryordergen()
-        # try to use our own fast iterator if it exists
-        self._trysetasclist()
-        if self._ascending:
-            attr = 'fastasc'
-        else:
-            attr = 'fastdesc'
-        it = getattr(self, attr)
-        if it is not None:
-            return it()
-        # maybe half of the component supports fast
-        # get iterator for _r1
-        iter1 = getattr(self._r1, attr)
-        if iter1 is None:
-            # let's avoid side effect (not sure it matters)
-            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
-        else:
-            iter1 = iter1()
-        # get iterator for _r2
-        iter2 = getattr(self._r2, attr)
-        if iter2 is None:
-            # let's avoid side effect (not sure it matters)
-            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
-        else:
-            iter2 = iter2()
-        return _iterordered(self._ascending, iter1, iter2)
-
-    def _trysetasclist(self):
-        """populate the _asclist attribute if possible and necessary"""
-        if self._genlist is not None and self._asclist is None:
-            self._asclist = sorted(self._genlist)
-
-    @property
-    def fastasc(self):
-        self._trysetasclist()
-        if self._asclist is not None:
-            return self._asclist.__iter__
-        iter1 = self._r1.fastasc
-        iter2 = self._r2.fastasc
-        if None in (iter1, iter2):
-            return None
-        return lambda: _iterordered(True, iter1(), iter2())
-
-    @property
-    def fastdesc(self):
-        self._trysetasclist()
-        if self._asclist is not None:
-            return self._asclist.__reversed__
-        iter1 = self._r1.fastdesc
-        iter2 = self._r2.fastdesc
-        if None in (iter1, iter2):
-            return None
-        return lambda: _iterordered(False, iter1(), iter2())
-
-    def __contains__(self, x):
-        return x in self._r1 or x in self._r2
-
-    def sort(self, reverse=False):
-        """Sort the added set
-
-        For this we use the cached list with all the generated values and if we
-        know they are ascending or descending we can sort them in a smart way.
-        """
-        self._ascending = not reverse
-
-    def isascending(self):
-        return self._ascending is not None and self._ascending
-
-    def isdescending(self):
-        return self._ascending is not None and not self._ascending
-
-    def istopo(self):
-        # not worth the trouble asserting if the two sets combined are still
-        # in topological order. Use the sort() predicate to explicitly sort
-        # again instead.
-        return False
-
-    def reverse(self):
-        if self._ascending is None:
-            self._list.reverse()
-        else:
-            self._ascending = not self._ascending
-
-    def first(self):
-        for x in self:
-            return x
-        return None
-
-    def last(self):
-        self.reverse()
-        val = self.first()
-        self.reverse()
-        return val
-
-    def __repr__(self):
-        d = {None: '', False: '-', True: '+'}[self._ascending]
-        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
-
-class generatorset(abstractsmartset):
-    """Wrap a generator for lazy iteration
-
-    Wrapper structure for generators that provides lazy membership and can
-    be iterated more than once.
-    When asked for membership it generates values until either it finds the
-    requested one or has gone through all the elements in the generator
-    """
-    def __init__(self, gen, iterasc=None):
-        """
-        gen: a generator producing the values for the generatorset.
-        """
-        self._gen = gen
-        self._asclist = None
-        self._cache = {}
-        self._genlist = []
-        self._finished = False
-        self._ascending = True
-        if iterasc is not None:
-            if iterasc:
-                self.fastasc = self._iterator
-                self.__contains__ = self._asccontains
-            else:
-                self.fastdesc = self._iterator
-                self.__contains__ = self._desccontains
-
-    def __nonzero__(self):
-        # Do not use 'for r in self' because it will enforce the iteration
-        # order (default ascending), possibly unrolling a whole descending
-        # iterator.
-        if self._genlist:
-            return True
-        for r in self._consumegen():
-            return True
-        return False
-
-    def __contains__(self, x):
-        if x in self._cache:
-            return self._cache[x]
-
-        # Use new values only, as existing values would be cached.
-        for l in self._consumegen():
-            if l == x:
-                return True
-
-        self._cache[x] = False
-        return False
-
-    def _asccontains(self, x):
-        """version of contains optimised for ascending generator"""
-        if x in self._cache:
-            return self._cache[x]
-
-        # Use new values only, as existing values would be cached.
-        for l in self._consumegen():
-            if l == x:
-                return True
-            if l > x:
-                break
-
-        self._cache[x] = False
-        return False
-
-    def _desccontains(self, x):
-        """version of contains optimised for descending generator"""
-        if x in self._cache:
-            return self._cache[x]
-
-        # Use new values only, as existing values would be cached.
-        for l in self._consumegen():
-            if l == x:
-                return True
-            if l < x:
-                break
-
-        self._cache[x] = False
-        return False
-
-    def __iter__(self):
-        if self._ascending:
-            it = self.fastasc
-        else:
-            it = self.fastdesc
-        if it is not None:
-            return it()
-        # we need to consume the iterator
-        for x in self._consumegen():
-            pass
-        # recall the same code
-        return iter(self)
-
-    def _iterator(self):
-        if self._finished:
-            return iter(self._genlist)
-
-        # We have to use this complex iteration strategy to allow multiple
-        # iterations at the same time. We need to be able to catch revisions
-        # removed from _consumegen and added to genlist in another instance.
-        #
-        # Getting rid of it would provide about a 15% speedup on this
-        # iteration.
-        genlist = self._genlist
-        nextrev = self._consumegen().next
-        _len = len # cache global lookup
-        def gen():
-            i = 0
-            while True:
-                if i < _len(genlist):
-                    yield genlist[i]
-                else:
-                    yield nextrev()
-                i += 1
-        return gen()
-
-    def _consumegen(self):
-        cache = self._cache
-        genlist = self._genlist.append
-        for item in self._gen:
-            cache[item] = True
-            genlist(item)
-            yield item
-        if not self._finished:
-            self._finished = True
-            asc = self._genlist[:]
-            asc.sort()
-            self._asclist = asc
-            self.fastasc = asc.__iter__
-            self.fastdesc = asc.__reversed__
-
-    def __len__(self):
-        for x in self._consumegen():
-            pass
-        return len(self._genlist)
-
-    def sort(self, reverse=False):
-        self._ascending = not reverse
-
-    def reverse(self):
-        self._ascending = not self._ascending
-
-    def isascending(self):
-        return self._ascending
-
-    def isdescending(self):
-        return not self._ascending
-
-    def istopo(self):
-        # not worth the trouble asserting if the two sets combined are still
-        # in topological order. Use the sort() predicate to explicitly sort
-        # again instead.
-        return False
-
-    def first(self):
-        if self._ascending:
-            it = self.fastasc
-        else:
-            it = self.fastdesc
-        if it is None:
-            # we need to consume all and try again
-            for x in self._consumegen():
-                pass
-            return self.first()
-        return next(it(), None)
-
-    def last(self):
-        if self._ascending:
-            it = self.fastdesc
-        else:
-            it = self.fastasc
-        if it is None:
-            # we need to consume all and try again
-            for x in self._consumegen():
-                pass
-            return self.first()
-        return next(it(), None)
-
-    def __repr__(self):
-        d = {False: '-', True: '+'}[self._ascending]
-        return '<%s%s>' % (type(self).__name__, d)
-
-class spanset(abstractsmartset):
-    """Duck type for baseset class which represents a range of revisions and
-    can work lazily and without having all the range in memory
-
-    Note that spanset(x, y) behave almost like xrange(x, y) except for two
-    notable points:
-    - when x < y it will be automatically descending,
-    - revision filtered with this repoview will be skipped.
-
-    """
-    def __init__(self, repo, start=0, end=None):
-        """
-        start: first revision included in the set
-               (defaults to 0)
-        end:   first revision excluded (last+1)
-               (defaults to len(repo))
-
-        Spanset will be descending if `end` < `start`.
-        """
-        if end is None:
-            end = len(repo)
-        self._ascending = start <= end
-        if not self._ascending:
-            start, end = end + 1, start + 1
-        self._start = start
-        self._end = end
-        self._hiddenrevs = repo.changelog.filteredrevs
-
-    def sort(self, reverse=False):
-        self._ascending = not reverse
-
-    def reverse(self):
-        self._ascending = not self._ascending
-
-    def istopo(self):
-        # not worth the trouble asserting if the two sets combined are still
-        # in topological order. Use the sort() predicate to explicitly sort
-        # again instead.
-        return False
-
-    def _iterfilter(self, iterrange):
-        s = self._hiddenrevs
-        for r in iterrange:
-            if r not in s:
-                yield r
-
-    def __iter__(self):
-        if self._ascending:
-            return self.fastasc()
-        else:
-            return self.fastdesc()
-
-    def fastasc(self):
-        iterrange = xrange(self._start, self._end)
-        if self._hiddenrevs:
-            return self._iterfilter(iterrange)
-        return iter(iterrange)
-
-    def fastdesc(self):
-        iterrange = xrange(self._end - 1, self._start - 1, -1)
-        if self._hiddenrevs:
-            return self._iterfilter(iterrange)
-        return iter(iterrange)
-
-    def __contains__(self, rev):
-        hidden = self._hiddenrevs
-        return ((self._start <= rev < self._end)
-                and not (hidden and rev in hidden))
-
-    def __nonzero__(self):
-        for r in self:
-            return True
-        return False
-
-    def __len__(self):
-        if not self._hiddenrevs:
-            return abs(self._end - self._start)
-        else:
-            count = 0
-            start = self._start
-            end = self._end
-            for rev in self._hiddenrevs:
-                if (end < rev <= start) or (start <= rev < end):
-                    count += 1
-            return abs(self._end - self._start) - count
-
-    def isascending(self):
-        return self._ascending
-
-    def isdescending(self):
-        return not self._ascending
-
-    def first(self):
-        if self._ascending:
-            it = self.fastasc
-        else:
-            it = self.fastdesc
-        for x in it():
-            return x
-        return None
-
-    def last(self):
-        if self._ascending:
-            it = self.fastdesc
-        else:
-            it = self.fastasc
-        for x in it():
-            return x
-        return None
-
-    def __repr__(self):
-        d = {False: '-', True: '+'}[self._ascending]
-        return '<%s%s %d:%d>' % (type(self).__name__, d,
-                                 self._start, self._end - 1)
-
-class fullreposet(spanset):
-    """a set containing all revisions in the repo
-
-    This class exists to host special optimization and magic to handle virtual
-    revisions such as "null".
-    """
-
-    def __init__(self, repo):
-        super(fullreposet, self).__init__(repo)
-
-    def __and__(self, other):
-        """As self contains the whole repo, all of the other set should also be
-        in self. Therefore `self & other = other`.
-
-        This boldly assumes the other contains valid revs only.
-        """
-        # other not a smartset, make is so
-        if not util.safehasattr(other, 'isascending'):
-            # filter out hidden revision
-            # (this boldly assumes all smartset are pure)
-            #
-            # `other` was used with "&", let's assume this is a set like
-            # object.
-            other = baseset(other - self._hiddenrevs)
-
-        other.sort(reverse=self.isdescending())
-        return other
-
-def prettyformatset(revs):
-    lines = []
-    rs = repr(revs)
-    p = 0
-    while p < len(rs):
-        q = rs.find('<', p + 1)
-        if q < 0:
-            q = len(rs)
-        l = rs.count('<', 0, p) - rs.count('>', 0, p)
-        assert l >= 0
-        lines.append((l, rs[p:q].rstrip()))
-        p = q
-    return '\n'.join('  ' * l + s for l, s in lines)
-
 def loadpredicate(ui, extname, registrarobj):
     """Load revset predicates from specified registrarobj
     """
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revsetlang.py	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,684 @@
+# revsetlang.py - parser, tokenizer and utility for revision set language
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import string
+
+from .i18n import _
+from . import (
+    error,
+    node,
+    parser,
+    pycompat,
+)
+
+elements = {
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    "##": (20, None, None, ("_concat", 20), None),
+    "~": (18, None, None, ("ancestor", 18), None),
+    "^": (18, None, None, ("parent", 18), "parentpost"),
+    "-": (5, None, ("negate", 19), ("minus", 5), None),
+    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
+    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
+    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
+    "not": (10, None, ("not", 10), None, None),
+    "!": (10, None, ("not", 10), None, None),
+    "and": (5, None, None, ("and", 5), None),
+    "&": (5, None, None, ("and", 5), None),
+    "%": (5, None, None, ("only", 5), "onlypost"),
+    "or": (4, None, None, ("or", 4), None),
+    "|": (4, None, None, ("or", 4), None),
+    "+": (4, None, None, ("or", 4), None),
+    "=": (3, None, None, ("keyvalue", 3), None),
+    ",": (2, None, None, ("list", 2), None),
+    ")": (0, None, None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "end": (0, None, None, None, None),
+}
+
+keywords = set(['and', 'or', 'not'])
+
+# default set of valid characters for the initial letter of symbols
+_syminitletters = set(
+    string.ascii_letters +
+    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
+
+# default set of valid characters for non-initial letters of symbols
+_symletters = _syminitletters | set(pycompat.sysstr('-/'))
+
+def tokenize(program, lookup=None, syminitletters=None, symletters=None):
+    '''
+    Parse a revset statement into a stream of tokens
+
+    ``syminitletters`` is the set of valid characters for the initial
+    letter of symbols.
+
+    By default, character ``c`` is recognized as valid for initial
+    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
+
+    ``symletters`` is the set of valid characters for non-initial
+    letters of symbols.
+
+    By default, character ``c`` is recognized as valid for non-initial
+    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
+
+    Check that @ is a valid unquoted token character (issue3686):
+    >>> list(tokenize("@::"))
+    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
+
+    '''
+    if syminitletters is None:
+        syminitletters = _syminitletters
+    if symletters is None:
+        symletters = _symletters
+
+    if program and lookup:
+        # attempt to parse old-style ranges first to deal with
+        # things like old-tag which contain query metacharacters
+        parts = program.split(':', 1)
+        if all(lookup(sym) for sym in parts if sym):
+            if parts[0]:
+                yield ('symbol', parts[0], 0)
+            if len(parts) > 1:
+                s = len(parts[0])
+                yield (':', None, s)
+                if parts[1]:
+                    yield ('symbol', parts[1], s + 1)
+            yield ('end', None, len(program))
+            return
+
+    pos, l = 0, len(program)
+    while pos < l:
+        c = program[pos]
+        if c.isspace(): # skip inter-token whitespace
+            pass
+        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
+            yield ('::', None, pos)
+            pos += 1 # skip ahead
+        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
+            yield ('..', None, pos)
+            pos += 1 # skip ahead
+        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
+            yield ('##', None, pos)
+            pos += 1 # skip ahead
+        elif c in "():=,-|&+!~^%": # handle simple operators
+            yield (c, None, pos)
+        elif (c in '"\'' or c == 'r' and
+              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
+            if c == 'r':
+                pos += 1
+                c = program[pos]
+                decode = lambda x: x
+            else:
+                decode = parser.unescapestr
+            pos += 1
+            s = pos
+            while pos < l: # find closing quote
+                d = program[pos]
+                if d == '\\': # skip over escaped characters
+                    pos += 2
+                    continue
+                if d == c:
+                    yield ('string', decode(program[s:pos]), s)
+                    break
+                pos += 1
+            else:
+                raise error.ParseError(_("unterminated string"), s)
+        # gather up a symbol/keyword
+        elif c in syminitletters:
+            s = pos
+            pos += 1
+            while pos < l: # find end of symbol
+                d = program[pos]
+                if d not in symletters:
+                    break
+                if d == '.' and program[pos - 1] == '.': # special case for ..
+                    pos -= 1
+                    break
+                pos += 1
+            sym = program[s:pos]
+            if sym in keywords: # operator keywords
+                yield (sym, None, s)
+            elif '-' in sym:
+                # some jerk gave us foo-bar-baz, try to check if it's a symbol
+                if lookup and lookup(sym):
+                    # looks like a real symbol
+                    yield ('symbol', sym, s)
+                else:
+                    # looks like an expression
+                    parts = sym.split('-')
+                    for p in parts[:-1]:
+                        if p: # possible consecutive -
+                            yield ('symbol', p, s)
+                        s += len(p)
+                        yield ('-', None, pos)
+                        s += 1
+                    if parts[-1]: # possible trailing -
+                        yield ('symbol', parts[-1], s)
+            else:
+                yield ('symbol', sym, s)
+            pos -= 1
+        else:
+            raise error.ParseError(_("syntax error in revset '%s'") %
+                                   program, pos)
+        pos += 1
+    yield ('end', None, pos)
+
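+# As a quick illustration (a sketch, not one of the module's doctests),
+# tokenizing a simple range expression yields each symbol, the operator,
+# and a closing 'end' token, along with input positions:
+#
+#   >>> list(tokenize('tip:0'))
+#   [('symbol', 'tip', 0), (':', None, 3), ('symbol', '0', 4), ('end', None, 5)]
+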
+# helpers
+
+_notset = object()
+
+def getsymbol(x):
+    if x and x[0] == 'symbol':
+        return x[1]
+    raise error.ParseError(_('not a symbol'))
+
+def getstring(x, err):
+    if x and (x[0] == 'string' or x[0] == 'symbol'):
+        return x[1]
+    raise error.ParseError(err)
+
+def getinteger(x, err, default=_notset):
+    if not x and default is not _notset:
+        return default
+    try:
+        return int(getstring(x, err))
+    except ValueError:
+        raise error.ParseError(err)
+
+def getlist(x):
+    if not x:
+        return []
+    if x[0] == 'list':
+        return list(x[1:])
+    return [x]
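+
+# e.g. (a sketch): getlist(('list', ('symbol', 'a'), ('symbol', 'b')))
+# returns [('symbol', 'a'), ('symbol', 'b')]; any other node is wrapped
+# in a one-element list, and None becomes [].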
+
+def getrange(x, err):
+    if not x:
+        raise error.ParseError(err)
+    op = x[0]
+    if op == 'range':
+        return x[1], x[2]
+    elif op == 'rangepre':
+        return None, x[1]
+    elif op == 'rangepost':
+        return x[1], None
+    elif op == 'rangeall':
+        return None, None
+    raise error.ParseError(err)
+
+def getargs(x, min, max, err):
+    l = getlist(x)
+    if len(l) < min or (max >= 0 and len(l) > max):
+        raise error.ParseError(err)
+    return l
+
+def getargsdict(x, funcname, keys):
+    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
+                                keyvaluenode='keyvalue', keynode='symbol')
+
+# Constants for ordering requirement, used in _analyze():
+#
+# If 'define', any nested functions and operations can change the ordering of
+# the entries in the set. If 'follow', any nested functions and operations
+# should take the ordering specified by the first operand to the '&' operator.
+#
+# For instance,
+#
+#   X & (Y | Z)
+#   ^   ^^^^^^^
+#   |   follow
+#   define
+#
+# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
+# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
+#
+# 'any' means the order doesn't matter. For instance,
+#
+#   X & !Y
+#        ^
+#        any
+#
+# 'y()' can either enforce its ordering requirement or take the ordering
+# specified by 'x()' because 'not()' doesn't care about the order.
+#
+# Transition of ordering requirement:
+#
+# 1. starts with 'define'
+# 2. shifts to 'follow' by 'x & y'
+# 3. changes back to 'define' on function call 'f(x)' or function-like
+#    operation 'x (f) y' because 'f' may have its own ordering requirement
+#    for 'x' and 'y' (e.g. 'first(x)')
+#
+anyorder = 'any'        # don't care about the order
+defineorder = 'define'  # should define the order
+followorder = 'follow'  # must follow the current order
+
+# transition table for 'x & y', from the current expression 'x' to 'y'
+_tofolloworder = {
+    anyorder: anyorder,
+    defineorder: followorder,
+    followorder: followorder,
+}
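+
+# A rough illustration (a sketch; tuple shapes follow _analyze() below):
+# analyzing 'a & (b | c)' tags the right-hand 'or' node with 'follow'
+# because it sits to the right of '&', while the outer 'and' is 'define':
+#
+#   analyze(parse('a & (b | c)'))
+#   -> ('and', ('symbol', 'a'),
+#              ('or', ('list', ('symbol', 'b'), ('symbol', 'c')), 'follow'),
+#       'define')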
+
+def _matchonly(revs, bases):
+    """
+    >>> f = lambda *args: _matchonly(*map(parse, args))
+    >>> f('ancestors(A)', 'not ancestors(B)')
+    ('list', ('symbol', 'A'), ('symbol', 'B'))
+    """
+    if (revs is not None
+        and revs[0] == 'func'
+        and getsymbol(revs[1]) == 'ancestors'
+        and bases is not None
+        and bases[0] == 'not'
+        and bases[1][0] == 'func'
+        and getsymbol(bases[1][1]) == 'ancestors'):
+        return ('list', revs[2], bases[1][2])
+
+def _fixops(x):
+    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
+    handled well by our simple top-down parser"""
+    if not isinstance(x, tuple):
+        return x
+
+    op = x[0]
+    if op == 'parent':
+        # x^:y means (x^) : y, not x ^ (:y)
+        # x^:  means (x^) :,   not x ^ (:)
+        post = ('parentpost', x[1])
+        if x[2][0] == 'dagrangepre':
+            return _fixops(('dagrange', post, x[2][1]))
+        elif x[2][0] == 'rangepre':
+            return _fixops(('range', post, x[2][1]))
+        elif x[2][0] == 'rangeall':
+            return _fixops(('rangepost', post))
+    elif op == 'or':
+        # make number of arguments deterministic:
+        # x + y + z -> (or x y z) -> (or (list x y z))
+        return (op, _fixops(('list',) + x[1:]))
+
+    return (op,) + tuple(_fixops(y) for y in x[1:])
+
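+# For instance (a sketch, not an original doctest), the raw parse of
+# 'x^:y' is ('parent', x, rangepre(y)); _fixops rebinds it so the '^'
+# applies first:
+#
+#   _fixops(('parent', ('symbol', 'x'), ('rangepre', ('symbol', 'y'))))
+#   -> ('range', ('parentpost', ('symbol', 'x')), ('symbol', 'y'))
+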
+def _analyze(x, order):
+    if x is None:
+        return x
+
+    op = x[0]
+    if op == 'minus':
+        return _analyze(('and', x[1], ('not', x[2])), order)
+    elif op == 'only':
+        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
+        return _analyze(t, order)
+    elif op == 'onlypost':
+        return _analyze(('func', ('symbol', 'only'), x[1]), order)
+    elif op == 'dagrangepre':
+        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
+    elif op == 'dagrangepost':
+        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
+    elif op == 'negate':
+        s = getstring(x[1], _("can't negate that"))
+        return _analyze(('string', '-' + s), order)
+    elif op in ('string', 'symbol'):
+        return x
+    elif op == 'and':
+        ta = _analyze(x[1], order)
+        tb = _analyze(x[2], _tofolloworder[order])
+        return (op, ta, tb, order)
+    elif op == 'or':
+        return (op, _analyze(x[1], order), order)
+    elif op == 'not':
+        return (op, _analyze(x[1], anyorder), order)
+    elif op == 'rangeall':
+        return (op, None, order)
+    elif op in ('rangepre', 'rangepost', 'parentpost'):
+        return (op, _analyze(x[1], defineorder), order)
+    elif op == 'group':
+        return _analyze(x[1], order)
+    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
+        ta = _analyze(x[1], defineorder)
+        tb = _analyze(x[2], defineorder)
+        return (op, ta, tb, order)
+    elif op == 'list':
+        return (op,) + tuple(_analyze(y, order) for y in x[1:])
+    elif op == 'keyvalue':
+        return (op, x[1], _analyze(x[2], order))
+    elif op == 'func':
+        f = getsymbol(x[1])
+        d = defineorder
+        if f == 'present':
+            # 'present(set)' is known to return the argument set with no
+            # modification, so forward the current order to its argument
+            d = order
+        return (op, x[1], _analyze(x[2], d), order)
+    raise ValueError('invalid operator %r' % op)
+
+def analyze(x, order=defineorder):
+    """Transform raw parsed tree to evaluatable tree which can be fed to
+    optimize() or getset()
+
+    All pseudo operations should be mapped to real operations or functions
+    defined in methods or symbols table respectively.
+
+    'order' specifies how the current expression 'x' is ordered (see the
+    constants defined above.)
+    """
+    return _analyze(x, order)
+
+def _optimize(x, small):
+    if x is None:
+        return 0, x
+
+    smallbonus = 1
+    if small:
+        smallbonus = .5
+
+    op = x[0]
+    if op in ('string', 'symbol'):
+        return smallbonus, x # single revisions are small
+    elif op == 'and':
+        wa, ta = _optimize(x[1], True)
+        wb, tb = _optimize(x[2], True)
+        order = x[3]
+        w = min(wa, wb)
+
+        # (::x and not ::y)/(not ::y and ::x) have a fast path
+        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
+        if tm:
+            return w, ('func', ('symbol', 'only'), tm, order)
+
+        if tb is not None and tb[0] == 'not':
+            return wa, ('difference', ta, tb[1], order)
+
+        if wa > wb:
+            return w, (op, tb, ta, order)
+        return w, (op, ta, tb, order)
+    elif op == 'or':
+        # fast path for machine-generated expressions, which are likely to
+        # have lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
+        order = x[2]
+        ws, ts, ss = [], [], []
+        def flushss():
+            if not ss:
+                return
+            if len(ss) == 1:
+                w, t = ss[0]
+            else:
+                s = '\0'.join(t[1] for w, t in ss)
+                y = ('func', ('symbol', '_list'), ('string', s), order)
+                w, t = _optimize(y, False)
+            ws.append(w)
+            ts.append(t)
+            del ss[:]
+        for y in getlist(x[1]):
+            w, t = _optimize(y, False)
+            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
+                ss.append((w, t))
+                continue
+            flushss()
+            ws.append(w)
+            ts.append(t)
+        flushss()
+        if len(ts) == 1:
+            return ws[0], ts[0] # 'or' operation is fully optimized out
+        # we can't reorder trees by weight because it would change the order.
+        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
+        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
+        return max(ws), (op, ('list',) + tuple(ts), order)
+    elif op == 'not':
+        # Optimize not public() to _notpublic() because we have a fast version
+        if x[1][:3] == ('func', ('symbol', 'public'), None):
+            order = x[1][3]
+            newsym = ('func', ('symbol', '_notpublic'), None, order)
+            o = _optimize(newsym, not small)
+            return o[0], o[1]
+        else:
+            o = _optimize(x[1], not small)
+            order = x[2]
+            return o[0], (op, o[1], order)
+    elif op == 'rangeall':
+        return smallbonus, x
+    elif op in ('rangepre', 'rangepost', 'parentpost'):
+        o = _optimize(x[1], small)
+        order = x[2]
+        return o[0], (op, o[1], order)
+    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
+        wa, ta = _optimize(x[1], small)
+        wb, tb = _optimize(x[2], small)
+        order = x[3]
+        return wa + wb, (op, ta, tb, order)
+    elif op == 'list':
+        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
+        return sum(ws), (op,) + ts
+    elif op == 'keyvalue':
+        w, t = _optimize(x[2], small)
+        return w, (op, x[1], t)
+    elif op == 'func':
+        f = getsymbol(x[1])
+        wa, ta = _optimize(x[2], small)
+        if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
+                 'keyword', 'outgoing', 'user', 'destination'):
+            w = 10 # slow
+        elif f in ('modifies', 'adds', 'removes'):
+            w = 30 # slower
+        elif f == "contains":
+            w = 100 # very slow
+        elif f == "ancestor":
+            w = 1 * smallbonus
+        elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
+            w = 0
+        elif f == "sort":
+            w = 10 # assume most sorts look at changelog
+        else:
+            w = 1
+        order = x[3]
+        return w + wa, (op, x[1], ta, order)
+    raise ValueError('invalid operator %r' % op)
+
+def optimize(tree):
+    """Optimize evaluatable tree
+
+    All pseudo operations should be transformed beforehand.
+    """
+    _weight, newtree = _optimize(tree, small=True)
+    return newtree
+
+# the set of valid characters for the initial letter of symbols in
+# alias declarations and definitions
+_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
+
+def _parsewith(spec, lookup=None, syminitletters=None):
+    """Generate a parse tree of given spec with given tokenizing options
+
+    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
+    ('func', ('symbol', 'foo'), ('symbol', '$1'))
+    >>> _parsewith('$1')
+    Traceback (most recent call last):
+      ...
+    ParseError: ("syntax error in revset '$1'", 0)
+    >>> _parsewith('foo bar')
+    Traceback (most recent call last):
+      ...
+    ParseError: ('invalid token', 4)
+    """
+    p = parser.parser(elements)
+    tree, pos = p.parse(tokenize(spec, lookup=lookup,
+                                 syminitletters=syminitletters))
+    if pos != len(spec):
+        raise error.ParseError(_('invalid token'), pos)
+    return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
+
+class _aliasrules(parser.basealiasrules):
+    """Parsing and expansion rule set of revset aliases"""
+    _section = _('revset alias')
+
+    @staticmethod
+    def _parse(spec):
+        """Parse alias declaration/definition ``spec``
+
+        This also allows symbol names to use ``$`` as an initial letter
+        (for backward compatibility), and callers of this function should
+        examine whether ``$`` is also used for unexpected symbols or not.
+        """
+        return _parsewith(spec, syminitletters=_aliassyminitletters)
+
+    @staticmethod
+    def _trygetfunc(tree):
+        if tree[0] == 'func' and tree[1][0] == 'symbol':
+            return tree[1][1], getlist(tree[2])
+
+def expandaliases(ui, tree):
+    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
+    tree = _aliasrules.expand(aliases, tree)
+    # warn about problematic (but not referenced) aliases
+    for name, alias in sorted(aliases.iteritems()):
+        if alias.error and not alias.warned:
+            ui.warn(_('warning: %s\n') % (alias.error))
+            alias.warned = True
+    return tree
+
+def foldconcat(tree):
+    """Fold elements to be concatenated by `##`
+    """
+    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
+        return tree
+    if tree[0] == '_concat':
+        pending = [tree]
+        l = []
+        while pending:
+            e = pending.pop()
+            if e[0] == '_concat':
+                pending.extend(reversed(e[1:]))
+            elif e[0] in ('string', 'symbol'):
+                l.append(e[1])
+            else:
+                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
+                raise error.ParseError(msg)
+        return ('string', ''.join(l))
+    else:
+        return tuple(foldconcat(t) for t in tree)
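+
+# For instance (a sketch): folding the tree for "'a' ## 'b'" collapses
+# the '_concat' node into a single string:
+#
+#   foldconcat(('_concat', ('string', 'a'), ('string', 'b')))
+#   -> ('string', 'ab')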
+
+def parse(spec, lookup=None):
+    return _parsewith(spec, lookup=lookup)
+
+def formatspec(expr, *args):
+    '''
+    This is a convenience function for using revsets internally, and
+    escapes arguments appropriately. Aliases are intentionally ignored
+    so that intended expression behavior isn't accidentally subverted.
+
+    Supported arguments:
+
+    %r = revset expression, parenthesized
+    %d = int(arg), no quoting
+    %s = string(arg), escaped and single-quoted
+    %b = arg.branch(), escaped and single-quoted
+    %n = hex(arg), single-quoted
+    %% = a literal '%'
+
+    Prefixing the type with 'l' specifies a parenthesized list of that type.
+
+    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
+    '(10 or 11):: and ((this()) or (that()))'
+    >>> formatspec('%d:: and not %d::', 10, 20)
+    '10:: and not 20::'
+    >>> formatspec('%ld or %ld', [], [1])
+    "_list('') or 1"
+    >>> formatspec('keyword(%s)', 'foo\\xe9')
+    "keyword('foo\\\\xe9')"
+    >>> b = lambda: 'default'
+    >>> b.branch = b
+    >>> formatspec('branch(%b)', b)
+    "branch('default')"
+    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
+    "root(_list('a\\x00b\\x00c\\x00d'))"
+    '''
+
+    def quote(s):
+        return repr(str(s))
+
+    def argtype(c, arg):
+        if c == 'd':
+            return str(int(arg))
+        elif c == 's':
+            return quote(arg)
+        elif c == 'r':
+            parse(arg) # make sure syntax errors are confined
+            return '(%s)' % arg
+        elif c == 'n':
+            return quote(node.hex(arg))
+        elif c == 'b':
+            return quote(arg.branch())
+
+    def listexp(s, t):
+        l = len(s)
+        if l == 0:
+            return "_list('')"
+        elif l == 1:
+            return argtype(t, s[0])
+        elif t == 'd':
+            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
+        elif t == 's':
+            return "_list('%s')" % "\0".join(s)
+        elif t == 'n':
+            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
+        elif t == 'b':
+            return "_list('%s')" % "\0".join(a.branch() for a in s)
+
+        m = l // 2
+        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
+
+    ret = ''
+    pos = 0
+    arg = 0
+    while pos < len(expr):
+        c = expr[pos]
+        if c == '%':
+            pos += 1
+            d = expr[pos]
+            if d == '%':
+                ret += d
+            elif d in 'dsnbr':
+                ret += argtype(d, args[arg])
+                arg += 1
+            elif d == 'l':
+                # a list of some type
+                pos += 1
+                d = expr[pos]
+                ret += listexp(list(args[arg]), d)
+                arg += 1
+            else:
+                raise error.Abort(_('unexpected revspec format character %s')
+                                  % d)
+        else:
+            ret += c
+        pos += 1
+
+    return ret
+
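+# Typical internal use (a sketch, not an original doctest): build a safe
+# revset string from computed values, then hand it to parse():
+#
+#   >>> formatspec('%ld and file(%s)', [10, 20], 'path:foo')
+#   "_intlist('10\x0020') and file('path:foo')"
+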
+def prettyformat(tree):
+    return parser.prettyformat(tree, ('string', 'symbol'))
+
+def depth(tree):
+    if isinstance(tree, tuple):
+        return max(map(depth, tree)) + 1
+    else:
+        return 0
+
+def funcsused(tree):
+    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
+        return set()
+    else:
+        funcs = set()
+        for s in tree[1:]:
+            funcs |= funcsused(s)
+        if tree[0] == 'func':
+            funcs.add(tree[1][1])
+        return funcs
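+
+# e.g. (a sketch): funcsused(parse('head() and grep("foo")')) returns a
+# set containing 'head' and 'grep'.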
--- a/mercurial/scmposix.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/scmposix.py	Tue Feb 28 11:13:25 2017 -0800
@@ -40,8 +40,15 @@
 def userrcpath():
     if pycompat.sysplatform == 'plan9':
         return [encoding.environ['home'] + '/lib/hgrc']
+    elif pycompat.sysplatform == 'darwin':
+        return [os.path.expanduser('~/.hgrc')]
     else:
-        return [os.path.expanduser('~/.hgrc')]
+        confighome = encoding.environ.get('XDG_CONFIG_HOME')
+        if confighome is None or not os.path.isabs(confighome):
+            confighome = os.path.expanduser('~/.config')
+
+        return [os.path.expanduser('~/.hgrc'),
+                os.path.join(confighome, 'hg', 'hgrc')]
 
 def termsize(ui):
     try:
--- a/mercurial/scmutil.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/scmutil.py	Tue Feb 28 11:13:25 2017 -0800
@@ -29,7 +29,7 @@
     pathutil,
     phases,
     pycompat,
-    revset,
+    revsetlang,
     similar,
     util,
 )
@@ -890,7 +890,7 @@
     return repo[l.last()]
 
 def _pairspec(revspec):
-    tree = revset.parse(revspec)
+    tree = revsetlang.parse(revspec)
     return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
 
 def revpair(repo, revs):
@@ -936,7 +936,7 @@
     revision numbers.
 
     It is assumed the revsets are already formatted. If you have arguments
-    that need to be expanded in the revset, call ``revset.formatspec()``
+    that need to be expanded in the revset, call ``revsetlang.formatspec()``
     and pass the result as an element of ``specs``.
 
     Specifying a single revset is allowed.
@@ -947,10 +947,9 @@
     allspecs = []
     for spec in specs:
         if isinstance(spec, int):
-            spec = revset.formatspec('rev(%d)', spec)
+            spec = revsetlang.formatspec('rev(%d)', spec)
         allspecs.append(spec)
-    m = revset.matchany(repo.ui, allspecs, repo)
-    return m(repo)
+    return repo.anyrevs(allspecs, user=True)
 
 def meaningfulparents(repo, ctx):
     """Return list of meaningful (or all if debug) parentrevs for rev.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/smartset.py	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,1056 @@
+# smartset.py - data structure for revision set
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from . import (
+    util,
+)
+
+def _formatsetrepr(r):
+    """Format an optional printable representation of a set
+
+    ========  =================================
+    type(r)   example
+    ========  =================================
+    tuple     ('<not %r>', other)
+    str       '<branch closed>'
+    callable  lambda: '<branch %r>' % sorted(b)
+    object    other
+    ========  =================================
+    """
+    if r is None:
+        return ''
+    elif isinstance(r, tuple):
+        return r[0] % r[1:]
+    elif isinstance(r, str):
+        return r
+    elif callable(r):
+        return r()
+    else:
+        return repr(r)
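+
+# For example (a sketch): _formatsetrepr(('<not %r>', baseset([1])))
+# returns "<not <baseset [1]>>", and _formatsetrepr(None) returns ''.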
+
+class abstractsmartset(object):
+
+    def __nonzero__(self):
+        """True if the smartset is not empty"""
+        raise NotImplementedError()
+
+    def __contains__(self, rev):
+        """provide fast membership testing"""
+        raise NotImplementedError()
+
+    def __iter__(self):
+        """iterate the set in the order it is supposed to be iterated"""
+        raise NotImplementedError()
+
+    # Attributes containing a function to perform a fast iteration in a given
+    # direction. A smartset can have none, one, or both defined.
+    #
+    # Default value is None instead of a function returning None to avoid
+    # initializing an iterator just for testing if a fast method exists.
+    fastasc = None
+    fastdesc = None
+
+    def isascending(self):
+        """True if the set will iterate in ascending order"""
+        raise NotImplementedError()
+
+    def isdescending(self):
+        """True if the set will iterate in descending order"""
+        raise NotImplementedError()
+
+    def istopo(self):
+        """True if the set will iterate in topographical order"""
+        raise NotImplementedError()
+
+    def min(self):
+        """return the minimum element in the set"""
+        if self.fastasc is None:
+            v = min(self)
+        else:
+            for v in self.fastasc():
+                break
+            else:
+                raise ValueError('arg is an empty sequence')
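+        # memoize: replace this method on the instance with a constant
+        # lambda holding the computed value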
+        self.min = lambda: v
+        return v
+
+    def max(self):
+        """return the maximum element in the set"""
+        if self.fastdesc is None:
+            v = max(self)
+        else:
+            for v in self.fastdesc():
+                break
+            else:
+                raise ValueError('arg is an empty sequence')
+        self.max = lambda: v
+        return v
+
+    def first(self):
+        """return the first element in the set (user iteration perspective)
+
+        Return None if the set is empty"""
+        raise NotImplementedError()
+
+    def last(self):
+        """return the last element in the set (user iteration perspective)
+
+        Return None if the set is empty"""
+        raise NotImplementedError()
+
+    def __len__(self):
+        """return the length of the smartsets
+
+        This can be expensive on smartset that could be lazy otherwise."""
+        raise NotImplementedError()
+
+    def reverse(self):
+        """reverse the expected iteration order"""
+        raise NotImplementedError()
+
+    def sort(self, reverse=False):
+        """get the set to iterate in an ascending or descending order"""
+        raise NotImplementedError()
+
+    def __and__(self, other):
+        """Returns a new object with the intersection of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        if isinstance(other, fullreposet):
+            return self
+        return self.filter(other.__contains__, condrepr=other, cache=False)
+
+    def __add__(self, other):
+        """Returns a new object with the union of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        return addset(self, other)
+
+    def __sub__(self, other):
+        """Returns a new object with the substraction of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        c = other.__contains__
+        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
+                           cache=False)
+
+    def filter(self, condition, condrepr=None, cache=True):
+        """Returns this smartset filtered by condition as a new smartset.
+
+        `condition` is a callable which takes a revision number and returns a
+        boolean. Optional `condrepr` provides a printable representation of
+        the given `condition`.
+
+        This is part of the mandatory API for smartset."""
+        # builtins cannot be cached, but they do not need to be
+        if cache and util.safehasattr(condition, 'func_code'):
+            condition = util.cachefunc(condition)
+        return filteredset(self, condition, condrepr)
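+
+# For example (a sketch): keep only the even revisions of a baseset.
+#
+#   >>> s = baseset([0, 1, 2, 3]).filter(lambda r: r % 2 == 0)
+#   >>> list(s)
+#   [0, 2]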
+
+class baseset(abstractsmartset):
+    """Basic data structure that represents a revset and contains the basic
+    operation that it should be able to perform.
+
+    Every method in this class should be implemented by any smartset class.
+
+    This class could be constructed by an (unordered) set, or an (ordered)
+    list-like object. If a set is provided, it'll be sorted lazily.
+
+    >>> x = [4, 0, 7, 6]
+    >>> y = [5, 6, 7, 3]
+
+    Construct by a set:
+    >>> xs = baseset(set(x))
+    >>> ys = baseset(set(y))
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[0, 4, 6, 7, 3, 5], [6, 7], [0, 4]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'baseset', 'baseset']
+
+    Construct by a list-like:
+    >>> xs = baseset(x)
+    >>> ys = baseset(i for i in y)
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'filteredset', 'filteredset']
+
+    Populate "_set" fields in the lists so set optimization may be used:
+    >>> [1 in xs, 3 in ys]
+    [False, True]
+
+    Without sort(), results won't be changed:
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'filteredset', 'filteredset']
+
+    With sort(), set optimization could be used:
+    >>> xs.sort(reverse=True)
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[7, 6, 4, 0, 5, 3], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'baseset', 'baseset']
+
+    >>> ys.sort()
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[7, 6, 4, 0, 3, 5], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'baseset', 'baseset']
+
+    istopo is preserved across set operations
+    >>> xs = baseset(set(x), istopo=True)
+    >>> rs = xs & ys
+    >>> type(rs).__name__
+    'baseset'
+    >>> rs._istopo
+    True
+    """
+    def __init__(self, data=(), datarepr=None, istopo=False):
+        """
+        datarepr: a tuple of (format, obj, ...), a function or an object that
+                  provides a printable representation of the given data.
+        """
+        self._ascending = None
+        self._istopo = istopo
+        if not isinstance(data, list):
+            if isinstance(data, set):
+                self._set = data
+                # a set has no order; we pick one for stability purposes
+                self._ascending = True
+                # converting set to list has a cost, do it lazily
+                data = None
+            else:
+                data = list(data)
+        if data is not None:
+            self._list = data
+        self._datarepr = datarepr
+
+    @util.propertycache
+    def _set(self):
+        return set(self._list)
+
+    @util.propertycache
+    def _asclist(self):
+        asclist = self._list[:]
+        asclist.sort()
+        return asclist
+
+    @util.propertycache
+    def _list(self):
+        # _list is only lazily constructed if we have _set
+        assert '_set' in self.__dict__
+        return list(self._set)
+
+    def __iter__(self):
+        if self._ascending is None:
+            return iter(self._list)
+        elif self._ascending:
+            return iter(self._asclist)
+        else:
+            return reversed(self._asclist)
+
+    def fastasc(self):
+        return iter(self._asclist)
+
+    def fastdesc(self):
+        return reversed(self._asclist)
+
+    @util.propertycache
+    def __contains__(self):
+        return self._set.__contains__
+
+    def __nonzero__(self):
+        return bool(len(self))
+
+    def sort(self, reverse=False):
+        self._ascending = not bool(reverse)
+        self._istopo = False
+
+    def reverse(self):
+        if self._ascending is None:
+            self._list.reverse()
+        else:
+            self._ascending = not self._ascending
+        self._istopo = False
+
+    def __len__(self):
+        if '_list' in self.__dict__:
+            return len(self._list)
+        else:
+            return len(self._set)
+
+    def isascending(self):
+        """Returns True if the collection is ascending order, False if not.
+
+        This is part of the mandatory API for smartset."""
+        if len(self) <= 1:
+            return True
+        return self._ascending is not None and self._ascending
+
+    def isdescending(self):
+        """Returns True if the collection is descending order, False if not.
+
+        This is part of the mandatory API for smartset."""
+        if len(self) <= 1:
+            return True
+        return self._ascending is not None and not self._ascending
+
+    def istopo(self):
+        """Is the collection is in topographical order or not.
+
+        This is part of the mandatory API for smartset."""
+        if len(self) <= 1:
+            return True
+        return self._istopo
+
+    def first(self):
+        if self:
+            if self._ascending is None:
+                return self._list[0]
+            elif self._ascending:
+                return self._asclist[0]
+            else:
+                return self._asclist[-1]
+        return None
+
+    def last(self):
+        if self:
+            if self._ascending is None:
+                return self._list[-1]
+            elif self._ascending:
+                return self._asclist[-1]
+            else:
+                return self._asclist[0]
+        return None
+
+    def _fastsetop(self, other, op):
+        # try to use native set operations as fast paths
+        if (type(other) is baseset and '_set' in other.__dict__ and '_set' in
+            self.__dict__ and self._ascending is not None):
+            s = baseset(data=getattr(self._set, op)(other._set),
+                        istopo=self._istopo)
+            s._ascending = self._ascending
+        else:
+            s = getattr(super(baseset, self), op)(other)
+        return s
+
+    def __and__(self, other):
+        return self._fastsetop(other, '__and__')
+
+    def __sub__(self, other):
+        return self._fastsetop(other, '__sub__')
+
+    def __repr__(self):
+        d = {None: '', False: '-', True: '+'}[self._ascending]
+        s = _formatsetrepr(self._datarepr)
+        if not s:
+            l = self._list
+            # if _list has been built from a set, it might have a different
+            # order from one python implementation to another.
+            # We fall back to the sorted version for a stable output.
+            if self._ascending is not None:
+                l = self._asclist
+            s = repr(l)
+        return '<%s%s %s>' % (type(self).__name__, d, s)
+
+class filteredset(abstractsmartset):
+    """Duck type for baseset class which iterates lazily over the revisions in
+    the subset and contains a function which tests for membership in the
+    revset.
+    """
+    def __init__(self, subset, condition=lambda x: True, condrepr=None):
+        """
+        condition: a function that decides whether a revision in the subset
+                   belongs to the revset or not.
+        condrepr: a tuple of (format, obj, ...), a function or an object that
+                  provides a printable representation of the given condition.
+        """
+        self._subset = subset
+        self._condition = condition
+        self._condrepr = condrepr
+
+    def __contains__(self, x):
+        return x in self._subset and self._condition(x)
+
+    def __iter__(self):
+        return self._iterfilter(self._subset)
+
+    def _iterfilter(self, it):
+        cond = self._condition
+        for x in it:
+            if cond(x):
+                yield x
+
+    @property
+    def fastasc(self):
+        it = self._subset.fastasc
+        if it is None:
+            return None
+        return lambda: self._iterfilter(it())
+
+    @property
+    def fastdesc(self):
+        it = self._subset.fastdesc
+        if it is None:
+            return None
+        return lambda: self._iterfilter(it())
+
+    def __nonzero__(self):
+        fast = None
+        candidates = [self.fastasc if self.isascending() else None,
+                      self.fastdesc if self.isdescending() else None,
+                      self.fastasc,
+                      self.fastdesc]
+        for candidate in candidates:
+            if candidate is not None:
+                fast = candidate
+                break
+
+        if fast is not None:
+            it = fast()
+        else:
+            it = self
+
+        for r in it:
+            return True
+        return False
+
+    def __len__(self):
+        # Basic implementation to be changed in future patches.
+        # Until this gets improved, we use a generator expression
+        # here, since a list comprehension would be free to call __len__
+        # again, causing infinite recursion.
+        l = baseset(r for r in self)
+        return len(l)
+
+    def sort(self, reverse=False):
+        self._subset.sort(reverse=reverse)
+
+    def reverse(self):
+        self._subset.reverse()
+
+    def isascending(self):
+        return self._subset.isascending()
+
+    def isdescending(self):
+        return self._subset.isdescending()
+
+    def istopo(self):
+        return self._subset.istopo()
+
+    def first(self):
+        for x in self:
+            return x
+        return None
+
+    def last(self):
+        it = None
+        if self.isascending():
+            it = self.fastdesc
+        elif self.isdescending():
+            it = self.fastasc
+        if it is not None:
+            for x in it():
+                return x
+            return None # empty case
+        else:
+            x = None
+            for x in self:
+                pass
+            return x
+
+    def __repr__(self):
+        xs = [repr(self._subset)]
+        s = _formatsetrepr(self._condrepr)
+        if s:
+            xs.append(s)
+        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
+
+def _iterordered(ascending, iter1, iter2):
+    """produce an ordered iteration from two iterators with the same order
+
+    The ascending is used to indicated the iteration direction.
+    """
+    choice = max
+    if ascending:
+        choice = min
+
+    val1 = None
+    val2 = None
+    try:
+        # Consume both iterators in an ordered way until one is empty
+        while True:
+            if val1 is None:
+                val1 = next(iter1)
+            if val2 is None:
+                val2 = next(iter2)
+            n = choice(val1, val2)
+            yield n
+            if val1 == n:
+                val1 = None
+            if val2 == n:
+                val2 = None
+    except StopIteration:
+        # Flush any remaining values and consume the other one
+        it = iter2
+        if val1 is not None:
+            yield val1
+            it = iter1
+        elif val2 is not None:
+            # the values might have been equal and both iterators exhausted
+            yield val2
+        for val in it:
+            yield val
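+
+# A quick check (a sketch): merging two ascending iterators produces an
+# ordered union without duplicates:
+#
+#   >>> list(_iterordered(True, iter([0, 2, 4]), iter([1, 2, 5])))
+#   [0, 1, 2, 4, 5]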
+
+class addset(abstractsmartset):
+    """Represent the addition of two sets
+
+    Wrapper structure for lazily adding two structures without losing much
+    performance on the __contains__ method
+
+    If the ascending attribute is set, that means the two structures are
+    ordered in either an ascending or descending way. Therefore, we can add
+    them while maintaining order by iterating over both at the same time.
+
+    >>> xs = baseset([0, 3, 2])
+    >>> ys = baseset([5, 2, 4])
+
+    >>> rs = addset(xs, ys)
+    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
+    (True, True, False, True, 0, 4)
+    >>> rs = addset(xs, baseset([]))
+    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
+    (True, True, False, 0, 2)
+    >>> rs = addset(baseset([]), baseset([]))
+    >>> bool(rs), 0 in rs, rs.first(), rs.last()
+    (False, False, None, None)
+
+    iterate unsorted:
+    >>> rs = addset(xs, ys)
+    >>> # (use generator because pypy could call len())
+    >>> list(x for x in rs)  # without _genlist
+    [0, 3, 2, 5, 4]
+    >>> assert not rs._genlist
+    >>> len(rs)
+    5
+    >>> [x for x in rs]  # with _genlist
+    [0, 3, 2, 5, 4]
+    >>> assert rs._genlist
+
+    iterate ascending:
+    >>> rs = addset(xs, ys, ascending=True)
+    >>> # (use generator because pypy could call len())
+    >>> list(x for x in rs), list(x for x in rs.fastasc())  # without _asclist
+    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
+    >>> assert not rs._asclist
+    >>> len(rs)
+    5
+    >>> [x for x in rs], [x for x in rs.fastasc()]
+    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
+    >>> assert rs._asclist
+
+    iterate descending:
+    >>> rs = addset(xs, ys, ascending=False)
+    >>> # (use generator because pypy could call len())
+    >>> list(x for x in rs), list(x for x in rs.fastdesc())  # without _asclist
+    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
+    >>> assert not rs._asclist
+    >>> len(rs)
+    5
+    >>> [x for x in rs], [x for x in rs.fastdesc()]
+    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
+    >>> assert rs._asclist
+
+    iterate ascending without fastasc:
+    >>> rs = addset(xs, generatorset(ys), ascending=True)
+    >>> assert rs.fastasc is None
+    >>> [x for x in rs]
+    [0, 2, 3, 4, 5]
+
+    iterate descending without fastdesc:
+    >>> rs = addset(generatorset(xs), ys, ascending=False)
+    >>> assert rs.fastdesc is None
+    >>> [x for x in rs]
+    [5, 4, 3, 2, 0]
+    """
+    def __init__(self, revs1, revs2, ascending=None):
+        self._r1 = revs1
+        self._r2 = revs2
+        self._iter = None
+        self._ascending = ascending
+        self._genlist = None
+        self._asclist = None
+
+    def __len__(self):
+        return len(self._list)
+
+    def __nonzero__(self):
+        return bool(self._r1) or bool(self._r2)
+
+    @util.propertycache
+    def _list(self):
+        if not self._genlist:
+            self._genlist = baseset(iter(self))
+        return self._genlist
+
+    def __iter__(self):
+        """Iterate over both collections without repeating elements
+
+        If the ascending attribute is not set, iterate over the first one and
+        then over the second one, checking for membership in the first one so
+        we don't yield any duplicates.
+
+        If the ascending attribute is set, iterate over both collections at the
+        same time, yielding only one value at a time in the given order.
+        """
+        if self._ascending is None:
+            if self._genlist:
+                return iter(self._genlist)
+            def arbitraryordergen():
+                for r in self._r1:
+                    yield r
+                inr1 = self._r1.__contains__
+                for r in self._r2:
+                    if not inr1(r):
+                        yield r
+            return arbitraryordergen()
+        # try to use our own fast iterator if it exists
+        self._trysetasclist()
+        if self._ascending:
+            attr = 'fastasc'
+        else:
+            attr = 'fastdesc'
+        it = getattr(self, attr)
+        if it is not None:
+            return it()
+        # maybe one of the two components supports fast iteration
+        # get iterator for _r1
+        iter1 = getattr(self._r1, attr)
+        if iter1 is None:
+            # let's avoid side effects (not sure it matters)
+            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
+        else:
+            iter1 = iter1()
+        # get iterator for _r2
+        iter2 = getattr(self._r2, attr)
+        if iter2 is None:
+            # let's avoid side effects (not sure it matters)
+            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
+        else:
+            iter2 = iter2()
+        return _iterordered(self._ascending, iter1, iter2)
+
+    def _trysetasclist(self):
+        """populate the _asclist attribute if possible and necessary"""
+        if self._genlist is not None and self._asclist is None:
+            self._asclist = sorted(self._genlist)
+
+    @property
+    def fastasc(self):
+        self._trysetasclist()
+        if self._asclist is not None:
+            return self._asclist.__iter__
+        iter1 = self._r1.fastasc
+        iter2 = self._r2.fastasc
+        if None in (iter1, iter2):
+            return None
+        return lambda: _iterordered(True, iter1(), iter2())
+
+    @property
+    def fastdesc(self):
+        self._trysetasclist()
+        if self._asclist is not None:
+            return self._asclist.__reversed__
+        iter1 = self._r1.fastdesc
+        iter2 = self._r2.fastdesc
+        if None in (iter1, iter2):
+            return None
+        return lambda: _iterordered(False, iter1(), iter2())
+
+    def __contains__(self, x):
+        return x in self._r1 or x in self._r2
+
+    def sort(self, reverse=False):
+        """Sort the added set
+
+        For this we use the cached list with all the generated values, and if
+        we know whether they are ascending or descending we can sort them in
+        a smart way.
+        """
+        self._ascending = not reverse
+
+    def isascending(self):
+        return self._ascending is not None and self._ascending
+
+    def isdescending(self):
+        return self._ascending is not None and not self._ascending
+
+    def istopo(self):
+        # not worth the trouble asserting if the two sets combined are still
+        # in topological order. Use the sort() predicate to explicitly sort
+        # again instead.
+        return False
+
+    def reverse(self):
+        if self._ascending is None:
+            self._list.reverse()
+        else:
+            self._ascending = not self._ascending
+
+    def first(self):
+        for x in self:
+            return x
+        return None
+
+    def last(self):
+        self.reverse()
+        val = self.first()
+        self.reverse()
+        return val
+
+    def __repr__(self):
+        d = {None: '', False: '-', True: '+'}[self._ascending]
+        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
+
+class generatorset(abstractsmartset):
+    """Wrap a generator for lazy iteration
+
+    Wrapper structure for generators that provides lazy membership and can
+    be iterated more than once.
+    When asked for membership, it generates values until it either finds the
+    requested one or exhausts the generator.
+    """
+    def __init__(self, gen, iterasc=None):
+        """
+        gen: a generator producing the values for the generatorset.
+        """
+        self._gen = gen
+        self._asclist = None
+        self._cache = {}
+        self._genlist = []
+        self._finished = False
+        self._ascending = True
+        if iterasc is not None:
+            if iterasc:
+                self.fastasc = self._iterator
+                self.__contains__ = self._asccontains
+            else:
+                self.fastdesc = self._iterator
+                self.__contains__ = self._desccontains
+
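# The lazy-membership idea in isolation (a minimal sketch, not the hg code):
# pull values from the generator only until the probe is answered, caching
# everything seen so the generator is never consumed twice.
def _lazycontains(state, x):
    # state: {'gen': iterator, 'cache': {value: bool}, 'seen': [values]}
    if x in state['cache']:
        return state['cache'][x]
    for item in state['gen']:
        state['cache'][item] = True
        state['seen'].append(item)
        if item == x:
            return True
    state['cache'][x] = False
    return False

state = {'gen': iter([0, 2, 4, 6, 8]), 'cache': {}, 'seen': []}
assert _lazycontains(state, 4)      # stops at 4; 6 and 8 stay ungenerated
assert state['seen'] == [0, 2, 4]
assert _lazycontains(state, 4)      # second probe is answered from the cache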
+    def __nonzero__(self):
+        # Do not use 'for r in self' because it will enforce the iteration
+        # order (default ascending), possibly unrolling a whole descending
+        # iterator.
+        if self._genlist:
+            return True
+        for r in self._consumegen():
+            return True
+        return False
+
+    def __contains__(self, x):
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+
+        self._cache[x] = False
+        return False
+
+    def _asccontains(self, x):
+        """version of contains optimised for ascending generator"""
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+            if l > x:
+                break
+
+        self._cache[x] = False
+        return False
+
+    def _desccontains(self, x):
+        """version of contains optimised for descending generator"""
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+            if l < x:
+                break
+
+        self._cache[x] = False
+        return False
+
+    def __iter__(self):
+        if self._ascending:
+            it = self.fastasc
+        else:
+            it = self.fastdesc
+        if it is not None:
+            return it()
+        # we need to consume the iterator
+        for x in self._consumegen():
+            pass
+        # recall the same code
+        return iter(self)
+
+    def _iterator(self):
+        if self._finished:
+            return iter(self._genlist)
+
+        # We have to use this complex iteration strategy to allow multiple
+        # iterations at the same time. We need to be able to pick up
+        # revisions that another iterator instance has already pulled from
+        # _consumegen and appended to genlist.
+        #
+        # Getting rid of it would provide about a 15% speedup of this
+        # iteration.
+        genlist = self._genlist
+        nextrev = self._consumegen().next
+        _len = len # cache global lookup
+        def gen():
+            i = 0
+            while True:
+                if i < _len(genlist):
+                    yield genlist[i]
+                else:
+                    yield nextrev()
+                i += 1
+        return gen()
+
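# Why the shared genlist matters (standalone sketch): two iterators over the
# same generatorset may interleave, and each must see values that the other
# one has already pulled out of the underlying generator.
genlist = []
source = iter([10, 11, 12, 13])

def shared():
    i = 0
    while True:
        if i < len(genlist):
            yield genlist[i]
        else:
            try:
                item = next(source)
            except StopIteration:
                return
            genlist.append(item)
            yield item
        i += 1

a, b = shared(), shared()
assert next(a) == 10 and next(a) == 11  # 'a' advances the shared source
assert next(b) == 10                    # 'b' replays from the shared list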
+    def _consumegen(self):
+        cache = self._cache
+        genlist = self._genlist.append
+        for item in self._gen:
+            cache[item] = True
+            genlist(item)
+            yield item
+        if not self._finished:
+            self._finished = True
+            asc = self._genlist[:]
+            asc.sort()
+            self._asclist = asc
+            self.fastasc = asc.__iter__
+            self.fastdesc = asc.__reversed__
+
+    def __len__(self):
+        for x in self._consumegen():
+            pass
+        return len(self._genlist)
+
+    def sort(self, reverse=False):
+        self._ascending = not reverse
+
+    def reverse(self):
+        self._ascending = not self._ascending
+
+    def isascending(self):
+        return self._ascending
+
+    def isdescending(self):
+        return not self._ascending
+
+    def istopo(self):
+        # not worth the trouble asserting if the generated values are still
+        # in topological order. Use the sort() predicate to explicitly sort
+        # again instead.
+        return False
+
+    def first(self):
+        if self._ascending:
+            it = self.fastasc
+        else:
+            it = self.fastdesc
+        if it is None:
+            # we need to consume all and try again
+            for x in self._consumegen():
+                pass
+            return self.first()
+        return next(it(), None)
+
+    def last(self):
+        if self._ascending:
+            it = self.fastdesc
+        else:
+            it = self.fastasc
+        if it is None:
+            # we need to consume all and try again
+            for x in self._consumegen():
+                pass
+            return self.last()
+        return next(it(), None)
+
+    def __repr__(self):
+        d = {False: '-', True: '+'}[self._ascending]
+        return '<%s%s>' % (type(self).__name__, d)
+
+class spanset(abstractsmartset):
+    """Duck type for baseset class which represents a range of revisions and
+    can work lazily and without having all the range in memory
+
+    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
+    notable points:
+    - when x > y it will be automatically descending,
+    - revisions filtered by this repoview will be skipped.
+
+    """
+    def __init__(self, repo, start=0, end=None):
+        """
+        start: first revision included in the set
+               (defaults to 0)
+        end:   first revision excluded (last+1)
+               (defaults to len(repo))
+
+        Spanset will be descending if `end` < `start`.
+        """
+        if end is None:
+            end = len(repo)
+        self._ascending = start <= end
+        if not self._ascending:
+            start, end = end + 1, start + 1
+        self._start = start
+        self._end = end
+        self._hiddenrevs = repo.changelog.filteredrevs
+
+    def sort(self, reverse=False):
+        self._ascending = not reverse
+
+    def reverse(self):
+        self._ascending = not self._ascending
+
+    def istopo(self):
+        # not worth the trouble asserting if the span is still in
+        # topological order. Use the sort() predicate to explicitly sort
+        # again instead.
+        return False
+
+    def _iterfilter(self, iterrange):
+        s = self._hiddenrevs
+        for r in iterrange:
+            if r not in s:
+                yield r
+
+    def __iter__(self):
+        if self._ascending:
+            return self.fastasc()
+        else:
+            return self.fastdesc()
+
+    def fastasc(self):
+        iterrange = xrange(self._start, self._end)
+        if self._hiddenrevs:
+            return self._iterfilter(iterrange)
+        return iter(iterrange)
+
+    def fastdesc(self):
+        iterrange = xrange(self._end - 1, self._start - 1, -1)
+        if self._hiddenrevs:
+            return self._iterfilter(iterrange)
+        return iter(iterrange)
+
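# The spanset iteration contract in miniature (sketch only): a numeric range
# walked in either direction, with hidden revisions skipped on the fly.
hidden = {3, 4}
start, end = 0, 7

def _span(ascending):
    rng = range(start, end) if ascending else range(end - 1, start - 1, -1)
    return [r for r in rng if r not in hidden]

assert _span(True) == [0, 1, 2, 5, 6]
assert _span(False) == [6, 5, 2, 1, 0]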
+    def __contains__(self, rev):
+        hidden = self._hiddenrevs
+        return ((self._start <= rev < self._end)
+                and not (hidden and rev in hidden))
+
+    def __nonzero__(self):
+        for r in self:
+            return True
+        return False
+
+    def __len__(self):
+        if not self._hiddenrevs:
+            return abs(self._end - self._start)
+        else:
+            count = 0
+            start = self._start
+            end = self._end
+            for rev in self._hiddenrevs:
+                if (end < rev <= start) or (start <= rev < end):
+                    count += 1
+            return abs(self._end - self._start) - count
+
+    def isascending(self):
+        return self._ascending
+
+    def isdescending(self):
+        return not self._ascending
+
+    def first(self):
+        if self._ascending:
+            it = self.fastasc
+        else:
+            it = self.fastdesc
+        for x in it():
+            return x
+        return None
+
+    def last(self):
+        if self._ascending:
+            it = self.fastdesc
+        else:
+            it = self.fastasc
+        for x in it():
+            return x
+        return None
+
+    def __repr__(self):
+        d = {False: '-', True: '+'}[self._ascending]
+        return '<%s%s %d:%d>' % (type(self).__name__, d,
+                                 self._start, self._end - 1)
+
+class fullreposet(spanset):
+    """a set containing all revisions in the repo
+
+    This class exists to host special optimization and magic to handle virtual
+    revisions such as "null".
+    """
+
+    def __init__(self, repo):
+        super(fullreposet, self).__init__(repo)
+
+    def __and__(self, other):
+        """As self contains the whole repo, all of the other set should also be
+        in self. Therefore `self & other = other`.
+
+        This boldly assumes the other contains valid revs only.
+        """
+        # other is not a smartset, make it so
+        if not util.safehasattr(other, 'isascending'):
+            # filter out hidden revisions
+            # (this boldly assumes all smartsets are pure)
+            #
+            # `other` was used with "&", let's assume this is a set-like
+            # object.
+            other = baseset(other - self._hiddenrevs)
+
+        other.sort(reverse=self.isdescending())
+        return other
+
+def prettyformat(revs):
+    lines = []
+    rs = repr(revs)
+    p = 0
+    while p < len(rs):
+        q = rs.find('<', p + 1)
+        if q < 0:
+            q = len(rs)
+        l = rs.count('<', 0, p) - rs.count('>', 0, p)
+        assert l >= 0
+        lines.append((l, rs[p:q].rstrip()))
+        p = q
+    return '\n'.join('  ' * l + s for l, s in lines)
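# A worked example of prettyformat's output. Suppose `revs` is an addset
# combining a baseset and a spanset, so that repr(revs) is
# '<addset+ <baseset [1, 2]>, <spanset+ 0:3>>' (repr shapes per the
# __repr__ methods above). Splitting at each '<' and indenting by the
# current nesting depth yields:
#
#   >>> print(prettyformat(revs))
#   <addset+
#     <baseset [1, 2]>,
#     <spanset+ 0:3>>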
--- a/mercurial/sslutil.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/sslutil.py	Tue Feb 28 11:13:25 2017 -0800
@@ -720,7 +720,8 @@
     # to load the system CA store. If we're running on Apple Python, use this
     # trick.
     if _plainapplepython():
-        dummycert = os.path.join(os.path.dirname(__file__), 'dummycert.pem')
+        dummycert = os.path.join(
+            os.path.dirname(pycompat.fsencode(__file__)), 'dummycert.pem')
         if os.path.exists(dummycert):
             return dummycert
 
--- a/mercurial/statprof.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/statprof.py	Tue Feb 28 11:13:25 2017 -0800
@@ -433,6 +433,7 @@
     Hotpath = 3
     FlameGraph = 4
     Json = 5
+    Chrome = 6
 
 def display(fp=None, format=3, data=None, **kwargs):
     '''Print statistics, either to stdout or the given file object.'''
@@ -457,10 +458,12 @@
         write_to_flame(data, fp, **kwargs)
     elif format == DisplayFormats.Json:
         write_to_json(data, fp)
+    elif format == DisplayFormats.Chrome:
+        write_to_chrome(data, fp, **kwargs)
     else:
         raise Exception("Invalid display format")
 
-    if format != DisplayFormats.Json:
+    if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
         print('---', file=fp)
         print('Sample count: %d' % len(data.samples), file=fp)
         print('Total time: %f seconds' % data.accumulated_time, file=fp)
@@ -713,6 +716,23 @@
     os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile))
     print("Written to %s" % outputfile, file=fp)
 
+_pathcache = {}
+def simplifypath(path):
+    '''Attempt to make the path to a Python module easier to read by
+    removing whatever part of the Python search path it was found
+    on.'''
+
+    if path in _pathcache:
+        return _pathcache[path]
+    hgpath = pycompat.fsencode(encoding.__file__).rsplit(os.sep, 2)[0]
+    for p in [hgpath] + sys.path:
+        prefix = p + os.sep
+        if path.startswith(prefix):
+            path = path[len(prefix):]
+            break
+    _pathcache[path] = path
+    return path
+
 def write_to_json(data, fp):
     samples = []
 
@@ -726,6 +746,102 @@
 
     print(json.dumps(samples), file=fp)
 
+def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
+    samples = []
+    laststack = collections.deque()
+    lastseen = collections.deque()
+
+    # The Chrome tracing format allows us to use a compact stack
+    # representation to save space. It's fiddly but worth it.
+    # We maintain a bijection between stack and ID.
+    stack2id = {}
+    id2stack = [] # will eventually be rendered
+
+    def stackid(stack):
+        if not stack:
+            return
+        if stack in stack2id:
+            return stack2id[stack]
+        parent = stackid(stack[1:])
+        myid = len(stack2id)
+        stack2id[stack] = myid
+        id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
+        if parent is not None:
+            id2stack[-1].update(parent=parent)
+        return myid
+
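# The stack-interning trick in isolation (minimal sketch): every unique
# call-stack suffix gets a stable integer ID and a (name, parent) record,
# so each stack is stored only once no matter how often it recurs in the
# samples.
_stack2id = {}
_id2stack = []

def _intern(stack):  # stack: tuple of frame names, leaf first
    if not stack:
        return None
    if stack in _stack2id:
        return _stack2id[stack]
    parent = _intern(stack[1:])
    myid = len(_id2stack)
    _stack2id[stack] = myid
    entry = {'name': stack[0]}
    if parent is not None:
        entry['parent'] = parent
    _id2stack.append(entry)
    return myid

_intern(('f', 'main'))
_intern(('g', 'main'))          # shares the ('main',) suffix
assert len(_id2stack) == 3      # 'main', 'f', 'g' are each stored once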
+    def endswith(a, b):
+        return list(a)[-len(b):] == list(b)
+
+    # The sampling profiler can sample multiple times without
+    # advancing the clock, potentially causing the Chrome trace viewer
+    # to render single-pixel columns that we cannot zoom in on.  We
+    # work around this by pretending that zero-duration samples are a
+    # millisecond in length.
+
+    clamp = 0.001
+
+    # We provide knobs that by default attempt to filter out stack
+    # frames that are too noisy:
+    #
+    # * A few take almost all execution time. These are usually boring
+    #   setup functions, giving a stack that is deep but uninformative.
+    #
+    # * Numerous samples take almost no time, but introduce lots of
+    #   noisy, oft-deep "spines" into a rendered profile.
+
+    blacklist = set()
+    totaltime = data.samples[-1].time - data.samples[0].time
+    minthreshold = totaltime * minthreshold
+    maxthreshold = max(totaltime * maxthreshold, clamp)
+
+    def poplast():
+        oldsid = stackid(tuple(laststack))
+        oldcat, oldfunc = laststack.popleft()
+        oldtime, oldidx = lastseen.popleft()
+        duration = sample.time - oldtime
+        if minthreshold <= duration <= maxthreshold:
+            # ensure no zero-duration events
+            sampletime = max(oldtime + clamp, sample.time)
+            samples.append(dict(ph='E', name=oldfunc, cat=oldcat, sf=oldsid,
+                                ts=sampletime*1e6, pid=0))
+        else:
+            blacklist.add(oldidx)
+
+    # Much fiddling to synthesize correctly(ish) nested begin/end
+    # events given only stack snapshots.
+
+    for sample in data.samples:
+        tos = sample.stack[0]
+        name = tos.function
+        path = simplifypath(tos.path)
+        category = '%s:%d' % (path, tos.lineno)
+        stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno),
+                        frame.function) for frame in sample.stack))
+        qstack = collections.deque(stack)
+        if laststack == qstack:
+            continue
+        while laststack and qstack and laststack[-1] == qstack[-1]:
+            laststack.pop()
+            qstack.pop()
+        while laststack:
+            poplast()
+        for f in reversed(qstack):
+            lastseen.appendleft((sample.time, len(samples)))
+            laststack.appendleft(f)
+            path, name = f
+            sid = stackid(tuple(laststack))
+            samples.append(dict(ph='B', name=name, cat=path, ts=sample.time*1e6,
+                                sf=sid, pid=0))
+        laststack = collections.deque(stack)
+    while laststack:
+        poplast()
+    events = [s[1] for s in enumerate(samples) if s[0] not in blacklist]
+    frames = collections.OrderedDict((str(k), v)
+                                     for (k,v) in enumerate(id2stack))
+    json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1)
+    fp.write('\n')
+
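# The resulting document is Chrome trace-viewer JSON (loadable via
# chrome://tracing): paired B/E events referencing the interned stack-frame
# table. A tiny hand-built instance with illustrative values only:
import json

trace = {
    'traceEvents': [
        {'ph': 'B', 'name': 'main', 'cat': 'prog.py:1',
         'ts': 0.0, 'sf': 0, 'pid': 0},
        {'ph': 'E', 'name': 'main', 'cat': 'prog.py:1',
         'ts': 2000.0, 'sf': 0, 'pid': 0},
    ],
    'stackFrames': {'0': {'category': 'prog.py:1', 'name': 'main prog.py:1'}},
}
print(json.dumps(trace, indent=1))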
 def printusage():
     print("""
 The statprof command line allows you to inspect the last profile's results in
--- a/mercurial/store.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/store.py	Tue Feb 28 11:13:25 2017 -0800
@@ -101,7 +101,7 @@
     e = '_'
     if pycompat.ispy3:
         xchr = lambda x: bytes([x])
-        asciistr = bytes(xrange(127))
+        asciistr = [bytes(a) for a in range(127)]
     else:
         xchr = chr
         asciistr = map(chr, xrange(127))
--- a/mercurial/streamclone.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/streamclone.py	Tue Feb 28 11:13:25 2017 -0800
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 import struct
-import time
 
 from .i18n import _
 from . import (
@@ -297,7 +296,7 @@
                        (filecount, util.bytecount(bytecount)))
         handled_bytes = 0
         repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
-        start = time.time()
+        start = util.timer()
 
         # TODO: get rid of (potential) inconsistency
         #
@@ -340,7 +339,7 @@
             # streamclone-ed file at next access
             repo.invalidate(clearfilecache=True)
 
-        elapsed = time.time() - start
+        elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
         repo.ui.progress(_('clone'), None)
--- a/mercurial/subrepo.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/subrepo.py	Tue Feb 28 11:13:25 2017 -0800
@@ -542,8 +542,8 @@
         """return filename iterator"""
         raise NotImplementedError
 
-    def filedata(self, name):
-        """return file data"""
+    def filedata(self, name, decode):
+        """return file data, optionally passed through repo decoders"""
         raise NotImplementedError
 
     def fileflags(self, name):
@@ -558,7 +558,7 @@
         """handle the files command for this subrepo"""
         return 1
 
-    def archive(self, archiver, prefix, match=None):
+    def archive(self, archiver, prefix, match=None, decode=True):
         if match is not None:
             files = [f for f in self.files() if match(f)]
         else:
@@ -572,7 +572,7 @@
             mode = 'x' in flags and 0o755 or 0o644
             symlink = 'l' in flags
             archiver.addfile(prefix + self._path + '/' + name,
-                             mode, symlink, self.filedata(name))
+                             mode, symlink, self.filedata(name, decode))
             self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                              unit=_('files'), total=total)
         self.ui.progress(_('archiving (%s)') % relpath, None)
@@ -782,7 +782,7 @@
                           % (inst, subrelpath(self)))
 
     @annotatesubrepoerror
-    def archive(self, archiver, prefix, match=None):
+    def archive(self, archiver, prefix, match=None, decode=True):
         self._get(self._state + ('hg',))
         total = abstractsubrepo.archive(self, archiver, prefix, match)
         rev = self._state[1]
@@ -790,7 +790,8 @@
         for subpath in ctx.substate:
             s = subrepo(ctx, subpath, True)
             submatch = matchmod.subdirmatcher(subpath, match)
-            total += s.archive(archiver, prefix + self._path + '/', submatch)
+            total += s.archive(archiver, prefix + self._path + '/', submatch,
+                               decode)
         return total
 
     @annotatesubrepoerror
@@ -956,9 +957,12 @@
         ctx = self._repo[rev]
         return ctx.manifest().keys()
 
-    def filedata(self, name):
+    def filedata(self, name, decode):
         rev = self._state[1]
-        return self._repo[rev][name].data()
+        data = self._repo[rev][name].data()
+        if decode:
+            data = self._repo.wwritedata(name, data)
+        return data
 
     def fileflags(self, name):
         rev = self._state[1]
@@ -1292,7 +1296,7 @@
             paths.append(name.encode('utf-8'))
         return paths
 
-    def filedata(self, name):
+    def filedata(self, name, decode):
         return self._svncommand(['cat'], name)[0]
 
 
@@ -1410,6 +1414,10 @@
         errpipe = None
         if self.ui.quiet:
             errpipe = open(os.devnull, 'w')
+        if self.ui._colormode and len(commands) and commands[0] == "diff":
+            # insert the option at the front, since the end of a git diff
+            # argument list is reserved for paths
+            commands.insert(1, '--color')
         p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                              cwd=cwd, env=env, close_fds=util.closefds,
                              stdout=subprocess.PIPE, stderr=errpipe)
@@ -1772,7 +1780,7 @@
             else:
                 self.wvfs.unlink(f)
 
-    def archive(self, archiver, prefix, match=None):
+    def archive(self, archiver, prefix, match=None, decode=True):
         total = 0
         source, revision = self._state
         if not revision:
--- a/mercurial/tags.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/tags.py	Tue Feb 28 11:13:25 2017 -0800
@@ -14,7 +14,6 @@
 
 import array
 import errno
-import time
 
 from .node import (
     bin,
@@ -25,6 +24,7 @@
 from . import (
     encoding,
     error,
+    scmutil,
     util,
 )
 
@@ -278,8 +278,6 @@
     If the cache is not up to date, the caller is responsible for reading tag
     info from each returned head. (See findglobaltags().)
     '''
-    from . import scmutil  # avoid cycle
-
     try:
         cachefile = repo.vfs(_filename(repo), 'r')
         # force reading the file for static-http
@@ -344,7 +342,7 @@
         # potentially expensive search.
         return ([], {}, valid, None, True)
 
-    starttime = time.time()
+    starttime = util.timer()
 
     # Now we have to lookup the .hgtags filenode for every new head.
     # This is the most expensive part of finding tags, so performance
@@ -359,7 +357,7 @@
 
     fnodescache.write()
 
-    duration = time.time() - starttime
+    duration = util.timer() - starttime
     ui.log('tagscache',
            '%d/%d cache hits/lookups in %0.4f '
            'seconds\n',
--- a/mercurial/templater.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/templater.py	Tue Feb 28 11:13:25 2017 -0800
@@ -20,6 +20,7 @@
     pycompat,
     registrar,
     revset as revsetmod,
+    revsetlang,
     templatefilters,
     templatekw,
     util,
@@ -778,7 +779,7 @@
 
     if len(args) > 1:
         formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
-        revs = query(revsetmod.formatspec(raw, *formatargs))
+        revs = query(revsetlang.formatspec(raw, *formatargs))
         revs = list(revs)
     else:
         revsetcache = mapping['cache'].setdefault("revsetcache", {})
--- a/mercurial/templates/gitweb/filelog.tmpl	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/templates/gitweb/filelog.tmpl	Tue Feb 28 11:13:25 2017 -0800
@@ -38,6 +38,8 @@
 </table>
 
 <div class="page_nav">
+<a href="{url|urlescape}log/{symrev}/{file|urlescape}{lessvars%urlparameter}">less</a>
+<a href="{url|urlescape}log/{symrev}/{file|urlescape}{morevars%urlparameter}">more</a>
 {nav%filenav}
 </div>
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/txnutil.py	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,36 @@
+# txnutil.py - transaction related utilities
+#
+#  Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+
+from . import (
+    encoding,
+)
+
+def mayhavepending(root):
+    '''return whether 'root' may have pending changes that are
+    visible to this process.
+    '''
+    return root == encoding.environ.get('HG_PENDING')
+
+def trypending(root, vfs, filename, **kwargs):
+    '''Open a file to be read, honoring the HG_PENDING environment variable
+
+    This opens the '.pending' variant of the specified 'filename' only when
+    HG_PENDING equals 'root'.
+
+    This returns a '(fp, is_pending_opened)' tuple.
+    '''
+    if mayhavepending(root):
+        try:
+            return (vfs('%s.pending' % filename, **kwargs), True)
+        except IOError as inst:
+            if inst.errno != errno.ENOENT:
+                raise
+    return (vfs(filename, **kwargs), False)
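# How a reader typically uses trypending (sketch; `repo` here is a
# hypothetical repository object providing .root and .vfs):
from mercurial import txnutil

def readbookmarks(repo):
    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
    try:
        return fp.read(), pending
    finally:
        fp.close()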
--- a/mercurial/ui.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/ui.py	Tue Feb 28 11:13:25 2017 -0800
@@ -7,13 +7,17 @@
 
 from __future__ import absolute_import
 
+import atexit
+import collections
 import contextlib
 import errno
 import getpass
 import inspect
 import os
 import re
+import signal
 import socket
+import subprocess
 import sys
 import tempfile
 import traceback
@@ -22,6 +26,7 @@
 from .node import hex
 
 from . import (
+    color,
     config,
     encoding,
     error,
@@ -34,6 +39,14 @@
 
 urlreq = util.urlreq
 
+# for use with str.translate(None, _keepalnum), to keep just alphanumerics
+if pycompat.ispy3:
+    _bytes = [bytes([c]) for c in range(256)]
+    _notalnum = [s for s in _bytes if not s.isalnum()]
+else:
+    _notalnum = [c for c in map(chr, range(256)) if not c.isalnum()]
+_keepalnum = ''.join(_notalnum)
+
 samplehgrcs = {
     'user':
 """# example user config (see 'hg help config' for more info)
@@ -94,6 +107,26 @@
 # pager =""",
 }
 
+
+class httppasswordmgrdbproxy(object):
+    """Delays loading urllib2 until it's needed."""
+    def __init__(self):
+        self._mgr = None
+
+    def _get_mgr(self):
+        if self._mgr is None:
+            self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
+        return self._mgr
+
+    def add_password(self, *args, **kwargs):
+        return self._get_mgr().add_password(*args, **kwargs)
+
+    def find_user_password(self, *args, **kwargs):
+        return self._get_mgr().find_user_password(*args, **kwargs)
+
+def _catchterm(*args):
+    raise error.SignalInterrupt
+
 class ui(object):
     def __init__(self, src=None):
         """Create a fresh new ui object if no src given
@@ -120,11 +153,19 @@
         self.callhooks = True
         # Insecure server connections requested.
         self.insecureconnections = False
+        # Blocked time
+        self.logblockedtimes = False
+        # color mode: see mercurial/color.py for possible value
+        self._colormode = None
+        self._terminfoparams = {}
+        self._styles = {}
 
         if src:
             self.fout = src.fout
             self.ferr = src.ferr
             self.fin = src.fin
+            self.pageractive = src.pageractive
+            self._disablepager = src._disablepager
 
             self._tcfg = src._tcfg.copy()
             self._ucfg = src._ucfg.copy()
@@ -134,18 +175,26 @@
             self.environ = src.environ
             self.callhooks = src.callhooks
             self.insecureconnections = src.insecureconnections
+            self._colormode = src._colormode
+            self._terminfoparams = src._terminfoparams.copy()
+            self._styles = src._styles.copy()
+
             self.fixconfig()
 
             self.httppasswordmgrdb = src.httppasswordmgrdb
+            self._blockedtimes = src._blockedtimes
         else:
             self.fout = util.stdout
             self.ferr = util.stderr
             self.fin = util.stdin
+            self.pageractive = False
+            self._disablepager = False
 
             # shared read-only environment
             self.environ = encoding.environ
 
-            self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
+            self.httppasswordmgrdb = httppasswordmgrdbproxy()
+            self._blockedtimes = collections.defaultdict(int)
 
         allowed = self.configlist('experimental', 'exportableenviron')
         if '*' in allowed:
@@ -172,7 +221,17 @@
         """Clear internal state that shouldn't persist across commands"""
         if self._progbar:
             self._progbar.resetstate()  # reset last-print time of progress bar
-        self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
+        self.httppasswordmgrdb = httppasswordmgrdbproxy()
+
+    @contextlib.contextmanager
+    def timeblockedsection(self, key):
+        # this is open-coded below - search for timeblockedsection to find them
+        starttime = util.timer()
+        try:
+            yield
+        finally:
+            self._blockedtimes[key + '_blocked'] += \
+                (util.timer() - starttime) * 1000
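
# Intended use (sketch): wrap any section that blocks on something external,
# and the elapsed milliseconds accumulate in self._blockedtimes under the
# key '<key>_blocked', for example:
#
#     with self.timeblockedsection('editor'):
#         runeditor()  # hypothetical blocking call
#     # self._blockedtimes['editor_blocked'] now includes the elapsed ms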
 
     def formatter(self, topic, opts):
         return formatter.formatter(self, topic, opts)
@@ -277,6 +336,7 @@
             self._reportuntrusted = self.debugflag or self.configbool("ui",
                 "report_untrusted", True)
             self.tracebackflag = self.configbool('ui', 'traceback', False)
+            self.logblockedtimes = self.configbool('ui', 'logblockedtimes')
 
         if section in (None, 'trusted'):
             # update trust information
@@ -402,6 +462,41 @@
                                     % (section, name, v))
         return b
 
+    def configwith(self, convert, section, name, default=None,
+                   desc=None, untrusted=False):
+        """parse a configuration element with a conversion function
+
+        >>> u = ui(); s = 'foo'
+        >>> u.setconfig(s, 'float1', '42')
+        >>> u.configwith(float, s, 'float1')
+        42.0
+        >>> u.setconfig(s, 'float2', '-4.25')
+        >>> u.configwith(float, s, 'float2')
+        -4.25
+        >>> u.configwith(float, s, 'unknown', 7)
+        7
+        >>> u.setconfig(s, 'invalid', 'somevalue')
+        >>> u.configwith(float, s, 'invalid')
+        Traceback (most recent call last):
+            ...
+        ConfigError: foo.invalid is not a valid float ('somevalue')
+        >>> u.configwith(float, s, 'invalid', desc='womble')
+        Traceback (most recent call last):
+            ...
+        ConfigError: foo.invalid is not a valid womble ('somevalue')
+        """
+
+        v = self.config(section, name, None, untrusted)
+        if v is None:
+            return default
+        try:
+            return convert(v)
+        except ValueError:
+            if desc is None:
+                desc = convert.__name__
+            raise error.ConfigError(_("%s.%s is not a valid %s ('%s')")
+                                    % (section, name, desc, v))
+
     def configint(self, section, name, default=None, untrusted=False):
         """parse a configuration element as an integer
 
@@ -418,17 +513,11 @@
         >>> u.configint(s, 'invalid')
         Traceback (most recent call last):
             ...
-        ConfigError: foo.invalid is not an integer ('somevalue')
+        ConfigError: foo.invalid is not a valid integer ('somevalue')
         """
 
-        v = self.config(section, name, None, untrusted)
-        if v is None:
-            return default
-        try:
-            return int(v)
-        except ValueError:
-            raise error.ConfigError(_("%s.%s is not an integer ('%s')")
-                                    % (section, name, v))
+        return self.configwith(int, section, name, default, 'integer',
+                               untrusted)
 
     def configbytes(self, section, name, default=0, untrusted=False):
         """parse a configuration element as a quantity in bytes
@@ -696,55 +785,176 @@
     def write(self, *args, **opts):
         '''write args to output
 
-        By default, this method simply writes to the buffer or stdout,
-        but extensions or GUI tools may override this method,
-        write_err(), popbuffer(), and label() to style output from
-        various parts of hg.
+        By default, this method simply writes to the buffer or stdout.
+        Color mode can be set on the UI class to have the output decorated
+        with color modifiers before being written to stdout.
 
-        An optional keyword argument, "label", can be passed in.
-        This should be a string containing label names separated by
-        space. Label names take the form of "topic.type". For example,
-        ui.debug() issues a label of "ui.debug".
+        The color used is controlled by an optional keyword argument, "label".
+        This should be a string containing label names separated by space.
+        Label names take the form of "topic.type". For example, ui.debug()
+        issues a label of "ui.debug".
 
         When labeling output for a specific command, a label of
         "cmdname.type" is recommended. For example, status issues
         a label of "status.modified" for modified files.
         '''
         if self._buffers and not opts.get('prompt', False):
-            self._buffers[-1].extend(a for a in args)
+            if self._bufferapplylabels:
+                label = opts.get('label', '')
+                self._buffers[-1].extend(self.label(a, label) for a in args)
+            else:
+                self._buffers[-1].extend(args)
+        elif self._colormode == 'win32':
+            # windows color printing is its own can of crab, defer to
+            # the color module and that is it.
+            color.win32print(self, self._write, *args, **opts)
         else:
+            msgs = args
+            if self._colormode is not None:
+                label = opts.get('label', '')
+                msgs = [self.label(a, label) for a in args]
+            self._write(*msgs, **opts)
+
+    def _write(self, *msgs, **opts):
             self._progclear()
-            for a in args:
-                self.fout.write(a)
+            # opencode timeblockedsection because this is a critical path
+            starttime = util.timer()
+            try:
+                for a in msgs:
+                    self.fout.write(a)
+            finally:
+                self._blockedtimes['stdio_blocked'] += \
+                    (util.timer() - starttime) * 1000
 
     def write_err(self, *args, **opts):
         self._progclear()
+        if self._bufferstates and self._bufferstates[-1][0]:
+            self.write(*args, **opts)
+        elif self._colormode == 'win32':
+            # windows color printing is its own can of crab, defer to
+            # the color module and that is it.
+            color.win32print(self, self._write_err, *args, **opts)
+        else:
+            msgs = args
+            if self._colormode is not None:
+                label = opts.get('label', '')
+                msgs = [self.label(a, label) for a in args]
+            self._write_err(*msgs, **opts)
+
+    def _write_err(self, *msgs, **opts):
         try:
-            if self._bufferstates and self._bufferstates[-1][0]:
-                return self.write(*args, **opts)
-            if not getattr(self.fout, 'closed', False):
-                self.fout.flush()
-            for a in args:
-                self.ferr.write(a)
-            # stderr may be buffered under win32 when redirected to files,
-            # including stdout.
-            if not getattr(self.ferr, 'closed', False):
-                self.ferr.flush()
+            with self.timeblockedsection('stdio'):
+                if not getattr(self.fout, 'closed', False):
+                    self.fout.flush()
+                for a in msgs:
+                    self.ferr.write(a)
+                # stderr may be buffered under win32 when redirected to files,
+                # including stdout.
+                if not getattr(self.ferr, 'closed', False):
+                    self.ferr.flush()
         except IOError as inst:
             if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                 raise
 
     def flush(self):
-        try: self.fout.flush()
-        except (IOError, ValueError): pass
-        try: self.ferr.flush()
-        except (IOError, ValueError): pass
+        # opencode timeblockedsection because this is a critical path
+        starttime = util.timer()
+        try:
+            try: self.fout.flush()
+            except (IOError, ValueError): pass
+            try: self.ferr.flush()
+            except (IOError, ValueError): pass
+        finally:
+            self._blockedtimes['stdio_blocked'] += \
+                (util.timer() - starttime) * 1000
 
     def _isatty(self, fh):
         if self.configbool('ui', 'nontty', False):
             return False
         return util.isatty(fh)
 
+    def disablepager(self):
+        self._disablepager = True
+
+    def pager(self, command):
+        """Start a pager for subsequent command output.
+
+        Commands which produce a long stream of output should call
+        this function to activate the user's preferred pagination
+        mechanism (which may be no pager). Calling this function
+        precludes any future use of interactive functionality, such as
+        prompting the user or activating curses.
+
+        Args:
+          command: The full, non-aliased name of the command. That is, "log"
+                   not "history", "summary" not "summ", etc.
+        """
+        if (self._disablepager
+            or self.pageractive
+            or command in self.configlist('pager', 'ignore')
+            or not self.configbool('pager', 'enable', True)
+            or not self.configbool('pager', 'attend-' + command, True)
+            # TODO: if we want to allow HGPLAINEXCEPT=pager,
+            # formatted() will need some adjustment.
+            or not self.formatted()
+            or self.plain()
+            # TODO: expose debugger-enabled on the UI object
+            or '--debugger' in sys.argv):
+            # We only want to paginate if the ui appears to be
+            # interactive, the user didn't say HGPLAIN or
+            # HGPLAINEXCEPT=pager, and the user didn't specify --debugger.
+            return
+
+        # TODO: add a "system defaults" config section so this default
+        # of more(1) can be easily replaced with a global
+        # configuration file. For example, on OS X the sane default is
+        # less(1), not more(1), and on debian it's
+        # sensible-pager(1). We should probably also give the system
+        # default editor command similar treatment.
+        envpager = encoding.environ.get('PAGER', 'more')
+        pagercmd = self.config('pager', 'pager', envpager)
+        if not pagercmd:
+            return
+
+        self.debug('starting pager for command %r\n' % command)
+        self.pageractive = True
+        # Preserve the formatted-ness of the UI. This is important
+        # because we mess with stdout, which might confuse
+        # auto-detection of things being formatted.
+        self.setconfig('ui', 'formatted', self.formatted(), 'pager')
+        self.setconfig('ui', 'interactive', False, 'pager')
+        if util.safehasattr(signal, "SIGPIPE"):
+            signal.signal(signal.SIGPIPE, _catchterm)
+        self._runpager(pagercmd)
+
+    def _runpager(self, command):
+        """Actually start the pager and set up file descriptors.
+
+        This is separate in part so that extensions (like chg) can
+        override how a pager is invoked.
+        """
+        pager = subprocess.Popen(command, shell=True, bufsize=-1,
+                                 close_fds=util.closefds, stdin=subprocess.PIPE,
+                                 stdout=util.stdout, stderr=util.stderr)
+
+        # back up original file descriptors
+        stdoutfd = os.dup(util.stdout.fileno())
+        stderrfd = os.dup(util.stderr.fileno())
+
+        os.dup2(pager.stdin.fileno(), util.stdout.fileno())
+        if self._isatty(util.stderr):
+            os.dup2(pager.stdin.fileno(), util.stderr.fileno())
+
+        @atexit.register
+        def killpager():
+            if util.safehasattr(signal, "SIGINT"):
+                signal.signal(signal.SIGINT, signal.SIG_IGN)
+            # restore original fds, closing pager.stdin copies in the process
+            os.dup2(stdoutfd, util.stdout.fileno())
+            os.dup2(stderrfd, util.stderr.fileno())
+            pager.stdin.close()
+            pager.wait()
+
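# The file-descriptor dance above, in isolation (standalone sketch using
# 'cat' as a stand-in pager): start the child while fd 1 still points at the
# real stdout, duplicate that stdout, point fd 1 at the child's stdin, and
# restore it afterwards.
import os
import subprocess

proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE)
saved = os.dup(1)                  # keep the real stdout alive
os.dup2(proc.stdin.fileno(), 1)    # fd 1 now feeds the pager
os.write(1, b'paged output\n')
os.dup2(saved, 1)                  # restore the original stdout
os.close(saved)
proc.stdin.close()                 # EOF for the pager
proc.wait()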
     def interface(self, feature):
         """what interface to use for interactive console features?
 
@@ -900,7 +1110,8 @@
         sys.stdout = self.fout
         # prompt ' ' must exist; otherwise readline may delete entire line
         # - http://bugs.python.org/issue12833
-        line = raw_input(' ')
+        with self.timeblockedsection('stdio'):
+            line = raw_input(' ')
         sys.stdin = oldin
         sys.stdout = oldout
 
@@ -980,13 +1191,14 @@
             self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
             # disable getpass() only if explicitly specified. it's still valid
             # to interact with tty even if fin is not a tty.
-            if self.configbool('ui', 'nontty'):
-                l = self.fin.readline()
-                if not l:
-                    raise EOFError
-                return l.rstrip('\n')
-            else:
-                return getpass.getpass('')
+            with self.timeblockedsection('stdio'):
+                if self.configbool('ui', 'nontty'):
+                    l = self.fin.readline()
+                    if not l:
+                        raise EOFError
+                    return l.rstrip('\n')
+                else:
+                    return getpass.getpass('')
         except EOFError:
             raise error.ResponseExpected()
     def status(self, *msg, **opts):
@@ -1038,7 +1250,7 @@
                                       suffix=extra['suffix'], text=True,
                                       dir=rdir)
         try:
-            f = os.fdopen(fd, "w")
+            f = os.fdopen(fd, pycompat.sysstr("w"))
             f.write(text)
             f.close()
 
@@ -1058,7 +1270,8 @@
 
             self.system("%s \"%s\"" % (editor, name),
                         environ=environ,
-                        onerr=error.Abort, errprefix=_("edit failed"))
+                        onerr=error.Abort, errprefix=_("edit failed"),
+                        blockedtag='editor')
 
             f = open(name)
             t = f.read()
@@ -1068,15 +1281,33 @@
 
         return t
 
-    def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None):
+    def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None,
+               blockedtag=None):
         '''execute shell command with appropriate output stream. command
         output will be redirected if fout is not stdout.
+
+        if the command fails and onerr is None, return its exit status;
+        otherwise raise the onerr object as an exception.
         '''
+        if blockedtag is None:
+            blockedtag = 'unknown_system_' + cmd.translate(None, _keepalnum)
         out = self.fout
         if any(s[1] for s in self._bufferstates):
             out = self
-        return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
-                           errprefix=errprefix, out=out)
+        with self.timeblockedsection(blockedtag):
+            rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
+        if rc and onerr:
+            errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
+                                util.explainexit(rc)[0])
+            if errprefix:
+                errmsg = '%s: %s' % (errprefix, errmsg)
+            raise onerr(errmsg)
+        return rc
+
+    def _runsystem(self, cmd, environ, cwd, out):
+        """actually execute the given shell command (can be overridden by
+        extensions like chg)"""
+        return util.system(cmd, environ=environ, cwd=cwd, out=out)
 
     def traceback(self, exc=None, force=False):
         '''print exception traceback if traceback printing enabled or forced.
@@ -1180,13 +1411,15 @@
     def label(self, msg, label):
         '''style msg based on supplied label
 
-        Like ui.write(), this just returns msg unchanged, but extensions
-        and GUI tools can override it to allow styling output without
-        writing it.
+        If some color mode is enabled, this will add the necessary control
+        characters to apply such color. In addition, 'debug' color mode adds
+        markup showing which label affects a piece of text.
 
         ui.write(s, 'label') is equivalent to
         ui.write(ui.label(s, 'label')).
         '''
+        if self._colormode is not None:
+            return color.colorlabel(self, msg, label)
         return msg
 
     def develwarn(self, msg, stacklevel=1, config=None):
--- a/mercurial/util.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/util.py	Tue Feb 28 11:13:25 2017 -0800
@@ -63,9 +63,21 @@
 urlreq = pycompat.urlreq
 xmlrpclib = pycompat.xmlrpclib
 
+def isatty(fp):
+    try:
+        return fp.isatty()
+    except AttributeError:
+        return False
+
+# glibc determines buffering on first write to stdout - if we replace a
+# TTY-destined stdout with a pipe-destined stdout (e.g. a pager), we want
+# line buffering
+if isatty(stdout):
+    stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
+
 if pycompat.osname == 'nt':
     from . import windows as platform
-    stdout = platform.winstdout(pycompat.stdout)
+    stdout = platform.winstdout(stdout)
 else:
     from . import posix as platform
 
@@ -797,7 +809,7 @@
     inname, outname = None, None
     try:
         infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
-        fp = os.fdopen(infd, 'wb')
+        fp = os.fdopen(infd, pycompat.sysstr('wb'))
         fp.write(s)
         fp.close()
         outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
@@ -943,10 +955,7 @@
     # executable version (py2exe) doesn't support __file__
     datapath = os.path.dirname(pycompat.sysexecutable)
 else:
-    datapath = os.path.dirname(__file__)
-
-if not isinstance(datapath, bytes):
-    datapath = pycompat.fsencode(datapath)
+    datapath = os.path.dirname(pycompat.fsencode(__file__))
 
 i18n.setdatapath(datapath)
 
@@ -968,8 +977,9 @@
                 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
             else:
                 _sethgexecutable(pycompat.sysexecutable)
-        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
-            _sethgexecutable(mainmod.__file__)
+        elif (os.path.basename(
+            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
+            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
         else:
             exe = findexe('hg') or os.path.basename(sys.argv[0])
             _sethgexecutable(exe)
@@ -999,20 +1009,16 @@
     env['HG'] = hgexecutable()
     return env
 
-def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
+def system(cmd, environ=None, cwd=None, out=None):
     '''enhanced shell command execution.
     run with environment maybe modified, maybe in different dir.
 
-    if command fails and onerr is None, return status, else raise onerr
-    object as exception.
-
     if out is specified, it is assumed to be a file-like object that has a
     write() method. stdout and stderr will be redirected to out.'''
     try:
         stdout.flush()
     except Exception:
         pass
-    origcmd = cmd
     cmd = quotecommand(cmd)
     if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
                                     and sys.version_info[1] < 7):
@@ -1036,12 +1042,6 @@
             rc = proc.returncode
         if pycompat.sysplatform == 'OpenVMS' and rc & 1:
             rc = 0
-    if rc and onerr:
-        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
-                            explainexit(rc)[0])
-        if errprefix:
-            errmsg = '%s: %s' % (errprefix, errmsg)
-        raise onerr(errmsg)
     return rc
 
 def checksignature(func):
@@ -1191,8 +1191,13 @@
 
 if pycompat.osname == 'nt':
     checkosfilename = checkwinfilename
+    timer = time.clock
 else:
     checkosfilename = platform.checkosfilename
+    timer = time.time
+
+if safehasattr(time, "perf_counter"):
+    timer = time.perf_counter
 
 def makelock(info, pathname):
     try:
@@ -2750,12 +2755,6 @@
     u.user = u.passwd = None
     return str(u)
 
-def isatty(fp):
-    try:
-        return fp.isatty()
-    except AttributeError:
-        return False
-
 timecount = unitcountfn(
     (1, 1e3, _('%.0f s')),
     (100, 1, _('%.1f s')),
@@ -2786,13 +2785,13 @@
     '''
 
     def wrapper(*args, **kwargs):
-        start = time.time()
+        start = timer()
         indent = 2
         _timenesting[0] += indent
         try:
             return func(*args, **kwargs)
         finally:
-            elapsed = time.time() - start
+            elapsed = timer() - start
             _timenesting[0] -= indent
             stderr.write('%s%s: %s\n' %
                          (' ' * _timenesting[0], func.__name__,
--- a/mercurial/verify.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/verify.py	Tue Feb 28 11:13:25 2017 -0800
@@ -18,6 +18,7 @@
 from . import (
     error,
     revlog,
+    scmutil,
     util,
 )
 
@@ -32,21 +33,13 @@
         f = f.replace('//', '/')
     return f
 
-def _validpath(repo, path):
-    """Returns False if a path should NOT be treated as part of a repo.
-
-    For all in-core cases, this returns True, as we have no way for a
-    path to be mentioned in the history but not actually be
-    relevant. For narrow clones, this is important because many
-    filelogs will be missing, and changelog entries may mention
-    modified files that are outside the narrow scope.
-    """
-    return True
-
 class verifier(object):
-    def __init__(self, repo):
+    # The match argument is always None in hg core, but e.g. the narrowhg
+    # extension will pass in a matcher here.
+    def __init__(self, repo, match=None):
         self.repo = repo.unfiltered()
         self.ui = repo.ui
+        self.match = match or scmutil.matchall(repo)
         self.badrevs = set()
         self.errors = 0
         self.warnings = 0
@@ -170,6 +163,7 @@
     def _verifychangelog(self):
         ui = self.ui
         repo = self.repo
+        match = self.match
         cl = repo.changelog
 
         ui.status(_("checking changesets\n"))
@@ -189,7 +183,7 @@
                     mflinkrevs.setdefault(changes[0], []).append(i)
                     self.refersmf = True
                 for f in changes[3]:
-                    if _validpath(repo, f):
+                    if match(f):
                         filelinkrevs.setdefault(_normpath(f), []).append(i)
             except Exception as inst:
                 self.refersmf = True
@@ -201,6 +195,7 @@
                         progress=None):
         repo = self.repo
         ui = self.ui
+        match = self.match
         mfl = self.repo.manifestlog
         mf = mfl._revlog.dirlog(dir)
 
@@ -243,12 +238,14 @@
                     elif f == "/dev/null":  # ignore this in very old repos
                         continue
                     fullpath = dir + _normpath(f)
-                    if not _validpath(repo, fullpath):
-                        continue
                     if fl == 't':
+                        if not match.visitdir(fullpath):
+                            continue
                         subdirnodes.setdefault(fullpath + '/', {}).setdefault(
                             fn, []).append(lr)
                     else:
+                        if not match(fullpath):
+                            continue
                         filenodes.setdefault(fullpath, {}).setdefault(fn, lr)
             except Exception as inst:
                 self.exc(lr, _("reading delta %s") % short(n), inst, label)
--- a/mercurial/wireproto.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/wireproto.py	Tue Feb 28 11:13:25 2017 -0800
@@ -26,6 +26,7 @@
     exchange,
     peer,
     pushkey as pushkeymod,
+    pycompat,
     streamclone,
     util,
 )
@@ -839,7 +840,6 @@
             raise error.Abort(bundle2requiredmain,
                               hint=bundle2requiredhint)
 
-    #chunks = exchange.getbundlechunks(repo, 'serve', **opts)
     try:
         chunks = exchange.getbundlechunks(repo, 'serve', **opts)
     except error.Abort as exc:
@@ -961,7 +961,7 @@
 
         # write bundle data to temporary file because it can be big
         fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
-        fp = os.fdopen(fd, 'wb+')
+        fp = os.fdopen(fd, pycompat.sysstr('wb+'))
         r = 0
         try:
             proto.getfile(fp)
--- a/mercurial/worker.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/mercurial/worker.py	Tue Feb 28 11:13:25 2017 -0800
@@ -164,7 +164,7 @@
                 os._exit(0)
         pids.add(pid)
     os.close(wfd)
-    fp = os.fdopen(rfd, 'rb', 0)
+    fp = os.fdopen(rfd, pycompat.sysstr('rb'), 0)
     def cleanup():
         signal.signal(signal.SIGINT, oldhandler)
         waitforworkers()
--- a/tests/dumbhttp.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/dumbhttp.py	Tue Feb 28 11:13:25 2017 -0800
@@ -7,7 +7,9 @@
 """
 
 import optparse
+import os
 import signal
+import socket
 import sys
 
 from mercurial import (
@@ -18,11 +20,17 @@
 httpserver = util.httpserver
 OptionParser = optparse.OptionParser
 
+if os.environ.get('HGIPV6', '0') == '1':
+    class simplehttpserver(httpserver.httpserver):
+        address_family = socket.AF_INET6
+else:
+    simplehttpserver = httpserver.httpserver
+
 class simplehttpservice(object):
     def __init__(self, host, port):
         self.address = (host, port)
     def init(self):
-        self.httpd = httpserver.httpserver(
+        self.httpd = simplehttpserver(
             self.address, httpserver.simplehttprequesthandler)
     def run(self):
         self.httpd.serve_forever()
--- a/tests/dummyssh	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/dummyssh	Tue Feb 28 11:13:25 2017 -0800
@@ -10,7 +10,7 @@
 if sys.argv[1] != "user@dummy":
     sys.exit(-1)
 
-os.environ["SSH_CLIENT"] = "127.0.0.1 1 2"
+os.environ["SSH_CLIENT"] = "%s 1 2" % os.environ.get('LOCALIP', '127.0.0.1')
 
 log = open("dummylog", "ab")
 log.write("Got arguments")
--- a/tests/run-tests.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/run-tests.py	Tue Feb 28 11:13:25 2017 -0800
@@ -112,18 +112,51 @@
 # For Windows support
 wifexited = getattr(os, "WIFEXITED", lambda x: False)
 
-def checkportisavailable(port):
-    """return true if a port seems free to bind on localhost"""
+# Whether to use IPv6
+def checksocketfamily(name, port=20058):
+    """return true if we can listen on localhost using family=name
+
+    name should be either 'AF_INET' or 'AF_INET6'.
+    The port being in use is okay - EADDRINUSE counts as success.
+    """
+    family = getattr(socket, name, None)
+    if family is None:
+        return False
     try:
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s = socket.socket(family, socket.SOCK_STREAM)
         s.bind(('localhost', port))
         s.close()
         return True
     except socket.error as exc:
-        if not exc.errno == errno.EADDRINUSE:
+        if exc.errno == errno.EADDRINUSE:
+            return True
+        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
+            return False
+        else:
             raise
+    else:
         return False
 
+# useipv6 will be set by parseargs
+useipv6 = None
+
+def checkportisavailable(port):
+    """return true if a port seems free to bind on localhost"""
+    if useipv6:
+        family = socket.AF_INET6
+    else:
+        family = socket.AF_INET
+    try:
+        s = socket.socket(family, socket.SOCK_STREAM)
+        s.bind(('localhost', port))
+        s.close()
+        return True
+    except socket.error as exc:
+        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
+                             errno.EPROTONOSUPPORT):
+            raise
+    return False
+
 closefds = os.name == 'posix'
 def Popen4(cmd, wd, timeout, env=None):
     processlock.acquire()
@@ -269,6 +302,8 @@
                       help="install and use chg wrapper in place of hg")
     parser.add_option("--with-chg", metavar="CHG",
                       help="use specified chg wrapper in place of hg")
+    parser.add_option("--ipv6", action="store_true",
+                      help="prefer IPv6 to IPv4 for network related tests")
     parser.add_option("-3", "--py3k-warnings", action="store_true",
         help="enable Py3k warnings on Python 2.6+")
     # This option should be deleted once test-check-py3-compat.t and other
@@ -338,6 +373,14 @@
         parser.error('--chg does not work when --with-hg is specified '
                      '(use --with-chg instead)')
 
+    global useipv6
+    if options.ipv6:
+        useipv6 = checksocketfamily('AF_INET6')
+    else:
+        # only use IPv6 if IPv4 is unavailable and IPv6 is available
+        useipv6 = ((not checksocketfamily('AF_INET'))
+                   and checksocketfamily('AF_INET6'))
+
     options.anycoverage = options.cover or options.annotate or options.htmlcov
     if options.anycoverage:
         try:
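
(net effect: IPv6 can be requested explicitly with the new flag, and
IPv6-only hosts fall back to it automatically; a hypothetical
invocation - the test name is illustrative)

    $ ./run-tests.py --ipv6 test-http.t
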
@@ -506,7 +549,8 @@
                  timeout=defaults['timeout'],
                  startport=defaults['port'], extraconfigopts=None,
                  py3kwarnings=False, shell=None, hgcommand=None,
-                 slowtimeout=defaults['slowtimeout'], usechg=False):
+                 slowtimeout=defaults['slowtimeout'], usechg=False,
+                 useipv6=False):
         """Create a test from parameters.
 
         path is the full path to the file defining the test.
@@ -554,6 +598,7 @@
         self._shell = _bytespath(shell)
         self._hgcommand = hgcommand or b'hg'
         self._usechg = usechg
+        self._useipv6 = useipv6
 
         self._aborted = False
         self._daemonpids = []
@@ -802,6 +847,7 @@
             self._portmap(2),
             (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
              br'\1 (glob)'),
+            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
             ]
         r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
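
(the substitution added above, shown standalone; the sample line is
made up)

    import re

    localip = b'127.0.0.1'
    # '[^0-9]' keeps the pattern from rewriting the tail of a longer
    # dotted number; it also means a bare IP at line start is left alone
    pat = re.compile(br'([^0-9])%s' % re.escape(localip))
    line = b'remote: txnclose hook: HG_URL=remote:ssh:127.0.0.1'
    print(pat.sub(br'\1$LOCALIP', line).decode())
    # -> remote: txnclose hook: HG_URL=remote:ssh:$LOCALIP
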
 
@@ -817,6 +863,12 @@
         else:
             return re.escape(p)
 
+    def _localip(self):
+        if self._useipv6:
+            return b'::1'
+        else:
+            return b'127.0.0.1'
+
     def _getenv(self):
         """Obtain environment variables to use during test execution."""
         def defineport(i):
@@ -839,6 +891,11 @@
         env["HGUSER"]   = "test"
         env["HGENCODING"] = "ascii"
         env["HGENCODINGMODE"] = "strict"
+        env['HGIPV6'] = str(int(self._useipv6))
+
+        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
+        # IP addresses.
+        env['LOCALIP'] = self._localip()
 
         # Reset some environment variables to well-known values so that
         # the tests produce repeatable output.
@@ -849,6 +906,7 @@
         env['TERM'] = 'xterm'
 
         for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
+                  'HGPLAIN HGPLAINEXCEPT ' +
                   'NO_PROXY CHGDEBUG').split():
             if k in env:
                 del env[k]
@@ -881,6 +939,9 @@
         hgrc.write(b'[largefiles]\n')
         hgrc.write(b'usercache = %s\n' %
                    (os.path.join(self._testtmp, b'.cache/largefiles')))
+        hgrc.write(b'[web]\n')
+        hgrc.write(b'address = localhost\n')
+        hgrc.write(b'ipv6 = %s\n' % self._useipv6)
 
         for opt in self._extraconfigopts:
             section, key = opt.split('.', 1)
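
(note the '%s' rendering: a Python bool formats as 'True'/'False',
which is exactly what test-basic.t below matches with its '(re)' line;
a Python 3-safe sketch of the same write, with illustrative names)

    useipv6 = False
    with open('hgrc', 'ab') as hgrc:
        hgrc.write(b'[web]\n')
        hgrc.write(b'address = localhost\n')
        # str(False) -> 'False', so the config line reads 'ipv6 = False'
        hgrc.write(('ipv6 = %s\n' % useipv6).encode('ascii'))
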
@@ -2288,7 +2349,8 @@
                     py3kwarnings=self.options.py3k_warnings,
                     shell=self.options.shell,
                     hgcommand=self._hgcommand,
-                    usechg=bool(self.options.with_chg or self.options.chg))
+                    usechg=bool(self.options.with_chg or self.options.chg),
+                    useipv6=useipv6)
         t.should_reload = True
         return t
 
--- a/tests/test-archive.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-archive.t	Tue Feb 28 11:13:25 2017 -0800
@@ -99,7 +99,7 @@
   > except AttributeError:
   >     stdout = sys.stdout
   > try:
-  >     f = util.urlreq.urlopen('http://127.0.0.1:%s/?%s'
+  >     f = util.urlreq.urlopen('http://$LOCALIP:%s/?%s'
   >                     % (os.environ['HGPORT'], requeststr))
   >     stdout.write(f.read())
   > except util.urlerr.httperror as e:
--- a/tests/test-basic.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-basic.t	Tue Feb 28 11:13:25 2017 -0800
@@ -11,6 +11,8 @@
   ui.interactive=False
   ui.mergemarkers=detailed
   ui.promptecho=True
+  web.address=localhost
+  web\.ipv6=(?:True|False) (re)
   $ hg init t
   $ cd t
 
--- a/tests/test-bdiff.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-bdiff.py	Tue Feb 28 11:13:25 2017 -0800
@@ -3,8 +3,6 @@
 import struct
 import unittest
 
-import silenttestrunner
-
 from mercurial import (
     bdiff,
     mpatch,
@@ -148,4 +146,5 @@
                          ['a\n', diffreplace(2, 10, 'a\na\na\na\n', '')])
 
 if __name__ == '__main__':
+    import silenttestrunner
     silenttestrunner.main(__name__)
--- a/tests/test-bookmarks.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-bookmarks.t	Tue Feb 28 11:13:25 2017 -0800
@@ -1,4 +1,5 @@
-  $ hg init
+  $ hg init repo
+  $ cd repo
 
 no bookmarks
 
@@ -630,7 +631,7 @@
      Z                         2:db815d6d32e6
      x  y                      2:db815d6d32e6
   $ hg -R ../cloned-bookmarks-manual-update-with-divergence pull
-  pulling from $TESTTMP
+  pulling from $TESTTMP/repo (glob)
   searching for changes
   adding changesets
   adding manifests
@@ -895,3 +896,58 @@
   $ touch $TESTTMP/unpause
 
   $ cd ..
+
+check that HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+(emulate a concurrently running transaction by restoring the copied
+.hg/bookmarks.pending in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/bookmarks.pending .hg/bookmarks.pending.saved
+  > exit 1 # to avoid adding new bookmark for subsequent tests
+  > EOF
+
+  $ hg init unrelated
+  $ cd unrelated
+  $ echo a > a
+  $ hg add a
+  $ hg commit -m '#0'
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" bookmarks INVISIBLE
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/bookmarks.pending.saved .hg/bookmarks.pending
+
+(check which bookmarks are visible while a transaction is running in repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo "@repo"
+  > hg -R $TESTTMP/repo bookmarks
+  > echo "@unrelated"
+  > hg -R $TESTTMP/unrelated bookmarks
+  > exit 1 # to avoid adding new bookmark for subsequent tests
+  > EOF
+
+  $ cd ../repo
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" bookmarks NEW
+  @repo
+   * NEW                       6:81dcce76aa0b
+     X2                        1:925d80f479bb
+     Y                         4:125c9a1d6df6
+     Z                         5:5fb12f0f2d51
+     Z@1                       1:925d80f479bb
+     Z@2                       4:125c9a1d6df6
+     foo                       3:9ba5f110a0b3
+     foo@1                     0:f7b1eb17ad24
+     foo@2                     2:db815d6d32e6
+     four                      3:9ba5f110a0b3
+     should-end-on-two         2:db815d6d32e6
+     x  y                      2:db815d6d32e6
+  @unrelated
+  no bookmarks set
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
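
(a sketch of the contract being exercised, as this note understands it:
Mercurial exports HG_PENDING, set to the root of the repository whose
transaction is in progress, into external hook environments, and an hg
process consults the .pending variants of files only for that
repository; seespending is an illustrative name, not hg internals)

    import os

    def seespending(repopath):
        """would an hg process spawned by this hook see pending data in
        repopath? (simplified sketch of the HG_PENDING contract)"""
        return os.environ.get('HG_PENDING') == repopath
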
--- a/tests/test-bundle2-exchange.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-bundle2-exchange.t	Tue Feb 28 11:13:25 2017 -0800
@@ -340,7 +340,7 @@
   remote: lock:  free
   remote: wlock: free
   remote: postclose-tip:5fddd98957c8 draft book_5fdd
-  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:127.0.0.1 (glob)
+  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:$LOCALIP (glob)
   updating bookmark book_5fdd
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
@@ -394,7 +394,7 @@
   remote: lock:  free
   remote: wlock: free
   remote: postclose-tip:32af7686d403 public book_32af
-  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:http:127.0.0.1: (glob)
+  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:http:*: (glob)
   updating bookmark book_32af
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-check-help.t	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,25 @@
+#require test-repo
+
+  $ . "$TESTDIR/helpers-testrepo.sh"
+
+  $ cat <<'EOF' > scanhelptopics.py
+  > from __future__ import absolute_import, print_function
+  > import re
+  > import sys
+  > topics = set()
+  > topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`')
+  > for fname in sys.argv:
+  >     with open(fname) as f:
+  >         topics.update(m.group(1) for m in topicre.finditer(f.read()))
+  > for s in sorted(topics):
+  >     print(s)
+  > EOF
+
+  $ cd "$TESTDIR"/..
+
+Check if ":hg:`help TOPIC`" is valid:
+(use "xargs -n1 -t" to see which help commands are executed)
+
+  $ hg files 'glob:{hgext,mercurial}/**/*.py' \
+  > | xargs python "$TESTTMP/scanhelptopics.py" \
+  > | xargs -n1 hg help > /dev/null
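
(what the scanner matches, for example)

    import re
    topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`')
    doc = "see :hg:`help config` and :hg:`help revisions.patterns`"
    print(topicre.findall(doc))
    # -> ['config', 'revisions.patterns']
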
--- a/tests/test-check-py3-compat.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-check-py3-compat.t	Tue Feb 28 11:13:25 2017 -0800
@@ -7,7 +7,6 @@
   contrib/python-zstandard/setup.py not using absolute_import
   contrib/python-zstandard/setup_zstd.py not using absolute_import
   contrib/python-zstandard/tests/common.py not using absolute_import
-  contrib/python-zstandard/tests/test_cffi.py not using absolute_import
   contrib/python-zstandard/tests/test_compressor.py not using absolute_import
   contrib/python-zstandard/tests/test_data_structures.py not using absolute_import
   contrib/python-zstandard/tests/test_decompressor.py not using absolute_import
--- a/tests/test-check-pyflakes.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-check-pyflakes.t	Tue Feb 28 11:13:25 2017 -0800
@@ -7,9 +7,8 @@
 (skipping binary file random-seed)
 
   $ hg locate 'set:**.py or grep("^#!.*python")' -X hgext/fsmonitor/pywatchman \
-  > -X mercurial/pycompat.py \
+  > -X mercurial/pycompat.py -X contrib/python-zstandard \
   > 2>/dev/null \
   > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
-  contrib/python-zstandard/tests/test_data_structures.py:107: local variable 'size' is assigned to but never used
   tests/filterpyflakes.py:39: undefined name 'undefinedname'
   
--- a/tests/test-chg.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-chg.t	Tue Feb 28 11:13:25 2017 -0800
@@ -32,6 +32,46 @@
 
   $ cd ..
 
+editor
+------
+
+  $ cat >> pushbuffer.py <<EOF
+  > def reposetup(ui, repo):
+  >     repo.ui.pushbuffer(subproc=True)
+  > EOF
+
+  $ chg init editor
+  $ cd editor
+
+by default, system() should be redirected to the client:
+
+  $ touch foo
+  $ CHGDEBUG= HGEDITOR=cat chg ci -Am channeled --edit 2>&1 \
+  > | egrep "HG:|run 'cat"
+  chg: debug: run 'cat "*"' at '$TESTTMP/editor' (glob)
+  HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  HG: Leave message empty to abort commit.
+  HG: --
+  HG: user: test
+  HG: branch 'default'
+  HG: added foo
+
+but no redirection should be made if output is captured:
+
+  $ touch bar
+  $ CHGDEBUG= HGEDITOR=cat chg ci -Am buffered --edit \
+  > --config extensions.pushbuffer="$TESTTMP/pushbuffer.py" 2>&1 \
+  > | egrep "HG:|run 'cat"
+  [1]
+
+check that commit commands succeeded:
+
+  $ hg log -T '{rev}:{desc}\n'
+  1:buffered
+  0:channeled
+
+  $ cd ..
+
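
(the capture-detection idea from the editor test above, restated as a
sketch grounded in the test's own pushbuffer.py extension: an active
ui buffer that also claims subprocess output tells chg to keep
system() on the server side rather than hand the command to the
client's terminal)

    def reposetup(ui, repo):
        # subproc=True: subprocess output is to be captured too, so
        # redirecting the editor to the client would lose it
        repo.ui.pushbuffer(subproc=True)
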
 pager
 -----
 
--- a/tests/test-clone.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-clone.t	Tue Feb 28 11:13:25 2017 -0800
@@ -579,11 +579,11 @@
 No remote source
 
 #if windows
-  $ hg clone http://127.0.0.1:3121/a b
+  $ hg clone http://$LOCALIP:3121/a b
   abort: error: * (glob)
   [255]
 #else
-  $ hg clone http://127.0.0.1:3121/a b
+  $ hg clone http://$LOCALIP:3121/a b
   abort: error: *refused* (glob)
   [255]
 #endif
--- a/tests/test-commandserver.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-commandserver.t	Tue Feb 28 11:13:25 2017 -0800
@@ -199,6 +199,8 @@
   ui.usehttp2=true (?)
   ui.foo=bar
   ui.nontty=true
+  web.address=localhost
+  web\.ipv6=(?:True|False) (re)
   *** runcommand init foo
   *** runcommand -R foo showconfig ui defaults
   defaults.backout=-d "0 0"
--- a/tests/test-completion.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-completion.t	Tue Feb 28 11:13:25 2017 -0800
@@ -129,6 +129,7 @@
 
 Show the global options
   $ hg debugcomplete --options | sort
+  --color
   --config
   --cwd
   --debug
@@ -138,6 +139,7 @@
   --help
   --hidden
   --noninteractive
+  --pager
   --profile
   --quiet
   --repository
@@ -157,6 +159,7 @@
   --address
   --certificate
   --cmdserver
+  --color
   --config
   --cwd
   --daemon
@@ -171,6 +174,7 @@
   --ipv6
   --name
   --noninteractive
+  --pager
   --pid-file
   --port
   --prefix
--- a/tests/test-config.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-config.t	Tue Feb 28 11:13:25 2017 -0800
@@ -58,12 +58,12 @@
   [
    {
     "name": "Section.KeY",
-    "source": "*.hgrc:16", (glob)
+    "source": "*.hgrc:*", (glob)
     "value": "Case Sensitive"
    },
    {
     "name": "Section.key",
-    "source": "*.hgrc:17", (glob)
+    "source": "*.hgrc:*", (glob)
     "value": "lower case"
    }
   ]
@@ -71,7 +71,7 @@
   [
    {
     "name": "Section.KeY",
-    "source": "*.hgrc:16", (glob)
+    "source": "*.hgrc:*", (glob)
     "value": "Case Sensitive"
    }
   ]
@@ -158,3 +158,9 @@
   $ hg showconfig paths
   paths.foo:suboption=~/foo
   paths.foo=$TESTTMP/foo
+
+edit failure
+
+  $ HGEDITOR=false hg config --edit
+  abort: edit failed: false exited with status 1
+  [255]
--- a/tests/test-contrib-perf.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-contrib-perf.t	Tue Feb 28 11:13:25 2017 -0800
@@ -109,6 +109,7 @@
    perfvolatilesets
                  benchmark the computation of various volatile set
    perfwalk      (no help text available)
+   perfwrite     microbenchmark ui.write
   
   (use 'hg help -v perfstatusext' to show built-in aliases and global options)
   $ hg perfaddremove
--- a/tests/test-convert-git.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-convert-git.t	Tue Feb 28 11:13:25 2017 -0800
@@ -330,7 +330,7 @@
 
 input validation
   $ hg convert --config convert.git.similarity=foo --datesort git-repo2 fullrepo
-  abort: convert.git.similarity is not an integer ('foo')
+  abort: convert.git.similarity is not a valid integer ('foo')
   [255]
   $ hg convert --config convert.git.similarity=-1 --datesort git-repo2 fullrepo
   abort: similarity must be between 0 and 100
--- a/tests/test-diff-color.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-diff-color.t	Tue Feb 28 11:13:25 2017 -0800
@@ -1,10 +1,10 @@
 Setup
 
   $ cat <<EOF >> $HGRCPATH
+  > [ui]
+  > color = always
   > [color]
   > mode = ansi
-  > [extensions]
-  > color =
   > EOF
   $ hg init repo
   $ cd repo
@@ -35,7 +35,7 @@
 
 default context
 
-  $ hg diff --nodates --color=always
+  $ hg diff --nodates
   \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -51,7 +51,7 @@
 
 --unified=2
 
-  $ hg diff --nodates -U 2  --color=always
+  $ hg diff --nodates -U 2
   \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -65,10 +65,11 @@
 
 diffstat
 
-  $ hg diff --stat --color=always
+  $ hg diff --stat
    a |  2 \x1b[0;32m+\x1b[0m\x1b[0;31m-\x1b[0m (esc)
    1 files changed, 1 insertions(+), 1 deletions(-)
   $ cat <<EOF >> $HGRCPATH
+  > [extensions]
   > record =
   > [ui]
   > interactive = true
@@ -81,7 +82,7 @@
 record
 
   $ chmod +x a
-  $ hg record --color=always -m moda a <<EOF
+  $ hg record -m moda a <<EOF
   > y
   > y
   > EOF
@@ -111,7 +112,7 @@
 
 qrecord
 
-  $ hg qrecord --color=always -m moda patch <<EOF
+  $ hg qrecord -m moda patch <<EOF
   > y
   > y
   > EOF
@@ -151,7 +152,7 @@
   $ echo aa >> a
   $ echo bb >> sub/b
 
-  $ hg diff --color=always -S
+  $ hg diff -S
   \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -176,7 +177,7 @@
   > mid	tab
   > 	all		tabs	
   > EOF
-  $ hg diff --nodates --color=always
+  $ hg diff --nodates
   \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -192,7 +193,7 @@
   \x1b[0;32m+\x1b[0m	\x1b[0;32mall\x1b[0m		\x1b[0;32mtabs\x1b[0m\x1b[0;1;41m	\x1b[0m (esc)
   $ echo "[color]" >> $HGRCPATH
   $ echo "diff.tab = bold magenta" >> $HGRCPATH
-  $ hg diff --nodates --color=always
+  $ hg diff --nodates
   \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
--- a/tests/test-doctest.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-doctest.py	Tue Feb 28 11:13:25 2017 -0800
@@ -28,7 +28,8 @@
 testmod('mercurial.patch')
 testmod('mercurial.pathutil')
 testmod('mercurial.parser')
-testmod('mercurial.revset')
+testmod('mercurial.revsetlang')
+testmod('mercurial.smartset')
 testmod('mercurial.store')
 testmod('mercurial.subrepo')
 testmod('mercurial.templatefilters')
--- a/tests/test-eol.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-eol.t	Tue Feb 28 11:13:25 2017 -0800
@@ -470,6 +470,22 @@
   > EOF
   $ hg commit -m 'consistent'
 
+  $ hg init subrepo
+  $ hg -R subrepo pull -qu .
+  $ echo "subrepo = subrepo" > .hgsub
+  $ hg ci -Am "add subrepo"
+  adding .hgeol
+  adding .hgsub
+  $ hg archive -S ../archive
+  $ find ../archive/* | sort
+  ../archive/a.txt
+  ../archive/subrepo
+  ../archive/subrepo/a.txt
+  $ cat ../archive/a.txt ../archive/subrepo/a.txt
+  first\r (esc)
+  second\r (esc)
+  first\r (esc)
+  second\r (esc)
 
 Test trailing newline
 
--- a/tests/test-extension.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-extension.t	Tue Feb 28 11:13:25 2017 -0800
@@ -532,6 +532,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -543,6 +545,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 
 
@@ -567,6 +571,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -578,6 +584,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 
 
@@ -845,6 +853,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -856,6 +866,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 Make sure that single '-v' option shows help and built-ins only for 'dodo' command
   $ hg help -v dodo
@@ -878,6 +890,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -889,6 +903,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 In case when extension name doesn't match any of its commands,
 help message should ask for '-v' to get list of built-in aliases
@@ -949,6 +965,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -960,6 +978,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
   $ hg help -v -e dudu
   dudu extension -
@@ -981,6 +1001,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -992,6 +1014,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 Disabled extension commands:
 
--- a/tests/test-gendoc-ro.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-gendoc-ro.t	Tue Feb 28 11:13:25 2017 -0800
@@ -1,4 +1,9 @@
 #require docutils gettext
 
+Note: the current ro localization has some rst defects exposed by
+moving pager to core. The two reference warnings below are expected
+until the localization is corrected.
   $ $TESTDIR/check-gendoc ro
   checking for parse errors
+  gendoc.txt:58: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string.
+  gendoc.txt:58: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string.
--- a/tests/test-globalopts.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-globalopts.t	Tue Feb 28 11:13:25 2017 -0800
@@ -351,6 +351,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -432,6 +433,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
--- a/tests/test-glog.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-glog.t	Tue Feb 28 11:13:25 2017 -0800
@@ -82,18 +82,18 @@
   > }
 
   $ cat > printrevset.py <<EOF
-  > from mercurial import extensions, revset, commands, cmdutil
+  > from mercurial import extensions, revsetlang, commands, cmdutil
   > 
   > def uisetup(ui):
   >     def printrevset(orig, ui, repo, *pats, **opts):
   >         if opts.get('print_revset'):
   >             expr = cmdutil.getgraphlogrevs(repo, pats, opts)[1]
   >             if expr:
-  >                 tree = revset.parse(expr)
+  >                 tree = revsetlang.parse(expr)
   >             else:
   >                 tree = []
   >             ui.write('%r\n' % (opts.get('rev', []),))
-  >             ui.write(revset.prettyformat(tree) + '\n')
+  >             ui.write(revsetlang.prettyformat(tree) + '\n')
   >             return 0
   >         return orig(ui, repo, *pats, **opts)
   >     entry = extensions.wrapcommand(commands.table, 'log', printrevset)
--- a/tests/test-help.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-help.t	Tue Feb 28 11:13:25 2017 -0800
@@ -113,6 +113,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -188,6 +189,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -262,7 +264,6 @@
        largefiles    track large binary files
        mq            manage a stack of patches
        notify        hooks for sending email push notifications
-       pager         browse command output with an external pager
        patchbomb     command to send changesets as (a series of) patch emails
        purge         command to delete untracked files from the working
                      directory
@@ -315,6 +316,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -326,6 +329,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
   
   (use 'hg help' for the full list of commands)
 
@@ -411,6 +416,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug) (EXPERIMENTAL)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -422,6 +429,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 Test the textwidth config option
 
@@ -678,6 +687,7 @@
   >     ('', 'newline', '', 'line1\nline2')],
   >     'hg nohelp',
   >     norepo=True)
+  > @command('debugoptADV', [('', 'aopt', None, 'option is (ADVANCED)')])
   > @command('debugoptDEP', [('', 'dopt', None, 'option is (DEPRECATED)')])
   > @command('debugoptEXP', [('', 'eopt', None, 'option is (EXPERIMENTAL)')])
   > def nohelp(ui, *args, **kwargs):
@@ -827,6 +837,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -889,6 +900,7 @@
                  complete "names" - tags, open branch names, bookmark names
    debugobsolete
                  create arbitrary obsolete marker
+   debugoptADV   (no help text available)
    debugoptDEP   (no help text available)
    debugoptEXP   (no help text available)
    debugpathcomplete
@@ -1102,7 +1114,15 @@
   (use 'hg help -v helpext' to show built-in aliases and global options)
 
 
-test deprecated and experimental options are hidden in command help
+test advanced, deprecated and experimental options are hidden in command help
+  $ hg help debugoptADV
+  hg debugoptADV
+  
+  (no help text available)
+  
+  options:
+  
+  (some details hidden, use --verbose to show complete help)
   $ hg help debugoptDEP
   hg debugoptDEP
   
@@ -1121,7 +1141,9 @@
   
   (some details hidden, use --verbose to show complete help)
 
-test deprecated and experimental options is shown with -v
+test advanced, deprecated and experimental options are shown with -v
+  $ hg help -v debugoptADV | grep aopt
+    --aopt option is (ADVANCED)
   $ hg help -v debugoptDEP | grep dopt
     --dopt option is (DEPRECATED)
   $ hg help -v debugoptEXP | grep eopt
@@ -1547,11 +1569,11 @@
          "default:pushurl" should be used instead.
   
   $ hg help glossary.mcguffin
-  abort: help section not found
+  abort: help section not found: glossary.mcguffin
   [255]
 
   $ hg help glossary.mc.guffin
-  abort: help section not found
+  abort: help section not found: glossary.mc.guffin
   [255]
 
   $ hg help template.files
@@ -1792,7 +1814,7 @@
   $ hg serve -R "$TESTTMP/test" -n test -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1914,6 +1936,13 @@
   Merge Tools
   </td></tr>
   <tr><td>
+  <a href="/help/pager">
+  pager
+  </a>
+  </td><td>
+  Pager Support
+  </td></tr>
+  <tr><td>
   <a href="/help/patterns">
   patterns
   </a>
@@ -2361,7 +2390,7 @@
   </html>
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/add"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/add"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2491,6 +2520,9 @@
   <td>--verbose</td>
   <td>enable additional output</td></tr>
   <tr><td></td>
+  <td>--color TYPE</td>
+  <td>when to colorize (boolean, always, auto, never, or debug) (EXPERIMENTAL)</td></tr>
+  <tr><td></td>
   <td>--config CONFIG [+]</td>
   <td>set/override config option (use 'section.name=value')</td></tr>
   <tr><td></td>
@@ -2523,6 +2555,9 @@
   <tr><td></td>
   <td>--hidden</td>
   <td>consider hidden changesets</td></tr>
+  <tr><td></td>
+  <td>--pager TYPE</td>
+  <td>when to paginate (boolean, always, auto, or never) (default: auto)</td></tr>
   </table>
   
   </div>
@@ -2535,7 +2570,7 @@
   </html>
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/remove"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/remove"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2686,6 +2721,9 @@
   <td>--verbose</td>
   <td>enable additional output</td></tr>
   <tr><td></td>
+  <td>--color TYPE</td>
+  <td>when to colorize (boolean, always, auto, never, or debug) (EXPERIMENTAL)</td></tr>
+  <tr><td></td>
   <td>--config CONFIG [+]</td>
   <td>set/override config option (use 'section.name=value')</td></tr>
   <tr><td></td>
@@ -2718,6 +2756,9 @@
   <tr><td></td>
   <td>--hidden</td>
   <td>consider hidden changesets</td></tr>
+  <tr><td></td>
+  <td>--pager TYPE</td>
+  <td>when to paginate (boolean, always, auto, or never) (default: auto)</td></tr>
   </table>
   
   </div>
@@ -2730,7 +2771,7 @@
   </html>
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/dates"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/dates"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2837,7 +2878,7 @@
 
 Sub-topic indexes rendered properly
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/internals"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/internals"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2933,7 +2974,7 @@
 
 Sub-topic topics rendered properly
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/internals.changegroups"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/internals.changegroups"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
--- a/tests/test-hgweb-commands.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-commands.t	Tue Feb 28 11:13:25 2017 -0800
@@ -58,7 +58,7 @@
 
 Logs and changes
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/?style=atom'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/?style=atom'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -244,7 +244,7 @@
    </entry>
   
   </feed>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/?style=rss'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/?style=rss'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -422,7 +422,7 @@
   
     </channel>
   </rss> (no-eol)
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/?style=atom'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/?style=atom'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -522,7 +522,7 @@
    </entry>
   
   </feed>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/?style=rss'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/?style=rss'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -618,7 +618,7 @@
   
     </channel>
   </rss> (no-eol)
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/foo/?style=atom'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/foo/?style=atom'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -673,7 +673,7 @@
    </entry>
   
   </feed>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/foo/?style=rss'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/foo/?style=rss'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -694,7 +694,7 @@
   
     </channel>
   </rss>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'shortlog/'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'shortlog/'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -834,7 +834,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'rev/0/'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'rev/0/'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -965,7 +965,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'rev/1/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'rev/1/?style=raw'
   200 Script output follows
   
   
@@ -982,7 +982,7 @@
   @@ -0,0 +1,1 @@
   +2ef0ac749a14e4f57a5a822464a0902c6f7f448f 1.0
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=base'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=base'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1071,12 +1071,12 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=stable&style=raw' | grep 'revision:'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=stable&style=raw' | grep 'revision:'
   revision:    2
 
 Search with revset syntax
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=tip^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=tip^&style=raw'
   200 Script output follows
   
   
@@ -1093,7 +1093,7 @@
   branch:      stable
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(all(),2)^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(all(),2)^&style=raw'
   200 Script output follows
   
   
@@ -1117,7 +1117,7 @@
   branch:      default
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(all(,2)^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(all(,2)^&style=raw'
   200 Script output follows
   
   
@@ -1127,7 +1127,7 @@
   # Mode literal keyword search
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(al(),2)^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(al(),2)^&style=raw'
   200 Script output follows
   
   
@@ -1137,7 +1137,7 @@
   # Mode literal keyword search
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=bookmark(anotherthing)&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=bookmark(anotherthing)&style=raw'
   200 Script output follows
   
   
@@ -1155,7 +1155,7 @@
   bookmark:    anotherthing
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=bookmark(abc)&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=bookmark(abc)&style=raw'
   200 Script output follows
   
   
@@ -1165,7 +1165,7 @@
   # Mode literal keyword search
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=deadbeef:&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=deadbeef:&style=raw'
   200 Script output follows
   
   
@@ -1176,7 +1176,7 @@
   
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=user("test")&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=user("test")&style=raw'
   200 Script output follows
   
   
@@ -1217,7 +1217,7 @@
   bookmark:    anotherthing
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=user("re:test")&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=user("re:test")&style=raw'
   200 Script output follows
   
   
@@ -1230,11 +1230,11 @@
 
 File-related
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/foo/?style=raw'
   200 Script output follows
   
   foo
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'annotate/1/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'annotate/1/foo/?style=raw'
   200 Script output follows
   
   
@@ -1243,7 +1243,7 @@
   
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/?style=raw'
   200 Script output follows
   
   
@@ -1259,7 +1259,7 @@
   $ hg parents --template "{node|short}\n" -r 1 foo
   2ef0ac749a14
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/foo'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/foo'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1354,7 +1354,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'filediff/0/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'filediff/0/foo/?style=raw'
   200 Script output follows
   
   
@@ -1368,7 +1368,7 @@
   
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'filediff/1/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'filediff/1/foo/?style=raw'
   200 Script output follows
   
   
@@ -1384,7 +1384,7 @@
   $ hg parents --template "{node|short}\n" -r 2 foo
   2ef0ac749a14
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/2/foo'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/2/foo'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1483,23 +1483,23 @@
 
 Overviews
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-tags'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'raw-tags'
   200 Script output follows
   
   tip	cad8025a2e87f88c06259790adfa15acb4080123
   1.0	2ef0ac749a14e4f57a5a822464a0902c6f7f448f
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-branches'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'raw-branches'
   200 Script output follows
   
   unstable	cad8025a2e87f88c06259790adfa15acb4080123	open
   stable	1d22e65f027e5a0609357e7d8e7508cd2ba5d2fe	inactive
   default	a4f92ed23982be056b9852de5dfe873eaac7f0de	inactive
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-bookmarks'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'raw-bookmarks'
   200 Script output follows
   
   something	cad8025a2e87f88c06259790adfa15acb4080123
   anotherthing	2ef0ac749a14e4f57a5a822464a0902c6f7f448f
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'summary/?style=gitweb'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'summary/?style=gitweb'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -1697,7 +1697,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/?style=gitweb'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'graph/?style=gitweb'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -1843,7 +1843,7 @@
   
 raw graph
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'graph/?style=raw'
   200 Script output follows
   
   
@@ -1893,28 +1893,28 @@
 
 capabilities
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities'; echo
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'; echo
   200 Script output follows
   
   lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=*zlib (glob)
 
 heads
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=heads'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=heads'
   200 Script output follows
   
   cad8025a2e87f88c06259790adfa15acb4080123
 
 branches
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000'
   200 Script output follows
   
   0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
 
 changegroup
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000'
   200 Script output follows
   
   x\x9c\xbd\x94MHTQ\x14\xc7'+\x9d\xc66\x81\x89P\xc1\xa3\x14\xcct\xba\xef\xbe\xfb\xde\xbb\xcfr0\xb3"\x02\x11[%\x98\xdcO\xa7\xd2\x19\x98y\xd2\x07h"\x96\xa0e\xda\xa6lUY-\xca\x08\xa2\x82\x16\x96\xd1\xa2\xf0#\xc8\x95\x1b\xdd$!m*"\xc8\x82\xea\xbe\x9c\x01\x85\xc9\x996\x1d\xf8\xc1\xe3~\x9d\xff9\xef\x7f\xaf\xcf\xe7\xbb\x19\xfc4\xec^\xcb\x9b\xfbz\xa6\xbe\xb3\x90_\xef/\x8d\x9e\xad\xbe\xe4\xcb0\xd2\xec\xad\x12X:\xc8\x12\x12\xd9:\x95\xba	\x1cG\xb7$\xc5\xc44\x1c(\x1d\x03\x03\xdb\x84\x0cK#\xe0\x8a\xb8\x1b\x00\x1a\x08p\xb2SF\xa3\x01\x8f\x00%q\xa1Ny{k!8\xe5t>[{\xe2j\xddl\xc3\xcf\xee\xd0\xddW\x9ff3U\x9djobj\xbb\x87E\x88\x05l\x001\x12\x18\x13\xc6 \xb7(\xe3\x02a\x80\x81\xcel.u\x9b\x1b\x8c\x91\x80Z\x0c\x15\x15 (esc)
@@ -1925,14 +1925,14 @@
 
 stream_out
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=stream_out'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
   200 Script output follows
   
   1
 
 failing unbundle, requires POST request
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=unbundle'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=unbundle'
   405 push requires POST request
   
   0
@@ -1941,7 +1941,7 @@
 
 Static files
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'static/style.css'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'static/style.css'
   200 Script output follows
   
   a { text-decoration:none; }
@@ -2077,7 +2077,7 @@
   > --cwd .. -R `pwd`
   $ cat hg.pid >> $DAEMON_PIDS
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=adds("foo")&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=adds("foo")&style=raw'
   200 Script output follows
   
   
@@ -2110,7 +2110,7 @@
 
 Graph json escape of multibyte character
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/' > out
+  $ get-with-headers.py $LOCALIP:$HGPORT 'graph/' > out
   >>> from __future__ import print_function
   >>> for line in open("out"):
   ...     if line.startswith("var data ="):
@@ -2121,14 +2121,14 @@
 
 (plain version to check the format)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo
   200 Script output follows
   
   lookup changegroupsubset branchmap pushkey known
 
 (spread version to check the content)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n'; echo
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n'; echo
   200
   Script
   output
@@ -2194,23 +2194,23 @@
 
 Test paging
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/?style=raw' | grep changeset
   changeset:   aed2d9c1d0e7
   changeset:   b60a39a85a01
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/?style=raw&revcount=3' | grep changeset
   changeset:   aed2d9c1d0e7
   changeset:   b60a39a85a01
   changeset:   ada793dcc118
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/e06180cbfb0?style=raw&revcount=3' | grep changeset
   changeset:   e06180cbfb0c
   changeset:   b4e73ffab476
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/b4e73ffab47?style=raw&revcount=3' | grep changeset
   changeset:   b4e73ffab476
 
--- a/tests/test-hgweb-descend-empties.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-descend-empties.t	Tue Feb 28 11:13:25 2017 -0800
@@ -29,7 +29,7 @@
 
 manifest with descending (paper)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -147,7 +147,7 @@
 
 manifest with descending (coal)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=coal'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=coal'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -266,7 +266,7 @@
 
 manifest with descending (monoblue)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=monoblue'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=monoblue'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
@@ -379,7 +379,7 @@
 
 manifest with descending (gitweb)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=gitweb'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=gitweb'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -482,7 +482,7 @@
 
 manifest with descending (spartan)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=spartan'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=spartan'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
--- a/tests/test-hgweb-json.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-json.t	Tue Feb 28 11:13:25 2017 -0800
@@ -1593,6 +1593,10 @@
         "topic": "merge-tools"
       },
       {
+        "summary": "Pager Support",
+        "topic": "pager"
+      },
+      {
         "summary": "File Name Patterns",
         "topic": "patterns"
       },
--- a/tests/test-hgweb-no-path-info.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-no-path-info.t	Tue Feb 28 11:13:25 2017 -0800
@@ -49,7 +49,7 @@
   >     'REQUEST_METHOD': 'GET',
   >     'PATH_INFO': '/',
   >     'SCRIPT_NAME': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
@@ -79,16 +79,16 @@
   <?xml version="1.0" encoding="ascii"?>
   <feed xmlns="http://www.w3.org/2005/Atom">
    <!-- Changelog -->
-   <id>http://127.0.0.1:$HGPORT/</id> (glob)
-   <link rel="self" href="http://127.0.0.1:$HGPORT/atom-log"/> (glob)
-   <link rel="alternate" href="http://127.0.0.1:$HGPORT/"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/</id> (glob)
+   <link rel="self" href="http://$LOCALIP:$HGPORT/atom-log"/> (glob)
+   <link rel="alternate" href="http://$LOCALIP:$HGPORT/"/> (glob)
    <title>repo Changelog</title>
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
     <title>[default] test</title>
-    <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
-    <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/> (glob)
+    <id>http://$LOCALIP:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
+    <link href="http://$LOCALIP:$HGPORT/rev/61c9426e69fe"/> (glob)
     <author>
      <name>test</name>
      <email>&#116;&#101;&#115;&#116;</email>
--- a/tests/test-hgweb-no-request-uri.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-no-request-uri.t	Tue Feb 28 11:13:25 2017 -0800
@@ -48,7 +48,7 @@
   >     'wsgi.run_once': False,
   >     'REQUEST_METHOD': 'GET',
   >     'SCRIPT_NAME': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
@@ -90,16 +90,16 @@
   <?xml version="1.0" encoding="ascii"?>
   <feed xmlns="http://www.w3.org/2005/Atom">
    <!-- Changelog -->
-   <id>http://127.0.0.1:$HGPORT/</id> (glob)
-   <link rel="self" href="http://127.0.0.1:$HGPORT/atom-log"/> (glob)
-   <link rel="alternate" href="http://127.0.0.1:$HGPORT/"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/</id> (glob)
+   <link rel="self" href="http://$LOCALIP:$HGPORT/atom-log"/> (glob)
+   <link rel="alternate" href="http://$LOCALIP:$HGPORT/"/> (glob)
    <title>repo Changelog</title>
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
     <title>[default] test</title>
-    <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
-    <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/> (glob)
+    <id>http://$LOCALIP:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
+    <link href="http://$LOCALIP:$HGPORT/rev/61c9426e69fe"/> (glob)
     <author>
      <name>test</name>
      <email>&#116;&#101;&#115;&#116;</email>
--- a/tests/test-hgweb-non-interactive.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-non-interactive.t	Tue Feb 28 11:13:25 2017 -0800
@@ -60,7 +60,7 @@
   >     'SCRIPT_NAME': '',
   >     'PATH_INFO': '',
   >     'QUERY_STRING': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
--- a/tests/test-hgweb-raw.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-raw.t	Tue Feb 28 11:13:25 2017 -0800
@@ -32,7 +32,7 @@
   It is very boring to read, but computers don't
   care about things like that.
   $ cat access.log error.log
-  127.0.0.1 - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
 
   $ rm access.log error.log
   $ hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid \
@@ -53,6 +53,6 @@
   It is very boring to read, but computers don't
   care about things like that.
   $ cat access.log error.log
-  127.0.0.1 - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
 
   $ cd ..
--- a/tests/test-hgweb-symrev.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgweb-symrev.t	Tue Feb 28 11:13:25 2017 -0800
@@ -37,7 +37,7 @@
 
 (De)referencing symbolic revisions (paper)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=paper' | egrep $REVLINKS
   <li><a href="/graph/tip?style=paper">graph</a></li>
   <li><a href="/rev/tip?style=paper">changeset</a></li>
   <li><a href="/file/tip?style=paper">browse</a></li>
@@ -52,7 +52,7 @@
   <a href="/shortlog/tip?revcount=120&style=paper">more</a>
   | rev 2: <a href="/shortlog/43c799df6e75?style=paper">(0)</a> <a href="/shortlog/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=paper">log</a></li>
   <li><a href="/rev/tip?style=paper">changeset</a></li>
   <li><a href="/file/tip?style=paper">browse</a></li>
@@ -63,7 +63,7 @@
   <a href="/graph/tip?revcount=120&style=paper">more</a>
   | rev 2: <a href="/graph/43c799df6e75?style=paper">(0)</a> <a href="/graph/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=paper">log</a></li>
   <li><a href="/graph/tip?style=paper">graph</a></li>
   <li><a href="/rev/tip?style=paper">changeset</a></li>
@@ -74,24 +74,24 @@
   <a href="/file/tip/dir/?style=paper">
   <a href="/file/tip/foo?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=paper' | egrep $REVLINKS
   <a href="/shortlog/default?style=paper" class="open">
   <a href="/shortlog/9d8c40cba617?style=paper" class="open">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=paper' | egrep $REVLINKS
   <a href="/rev/tip?style=paper">
   <a href="/rev/9d8c40cba617?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=paper' | egrep $REVLINKS
   <a href="/rev/xyzzy?style=paper">
   <a href="/rev/a7c1559b7bba?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=paper&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=paper&rev=all()' | egrep $REVLINKS
      <a href="/rev/9d8c40cba617?style=paper">third</a>
      <a href="/rev/a7c1559b7bba?style=paper">second</a>
      <a href="/rev/43c799df6e75?style=paper">first</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=paper' | egrep $REVLINKS
    <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
    <li><a href="/graph/xyzzy?style=paper">graph</a></li>
    <li><a href="/raw-rev/xyzzy?style=paper">raw</a></li>
@@ -102,7 +102,7 @@
    <td class="author"> <a href="/rev/9d8c40cba617?style=paper">9d8c40cba617</a></td>
    <td class="files"><a href="/file/a7c1559b7bba/foo?style=paper">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=paper' | egrep $REVLINKS
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
   <li><a href="/file/xyzzy?style=paper">browse</a></li>
@@ -116,7 +116,7 @@
   <a href="/shortlog/xyzzy?revcount=120&style=paper">more</a>
   | rev 1: <a href="/shortlog/43c799df6e75?style=paper">(0)</a> <a href="/shortlog/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
   <li><a href="/file/xyzzy?style=paper">browse</a></li>
@@ -127,7 +127,7 @@
   <a href="/graph/xyzzy?revcount=120&style=paper">more</a>
   | rev 1: <a href="/graph/43c799df6e75?style=paper">(0)</a> <a href="/graph/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -138,7 +138,7 @@
   <a href="/file/xyzzy/dir/?style=paper">
   <a href="/file/xyzzy/foo?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -153,7 +153,7 @@
    <td class="author"><a href="/file/43c799df6e75/foo?style=paper">43c799df6e75</a> </td>
    <td class="author"><a href="/file/9d8c40cba617/foo?style=paper">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=paper' | egrep $REVLINKS
      href="/atom-log/tip/foo" title="Atom feed for test:foo" />
      href="/rss-log/tip/foo" title="RSS feed for test:foo" />
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
@@ -176,7 +176,7 @@
   <a href="/log/xyzzy/foo?revcount=120&style=paper">more</a>
   | <a href="/log/43c799df6e75/foo?style=paper">(0)</a> <a href="/log/tip/foo?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -200,7 +200,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=paper">diff</a>
   <a href="/rev/a7c1559b7bba?style=paper">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -215,7 +215,7 @@
    <td><a href="/file/43c799df6e75/foo?style=paper">43c799df6e75</a> </td>
    <td><a href="/file/9d8c40cba617/foo?style=paper">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -232,7 +232,7 @@
 
 (De)referencing symbolic revisions (coal)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=coal' | egrep $REVLINKS
   <li><a href="/graph/tip?style=coal">graph</a></li>
   <li><a href="/rev/tip?style=coal">changeset</a></li>
   <li><a href="/file/tip?style=coal">browse</a></li>
@@ -247,7 +247,7 @@
   <a href="/shortlog/tip?revcount=120&style=coal">more</a>
   | rev 2: <a href="/shortlog/43c799df6e75?style=coal">(0)</a> <a href="/shortlog/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=coal">log</a></li>
   <li><a href="/rev/tip?style=coal">changeset</a></li>
   <li><a href="/file/tip?style=coal">browse</a></li>
@@ -258,7 +258,7 @@
   <a href="/graph/tip?revcount=120&style=coal">more</a>
   | rev 2: <a href="/graph/43c799df6e75?style=coal">(0)</a> <a href="/graph/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=coal">log</a></li>
   <li><a href="/graph/tip?style=coal">graph</a></li>
   <li><a href="/rev/tip?style=coal">changeset</a></li>
@@ -269,24 +269,24 @@
   <a href="/file/tip/dir/?style=coal">
   <a href="/file/tip/foo?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=coal' | egrep $REVLINKS
   <a href="/shortlog/default?style=coal" class="open">
   <a href="/shortlog/9d8c40cba617?style=coal" class="open">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=coal' | egrep $REVLINKS
   <a href="/rev/tip?style=coal">
   <a href="/rev/9d8c40cba617?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=coal' | egrep $REVLINKS
   <a href="/rev/xyzzy?style=coal">
   <a href="/rev/a7c1559b7bba?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=coal&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=coal&rev=all()' | egrep $REVLINKS
      <a href="/rev/9d8c40cba617?style=coal">third</a>
      <a href="/rev/a7c1559b7bba?style=coal">second</a>
      <a href="/rev/43c799df6e75?style=coal">first</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=coal' | egrep $REVLINKS
    <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
    <li><a href="/graph/xyzzy?style=coal">graph</a></li>
    <li><a href="/raw-rev/xyzzy?style=coal">raw</a></li>
@@ -297,7 +297,7 @@
    <td class="author"> <a href="/rev/9d8c40cba617?style=coal">9d8c40cba617</a></td>
    <td class="files"><a href="/file/a7c1559b7bba/foo?style=coal">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=coal' | egrep $REVLINKS
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
   <li><a href="/file/xyzzy?style=coal">browse</a></li>
@@ -311,7 +311,7 @@
   <a href="/shortlog/xyzzy?revcount=120&style=coal">more</a>
   | rev 1: <a href="/shortlog/43c799df6e75?style=coal">(0)</a> <a href="/shortlog/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
   <li><a href="/file/xyzzy?style=coal">browse</a></li>
@@ -322,7 +322,7 @@
   <a href="/graph/xyzzy?revcount=120&style=coal">more</a>
   | rev 1: <a href="/graph/43c799df6e75?style=coal">(0)</a> <a href="/graph/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -333,7 +333,7 @@
   <a href="/file/xyzzy/dir/?style=coal">
   <a href="/file/xyzzy/foo?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -348,7 +348,7 @@
    <td class="author"><a href="/file/43c799df6e75/foo?style=coal">43c799df6e75</a> </td>
    <td class="author"><a href="/file/9d8c40cba617/foo?style=coal">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=coal' | egrep $REVLINKS
      href="/atom-log/tip/foo" title="Atom feed for test:foo" />
      href="/rss-log/tip/foo" title="RSS feed for test:foo" />
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
@@ -371,7 +371,7 @@
   <a href="/log/xyzzy/foo?revcount=120&style=coal">more</a>
   | <a href="/log/43c799df6e75/foo?style=coal">(0)</a> <a href="/log/tip/foo?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -395,7 +395,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=coal">diff</a>
   <a href="/rev/a7c1559b7bba?style=coal">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -410,7 +410,7 @@
    <td><a href="/file/43c799df6e75/foo?style=coal">43c799df6e75</a> </td>
    <td><a href="/file/9d8c40cba617/foo?style=coal">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -427,7 +427,7 @@
 
 (De)referencing symbolic revisions (gitweb)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'summary?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'summary?style=gitweb' | egrep $REVLINKS
   <a href="/file?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a>  |
   <a class="list" href="/rev/9d8c40cba617?style=gitweb">
   <a href="/rev/9d8c40cba617?style=gitweb">changeset</a> |
@@ -447,7 +447,7 @@
   <a href="/log/9d8c40cba617?style=gitweb">changelog</a> |
   <a href="/file/9d8c40cba617?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=gitweb' | egrep $REVLINKS
   <a href="/log/tip?style=gitweb">changelog</a> |
   <a href="/graph/tip?style=gitweb">graph</a> |
   <a href="/file/tip?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a>  |
@@ -463,7 +463,7 @@
   <a href="/file/43c799df6e75?style=gitweb">files</a>
   <a href="/shortlog/43c799df6e75?style=gitweb">(0)</a> <a href="/shortlog/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/tip?style=gitweb">shortlog</a> |
   <a href="/graph/tip?style=gitweb">graph</a> |
   <a href="/file/tip?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a>  |
@@ -476,7 +476,7 @@
   <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/>
   <a href="/log/43c799df6e75?style=gitweb">(0)</a>  <a href="/log/tip?style=gitweb">tip</a> <br/>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/tip?style=gitweb">shortlog</a> |
   <a href="/log/tip?style=gitweb">changelog</a> |
   <a href="/file/tip?style=gitweb">files</a> |
@@ -487,25 +487,25 @@
   <a href="/graph/tip?revcount=120&style=gitweb">more</a>
   | <a href="/graph/43c799df6e75?style=gitweb">(0)</a> <a href="/graph/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=gitweb' | egrep $REVLINKS
   <td><a class="list" href="/rev/tip?style=gitweb"><b>tip</b></a></td>
   <a href="/rev/9d8c40cba617?style=gitweb">changeset</a> |
   <a href="/log/9d8c40cba617?style=gitweb">changelog</a> |
   <a href="/file/9d8c40cba617?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=gitweb' | egrep $REVLINKS
   <td><a class="list" href="/rev/xyzzy?style=gitweb"><b>xyzzy</b></a></td>
   <a href="/rev/a7c1559b7bba?style=gitweb">changeset</a> |
   <a href="/log/a7c1559b7bba?style=gitweb">changelog</a> |
   <a href="/file/a7c1559b7bba?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=gitweb' | egrep $REVLINKS
   <td class="open"><a class="list" href="/shortlog/default?style=gitweb"><b>default</b></a></td>
   <a href="/changeset/9d8c40cba617?style=gitweb">changeset</a> |
   <a href="/log/9d8c40cba617?style=gitweb">changelog</a> |
   <a href="/file/9d8c40cba617?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=gitweb' | egrep $REVLINKS
   <a href="/rev/tip?style=gitweb">changeset</a>  | <a href="/archive/tip.zip">zip</a>  |
   <td><a href="/file/tip/?style=gitweb">[up]</a></td>
   <a href="/file/tip/dir?style=gitweb">dir</a>
@@ -516,7 +516,7 @@
   <a href="/log/tip/foo?style=gitweb">revisions</a> |
   <a href="/annotate/tip/foo?style=gitweb">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=gitweb&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=gitweb&rev=all()' | egrep $REVLINKS
   <a href="/file?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a> 
   <a class="title" href="/rev/9d8c40cba617?style=gitweb"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>third<span class="logtags"> <span class="branchtag" title="default">default</span> <span class="tagtag" title="tip">tip</span> </span></a>
   <a href="/rev/9d8c40cba617?style=gitweb">changeset</a><br/>
@@ -525,7 +525,7 @@
   <a class="title" href="/rev/43c799df6e75?style=gitweb"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>first<span class="logtags"> </span></a>
   <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> |
   <a href="/log/xyzzy?style=gitweb">changelog</a> |
   <a href="/graph/xyzzy?style=gitweb">graph</a> |
@@ -542,7 +542,7 @@
   <a href="/comparison/a7c1559b7bba/foo?style=gitweb">comparison</a> |
   <a href="/log/a7c1559b7bba/foo?style=gitweb">revisions</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/log/xyzzy?style=gitweb">changelog</a> |
   <a href="/graph/xyzzy?style=gitweb">graph</a> |
   <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/archive/xyzzy.zip">zip</a>  |
@@ -555,7 +555,7 @@
   <a href="/file/43c799df6e75?style=gitweb">files</a>
   <a href="/shortlog/43c799df6e75?style=gitweb">(0)</a> <a href="/shortlog/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> |
   <a href="/graph/xyzzy?style=gitweb">graph</a> |
   <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/archive/xyzzy.zip">zip</a>  |
@@ -566,7 +566,7 @@
   <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/>
   <a href="/log/43c799df6e75?style=gitweb">(0)</a>  <a href="/log/tip?style=gitweb">tip</a> <br/>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> |
   <a href="/log/xyzzy?style=gitweb">changelog</a> |
   <a href="/file/xyzzy?style=gitweb">files</a> |
@@ -577,7 +577,7 @@
   <a href="/graph/xyzzy?revcount=120&style=gitweb">more</a>
   | <a href="/graph/43c799df6e75?style=gitweb">(0)</a> <a href="/graph/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/rev/xyzzy?style=gitweb">changeset</a>  | <a href="/archive/xyzzy.zip">zip</a>  |
   <td><a href="/file/xyzzy/?style=gitweb">[up]</a></td>
   <a href="/file/xyzzy/dir?style=gitweb">dir</a>
@@ -588,7 +588,7 @@
   <a href="/log/xyzzy/foo?style=gitweb">revisions</a> |
   <a href="/annotate/xyzzy/foo?style=gitweb">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy/?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/tip/foo?style=gitweb">latest</a> |
@@ -601,7 +601,7 @@
   <a class="list" href="/file/43c799df6e75/foo?style=gitweb">
   <a class="list" href="/file/9d8c40cba617/foo?style=gitweb">9d8c40cba617</a></td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
   <a href="/annotate/xyzzy/foo?style=gitweb">annotate</a> |
   <a href="/diff/xyzzy/foo?style=gitweb">diff</a> |
@@ -616,9 +616,11 @@
   <a href="/file/43c799df6e75/foo?style=gitweb">file</a> |
   <a href="/diff/43c799df6e75/foo?style=gitweb">diff</a> |
   <a href="/annotate/43c799df6e75/foo?style=gitweb">annotate</a>
+  <a href="/log/xyzzy/foo?revcount=30&style=gitweb">less</a>
+  <a href="/log/xyzzy/foo?revcount=120&style=gitweb">more</a>
   <a href="/log/43c799df6e75/foo?style=gitweb">(0)</a> <a href="/log/tip/foo?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy/?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
@@ -640,7 +642,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=gitweb">diff</a>
   <a href="/rev/a7c1559b7bba?style=gitweb">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
@@ -653,7 +655,7 @@
   <a class="list" href="/diff/43c799df6e75/foo?style=gitweb">
   <a class="list" href="/diff/9d8c40cba617/foo?style=gitweb">9d8c40cba617</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
@@ -668,7 +670,7 @@
 
 (De)referencing symbolic revisions (monoblue)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'summary?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'summary?style=monoblue' | egrep $REVLINKS
               <li><a href="/archive/tip.zip">zip</a></li>
   <a href="/rev/9d8c40cba617?style=monoblue">
   <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> |
@@ -688,7 +690,7 @@
   <a href="/log/9d8c40cba617?style=monoblue">changelog</a> |
   <a href="/file/9d8c40cba617?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/tip?style=monoblue">graph</a></li>
               <li><a href="/file/tip?style=monoblue">files</a></li>
               <li><a href="/archive/tip.zip">zip</a></li>
@@ -703,7 +705,7 @@
   <a href="/file/43c799df6e75?style=monoblue">files</a>
       <a href="/shortlog/43c799df6e75?style=monoblue">(0)</a> <a href="/shortlog/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/tip?style=monoblue">graph</a></li>
               <li><a href="/file/tip?style=monoblue">files</a></li>
               <li><a href="/archive/tip.zip">zip</a></li>
@@ -712,31 +714,31 @@
   <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3>
   <a href="/log/43c799df6e75?style=monoblue">(0)</a>  <a href="/log/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=monoblue' | egrep $REVLINKS
               <li><a href="/file/tip?style=monoblue">files</a></li>
           <a href="/graph/tip?revcount=30&style=monoblue">less</a>
           <a href="/graph/tip?revcount=120&style=monoblue">more</a>
           | <a href="/graph/43c799df6e75?style=monoblue">(0)</a> <a href="/graph/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=monoblue' | egrep $REVLINKS
   <td><a href="/rev/tip?style=monoblue">tip</a></td>
   <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> |
   <a href="/log/9d8c40cba617?style=monoblue">changelog</a> |
   <a href="/file/9d8c40cba617?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=monoblue' | egrep $REVLINKS
   <td><a href="/rev/xyzzy?style=monoblue">xyzzy</a></td>
   <a href="/rev/a7c1559b7bba?style=monoblue">changeset</a> |
   <a href="/log/a7c1559b7bba?style=monoblue">changelog</a> |
   <a href="/file/a7c1559b7bba?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=monoblue' | egrep $REVLINKS
   <td class="open"><a href="/shortlog/default?style=monoblue">default</a></td>
   <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> |
   <a href="/log/9d8c40cba617?style=monoblue">changelog</a> |
   <a href="/file/9d8c40cba617?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/tip?style=monoblue">graph</a></li>
           <li><a href="/rev/tip?style=monoblue">changeset</a></li>
           <li><a href="/archive/tip.zip">zip</a></li>
@@ -749,13 +751,13 @@
   <a href="/log/tip/foo?style=monoblue">revisions</a> |
   <a href="/annotate/tip/foo?style=monoblue">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=monoblue&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=monoblue&rev=all()' | egrep $REVLINKS
               <li><a href="/archive/tip.zip">zip</a></li>
       <h3 class="changelog"><a class="title" href="/rev/9d8c40cba617?style=monoblue">third<span class="logtags"> <span class="branchtag" title="default">default</span> <span class="tagtag" title="tip">tip</span> </span></a></h3>
   <h3 class="changelog"><a class="title" href="/rev/a7c1559b7bba?style=monoblue">second<span class="logtags"> <span class="bookmarktag" title="xyzzy">xyzzy</span> </span></a></h3>
   <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/raw-rev/xyzzy">raw</a></li>
@@ -771,7 +773,7 @@
   <a href="/comparison/a7c1559b7bba/foo?style=monoblue">comparison</a> |
   <a href="/log/a7c1559b7bba/foo?style=monoblue">revisions</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
               <li><a href="/archive/xyzzy.zip">zip</a></li>
@@ -783,7 +785,7 @@
   <a href="/file/43c799df6e75?style=monoblue">files</a>
       <a href="/shortlog/43c799df6e75?style=monoblue">(0)</a> <a href="/shortlog/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
               <li><a href="/archive/xyzzy.zip">zip</a></li>
@@ -791,13 +793,13 @@
   <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3>
   <a href="/log/43c799df6e75?style=monoblue">(0)</a>  <a href="/log/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <a href="/graph/xyzzy?revcount=30&style=monoblue">less</a>
           <a href="/graph/xyzzy?revcount=120&style=monoblue">more</a>
           | <a href="/graph/43c799df6e75?style=monoblue">(0)</a> <a href="/graph/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
           <li><a href="/rev/xyzzy?style=monoblue">changeset</a></li>
           <li><a href="/archive/xyzzy.zip">zip</a></li>
@@ -810,7 +812,7 @@
   <a href="/log/xyzzy/foo?style=monoblue">revisions</a> |
   <a href="/annotate/xyzzy/foo?style=monoblue">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy/?style=monoblue">files</a></li>
           <li><a href="/file/tip/foo?style=monoblue">latest</a></li>
@@ -823,7 +825,7 @@
   <a href="/file/43c799df6e75/foo?style=monoblue">
   <a href="/file/9d8c40cba617/foo?style=monoblue">9d8c40cba617</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -841,7 +843,7 @@
   <a href="/annotate/43c799df6e75/foo?style=monoblue">annotate</a>
       <a href="/log/43c799df6e75/foo?style=monoblue">(0)</a> <a href="/log/tip/foo?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy/?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -863,7 +865,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=monoblue">diff</a>
   <a href="/rev/a7c1559b7bba?style=monoblue">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -876,7 +878,7 @@
   <dd><a href="/diff/43c799df6e75/foo?style=monoblue">43c799df6e75</a></dd>
   <dd><a href="/diff/9d8c40cba617/foo?style=monoblue">9d8c40cba617</a></dd>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -891,7 +893,7 @@
 
 (De)referencing symbolic revisions (spartan)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=spartan' | egrep $REVLINKS
   <a href="/log/tip?style=spartan">changelog</a>
   <a href="/graph/tip?style=spartan">graph</a>
   <a href="/file/tip/?style=spartan">files</a>
@@ -902,7 +904,7 @@
     <td class="node"><a href="/rev/43c799df6e75?style=spartan">first</a></td>
   navigate: <small class="navigate"><a href="/shortlog/43c799df6e75?style=spartan">(0)</a> <a href="/shortlog/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=spartan' | egrep $REVLINKS
   <a href="/shortlog/tip?style=spartan">shortlog</a>
   <a href="/graph/tip?style=spartan">graph</a>
   <a href="/file/tip?style=spartan">files</a>
@@ -919,20 +921,20 @@
     <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td>
   navigate: <small class="navigate"><a href="/log/43c799df6e75?style=spartan">(0)</a>  <a href="/log/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=spartan' | egrep $REVLINKS
   <a href="/log/tip?style=spartan">changelog</a>
   <a href="/shortlog/tip?style=spartan">shortlog</a>
   <a href="/file/tip/?style=spartan">files</a>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=spartan' | egrep $REVLINKS
   <a href="/rev/9d8c40cba617?style=spartan">tip</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=spartan' | egrep $REVLINKS
   <a href="/shortlog/9d8c40cba617?style=spartan" class="open">default</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=spartan' | egrep $REVLINKS
   <a href="/log/tip?style=spartan">changelog</a>
   <a href="/shortlog/tip?style=spartan">shortlog</a>
   <a href="/graph/tip?style=spartan">graph</a>
@@ -944,7 +946,7 @@
   <a href="/file/tip/dir/?style=spartan">
   <td><a href="/file/tip/foo?style=spartan">foo</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=spartan&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=spartan&rev=all()' | egrep $REVLINKS
   <a href="/archive/tip.zip">zip</a> 
     <td class="node"><a href="/rev/9d8c40cba617?style=spartan">9d8c40cba617</a></td>
   <a href="/rev/a7c1559b7bba?style=spartan">a7c1559b7bba</a>
@@ -960,7 +962,7 @@
     <th class="files"><a href="/file/43c799df6e75?style=spartan">files</a>:</th>
     <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -972,7 +974,7 @@
   <td class="child"><a href="/rev/9d8c40cba617?style=spartan">9d8c40cba617</a></td>
    <td class="files"><a href="/file/a7c1559b7bba/foo?style=spartan">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
   <a href="/file/xyzzy/?style=spartan">files</a>
@@ -982,7 +984,7 @@
     <td class="node"><a href="/rev/43c799df6e75?style=spartan">first</a></td>
   navigate: <small class="navigate"><a href="/shortlog/43c799df6e75?style=spartan">(0)</a> <a href="/shortlog/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
   <a href="/file/xyzzy?style=spartan">files</a>
@@ -996,14 +998,14 @@
     <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td>
   navigate: <small class="navigate"><a href="/log/43c799df6e75?style=spartan">(0)</a>  <a href="/log/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/file/xyzzy/?style=spartan">files</a>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -1015,7 +1017,7 @@
   <a href="/file/xyzzy/dir/?style=spartan">
   <td><a href="/file/xyzzy/foo?style=spartan">foo</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -1028,7 +1030,7 @@
   <a href="/file/43c799df6e75/foo?style=spartan">
   <td><a href="/file/9d8c40cba617/foo?style=spartan">9d8c40cba617</a></td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=spartan' | egrep $REVLINKS
      href="/atom-log/tip/foo" title="Atom feed for test:foo">
      href="/rss-log/tip/foo" title="RSS feed for test:foo">
   <a href="/file/xyzzy/foo?style=spartan">file</a>
@@ -1045,7 +1047,7 @@
      <a href="/diff/43c799df6e75/foo?style=spartan">(diff)</a>
      <a href="/annotate/43c799df6e75/foo?style=spartan">(annotate)</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -1067,7 +1069,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=spartan">diff</a>
   <a href="/rev/a7c1559b7bba?style=spartan">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
--- a/tests/test-hgwebdir.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hgwebdir.t	Tue Feb 28 11:13:25 2017 -0800
@@ -1421,7 +1421,7 @@
   > EOF
   $ hg serve -d --pid-file=hg.pid --web-conf paths.conf \
   >     -A access-paths.log -E error-paths-9.log
-  listening at http://*:$HGPORT1/ (bound to 127.0.0.1:$HGPORT1) (glob)
+  listening at http://*:$HGPORT1/ (bound to *$LOCALIP*:$HGPORT1) (glob)
   $ cat hg.pid >> $DAEMON_PIDS
   $ get-with-headers.py localhost:$HGPORT1 '?style=raw'
   200 Script output follows
@@ -1433,7 +1433,7 @@
   $ killdaemons.py
   $ hg serve -p $HGPORT2 -d -v --pid-file=hg.pid --web-conf paths.conf \
   >     -A access-paths.log -E error-paths-10.log
-  listening at http://*:$HGPORT2/ (bound to 127.0.0.1:$HGPORT2) (glob)
+  listening at http://*:$HGPORT2/ (bound to *$LOCALIP*:$HGPORT2) (glob)
   $ cat hg.pid >> $DAEMON_PIDS
   $ get-with-headers.py localhost:$HGPORT2 '?style=raw'
   200 Script output follows
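
The two "listening at ... (bound to *$LOCALIP*...)" expectations above lean on the test
harness's "(glob)" annotation so the bound address may vary. A rough fnmatch-based
approximation of that matching, for illustration only — Mercurial's run-tests.py matcher
differs in its details:

# Rough illustration of "(glob)" matching in .t expected output; an
# approximation for explanation, not run-tests.py's actual code.
import fnmatch

def glob_line_matches(expected, actual):
    suffix = ' (glob)'
    if not expected.endswith(suffix):
        return expected == actual
    # strip the marker and compare with shell-style wildcards
    return fnmatch.fnmatchcase(actual, expected[:-len(suffix)])

# a "(glob)" expectation tolerating any bound address (placeholder values):
print(glob_line_matches(
    'listening at http://*:8080/ (bound to *192.0.2.7*:8080) (glob)',
    'listening at http://0.0.0.0:8080/ (bound to 192.0.2.7:8080)'))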
--- a/tests/test-histedit-arguments.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-arguments.t	Tue Feb 28 11:13:25 2017 -0800
@@ -72,7 +72,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 Run on a revision not ancestors of the current working directory.
@@ -308,7 +308,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 Test --continue with --keep
@@ -544,7 +544,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
   $ cd ..
--- a/tests/test-histedit-bookmark-motion.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-bookmark-motion.t	Tue Feb 28 11:13:25 2017 -0800
@@ -78,7 +78,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose << EOF | grep histedit
   > pick 177f92b77385 2 c
@@ -141,7 +141,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose << EOF | grep histedit
   > pick b346ab9a313d 1 c
--- a/tests/test-histedit-commute.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-commute.t	Tue Feb 28 11:13:25 2017 -0800
@@ -72,7 +72,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 edit the history
@@ -350,7 +350,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 should also work if a commit message is missing
--- a/tests/test-histedit-edit.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-edit.t	Tue Feb 28 11:13:25 2017 -0800
@@ -478,5 +478,5 @@
   #  p, fold = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
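
The histedit hunks above update roll's in-editor help to say the date is discarded along
with the description. The fold tests that follow pin down the exact rule: fold keeps the
later of the two commit dates, roll keeps only the fold target's date (and description).
A small sketch derived from the expected output below, not from histedit's implementation:

# Sketch of the date rule the histedit tests below assert. Derived
# from the expected test output, not from histedit's real code.

def combined_date(target, other, action):
    if action == 'fold':
        return max(target, other)   # fold -> later date wins
    if action == 'roll':
        return target               # roll -> target's date kept
    raise ValueError('unknown action: %s' % action)

# d (00:00:04) absorbs "does not commute with e" (00:00:07):
assert combined_date(4, 7, 'fold') == 7
assert combined_date(4, 7, 'roll') == 4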
--- a/tests/test-histedit-fold-non-commute.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-fold-non-commute.t	Tue Feb 28 11:13:25 2017 -0800
@@ -5,6 +5,12 @@
   > histedit=
   > EOF
 
+  $ modwithdate ()
+  > {
+  >     echo $1 > $1
+  >     hg ci -m $1 -d "$2 0"
+  > }
+
   $ initrepo ()
   > {
   >     hg init $1
@@ -14,12 +20,14 @@
   >         hg add $x
   >     done
   >     hg ci -m 'Initial commit'
-  >     for x in a b c d e f ; do
-  >         echo $x > $x
-  >         hg ci -m $x
-  >     done
+  >     modwithdate a 1
+  >     modwithdate b 2
+  >     modwithdate c 3
+  >     modwithdate d 4
+  >     modwithdate e 5
+  >     modwithdate f 6
   >     echo 'I can haz no commute' > e
-  >     hg ci -m 'does not commute with e'
+  >     hg ci -m 'does not commute with e' -d '7 0'
   >     cd ..
   > }
 
@@ -34,48 +42,48 @@
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 5 >> $EDITED
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 6 >> $EDITED
   $ cat $EDITED
-  pick 65a9a84f33fd 3 c
-  pick 00f1c5383965 4 d
-  fold 39522b764e3d 7 does not commute with e
-  pick 7b4e2f4b7bcd 5 e
-  pick 500cac37a696 6 f
+  pick 092e4ce14829 3 c
+  pick ae78f4c9d74f 4 d
+  fold 42abbb61bede 7 does not commute with e
+  pick 7f3755409b00 5 e
+  pick dd184f2faeb0 6 f
 
 log before edit
   $ hg log --graph
-  @  changeset:   7:39522b764e3d
+  @  changeset:   7:42abbb61bede
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     does not commute with e
   |
-  o  changeset:   6:500cac37a696
+  o  changeset:   6:dd184f2faeb0
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   5:7b4e2f4b7bcd
+  o  changeset:   5:7f3755409b00
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:05 1970 +0000
   |  summary:     e
   |
-  o  changeset:   4:00f1c5383965
+  o  changeset:   4:ae78f4c9d74f
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -89,7 +97,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (fold 39522b764e3d)
+  Fix up the change (fold 42abbb61bede)
   (hg histedit --continue to resume)
 
 fix up
@@ -113,7 +121,7 @@
   HG: changed e
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (pick 7b4e2f4b7bcd)
+  Fix up the change (pick 7f3755409b00)
   (hg histedit --continue to resume)
 
 just continue this time
@@ -124,34 +132,34 @@
   continue: hg histedit --continue
   $ hg diff
   $ hg histedit --continue 2>&1 | fixbundle
-  7b4e2f4b7bcd: skipping changeset (no changes)
+  7f3755409b00: skipping changeset (no changes)
 
 log after edit
   $ hg log --graph
-  @  changeset:   5:d9cf42e54966
+  @  changeset:   5:1300355b1a54
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   4:10486af2e984
+  o  changeset:   4:e2ac33269083
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -175,7 +183,7 @@
 
   $ cd ..
 
-Repeat test using "roll", not "fold". "roll" folds in changes but drops message
+Repeat test using "roll", not "fold". "roll" folds in changes but drops message and date
 
   $ initrepo r2
   $ cd r2
@@ -189,48 +197,48 @@
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 5 >> $EDITED
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 6 >> $EDITED
   $ cat $EDITED
-  pick 65a9a84f33fd 3 c
-  pick 00f1c5383965 4 d
-  roll 39522b764e3d 7 does not commute with e
-  pick 7b4e2f4b7bcd 5 e
-  pick 500cac37a696 6 f
+  pick 092e4ce14829 3 c
+  pick ae78f4c9d74f 4 d
+  roll 42abbb61bede 7 does not commute with e
+  pick 7f3755409b00 5 e
+  pick dd184f2faeb0 6 f
 
 log before edit
   $ hg log --graph
-  @  changeset:   7:39522b764e3d
+  @  changeset:   7:42abbb61bede
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     does not commute with e
   |
-  o  changeset:   6:500cac37a696
+  o  changeset:   6:dd184f2faeb0
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   5:7b4e2f4b7bcd
+  o  changeset:   5:7f3755409b00
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:05 1970 +0000
   |  summary:     e
   |
-  o  changeset:   4:00f1c5383965
+  o  changeset:   4:ae78f4c9d74f
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -244,7 +252,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (roll 39522b764e3d)
+  Fix up the change (roll 42abbb61bede)
   (hg histedit --continue to resume)
 
 fix up
@@ -255,7 +263,7 @@
   $ hg histedit --continue 2>&1 | fixbundle | grep -v '2 files removed'
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (pick 7b4e2f4b7bcd)
+  Fix up the change (pick 7f3755409b00)
   (hg histedit --continue to resume)
 
 just continue this time
@@ -264,34 +272,34 @@
   (no more unresolved files)
   continue: hg histedit --continue
   $ hg histedit --continue 2>&1 | fixbundle
-  7b4e2f4b7bcd: skipping changeset (no changes)
+  7f3755409b00: skipping changeset (no changes)
 
 log after edit
   $ hg log --graph
-  @  changeset:   5:e7c4f5d4eb75
+  @  changeset:   5:b538bcb461be
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   4:803d1bb561fc
+  o  changeset:   4:317e37cb6d66
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -316,16 +324,16 @@
 description is taken from rollup target commit
 
   $ hg log --debug --rev 4
-  changeset:   4:803d1bb561fceac3129ec778db9da249a3106fc3
+  changeset:   4:317e37cb6d66c1c84628c00e5bf4c8c292831951
   phase:       draft
-  parent:      3:65a9a84f33fdeb1ad5679b3941ec885d2b24027b
+  parent:      3:092e4ce14829f4974399ce4316d59f64ef0b6725
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    4:b068a323d969f22af1296ec6a5ea9384cef437ac
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:04 1970 +0000
   files:       d e
   extra:       branch=default
-  extra:       histedit_source=00f1c53839651fa5c76d423606811ea5455a79d0,39522b764e3d26103f08bd1fa2ccd3e3d7dbcf4e
+  extra:       histedit_source=ae78f4c9d74ffa4b6cb5045001c303fe9204e890,42abbb61bede6f4366fa1e74a664343e5d558a70
   description:
   d
   
--- a/tests/test-histedit-fold.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-fold.t	Tue Feb 28 11:13:25 2017 -0800
@@ -20,52 +20,60 @@
 
 Simple folding
 --------------------
+  $ addwithdate ()
+  > {
+  >     echo $1 > $1
+  >     hg add $1
+  >     hg ci -m $1 -d "$2 0"
+  > }
+
   $ initrepo ()
   > {
   >     hg init r
   >     cd r
-  >     for x in a b c d e f ; do
-  >         echo $x > $x
-  >         hg add $x
-  >         hg ci -m $x
-  >     done
+  >     addwithdate a 1
+  >     addwithdate b 2
+  >     addwithdate c 3
+  >     addwithdate d 4
+  >     addwithdate e 5
+  >     addwithdate f 6
   > }
 
   $ initrepo
 
 log before edit
   $ hg logt --graph
-  @  5:652413bf663e f
+  @  5:178e35e0ce73 f
   |
-  o  4:e860deea161a e
+  o  4:1ddb6c90f2ee e
   |
-  o  3:055a42cdd887 d
+  o  3:532247a8969b d
   |
-  o  2:177f92b77385 c
+  o  2:ff2c9fa2018b c
   |
-  o  1:d2ae7f538514 b
+  o  1:97d72e5f12c7 b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
   
 
-  $ hg histedit 177f92b77385 --commands - 2>&1 <<EOF | fixbundle
-  > pick e860deea161a e
-  > pick 652413bf663e f
-  > fold 177f92b77385 c
-  > pick 055a42cdd887 d
+  $ hg histedit ff2c9fa2018b --commands - 2>&1 <<EOF | fixbundle
+  > pick 1ddb6c90f2ee e
+  > pick 178e35e0ce73 f
+  > fold ff2c9fa2018b c
+  > pick 532247a8969b d
   > EOF
 
 log after edit
   $ hg logt --graph
-  @  4:9c277da72c9b d
+  @  4:c4d7f3def76d d
   |
-  o  3:6de59d13424a f
+  o  3:575228819b7e f
   |
-  o  2:ee283cb5f2d5 e
+  o  2:505a591af19e e
   |
-  o  1:d2ae7f538514 b
+  o  1:97d72e5f12c7 b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
   
 
 post-fold manifest
@@ -78,19 +86,19 @@
   f
 
 
-check histedit_source
+check histedit_source, including that the fold uses the later date, taken from the first changeset
 
   $ hg log --debug --rev 3
-  changeset:   3:6de59d13424a8a13acd3e975514aed29dd0d9b2d
+  changeset:   3:575228819b7e6ed69e8c0a6a383ee59a80db7358
   phase:       draft
-  parent:      2:ee283cb5f2d5955443f23a27b697a04339e9a39a
+  parent:      2:505a591af19eed18f560af827b9e03d2076773dc
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    3:81eede616954057198ead0b2c73b41d1f392829a
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:06 1970 +0000
   files+:      c f
   extra:       branch=default
-  extra:       histedit_source=a4f7421b80f79fcc59fff01bcbf4a53d127dd6d3,177f92b773850b59254aa5e923436f921b55483b
+  extra:       histedit_source=7cad1d7030207872dfd1c3a7cb430f24f2884086,ff2c9fa2018b15fa74b33363bda9527323e2a99f
   description:
   f
   ***
@@ -98,43 +106,43 @@
   
   
 
-rollup will fold without preserving the folded commit's message
+rollup will fold without preserving the folded commit's message or date
 
   $ OLDHGEDITOR=$HGEDITOR
   $ HGEDITOR=false
-  $ hg histedit d2ae7f538514 --commands - 2>&1 <<EOF | fixbundle
-  > pick d2ae7f538514 b
-  > roll ee283cb5f2d5 e
-  > pick 6de59d13424a f
-  > pick 9c277da72c9b d
+  $ hg histedit 97d72e5f12c7 --commands - 2>&1 <<EOF | fixbundle
+  > pick 97d72e5f12c7 b
+  > roll 505a591af19e e
+  > pick 575228819b7e f
+  > pick c4d7f3def76d d
   > EOF
 
   $ HGEDITOR=$OLDHGEDITOR
 
 log after edit
   $ hg logt --graph
-  @  3:c4a9eb7989fc d
+  @  3:bab801520cec d
   |
-  o  2:8e03a72b6f83 f
+  o  2:58c8f2bfc151 f
   |
-  o  1:391ee782c689 b
+  o  1:5d939c56c72e b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
   
 
 description is taken from rollup target commit
 
   $ hg log --debug --rev 1
-  changeset:   1:391ee782c68930be438ccf4c6a403daedbfbffa5
+  changeset:   1:5d939c56c72e77e29f5167696218e2131a40f5cf
   phase:       draft
-  parent:      0:cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  parent:      0:8580ff50825a50c8f716709acdf8de0deddcd6ab
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    1:b5e112a3a8354e269b1524729f0918662d847c38
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:02 1970 +0000
   files+:      b e
   extra:       branch=default
-  extra:       histedit_source=d2ae7f538514cd87c17547b0de4cea71fe1af9fb,ee283cb5f2d5955443f23a27b697a04339e9a39a
+  extra:       histedit_source=97d72e5f12c7e84f85064aa72e5a297142c36ed9,505a591af19eed18f560af827b9e03d2076773dc
   description:
   b
   
@@ -163,13 +171,13 @@
   > EOF
 
   $ rm -f .hg/last-message.txt
-  $ hg status --rev '8e03a72b6f83^1::c4a9eb7989fc'
+  $ hg status --rev '58c8f2bfc151^1::bab801520cec'
   A c
   A d
   A f
-  $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit 8e03a72b6f83 --commands - 2>&1 <<EOF
-  > pick 8e03a72b6f83 f
-  > fold c4a9eb7989fc d
+  $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit 58c8f2bfc151 --commands - 2>&1 <<EOF
+  > pick 58c8f2bfc151 f
+  > fold bab801520cec d
   > EOF
   allow non-folding commit
   ==== before editing
@@ -209,37 +217,37 @@
   $ cd ..
   $ rm -r r
 
-folding preserves initial author
---------------------------------
+folding preserves initial author but uses later date
+----------------------------------------------------
 
   $ initrepo
 
-  $ hg ci --user "someone else" --amend --quiet
+  $ hg ci -d '7 0' --user "someone else" --amend --quiet
 
 tip before edit
   $ hg log --rev .
-  changeset:   5:a00ad806cb55
+  changeset:   5:10c36dd37515
   tag:         tip
   user:        someone else
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:07 1970 +0000
   summary:     f
   
 
   $ hg --config progress.debug=1 --debug \
-  > histedit e860deea161a --commands - 2>&1 <<EOF | \
+  > histedit 1ddb6c90f2ee --commands - 2>&1 <<EOF | \
   > egrep 'editing|unresolved'
-  > pick e860deea161a e
-  > fold a00ad806cb55 f
+  > pick 1ddb6c90f2ee e
+  > fold 10c36dd37515 f
   > EOF
-  editing: pick e860deea161a 4 e 1/2 changes (50.00%)
-  editing: fold a00ad806cb55 5 f 2/2 changes (100.00%)
+  editing: pick 1ddb6c90f2ee 4 e 1/2 changes (50.00%)
+  editing: fold 10c36dd37515 5 f 2/2 changes (100.00%)
 
-tip after edit
+tip after edit, which should use the later date, taken from the second changeset
   $ hg log --rev .
-  changeset:   4:698d4e8040a1
+  changeset:   4:e4f3ec5d0b40
   tag:         tip
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:07 1970 +0000
   summary:     e
   
 
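The date handling these test changes pin down is simple to state: a fold keeps
the later of the two dates, while a roll keeps the pick target's date along
with its description. A minimal Python sketch of that rule (illustrative only,
not histedit's actual implementation; dates are (unixtime, tzoffset) pairs as
Mercurial stores them):

  def folddate(targetdate, otherdate):
      # fold: the result uses the later of the two dates
      return max(targetdate, otherdate, key=lambda d: d[0])

  def rolldate(targetdate, otherdate):
      # roll: the rolled-in commit's date (and description) is discarded
      return targetdate

  # matches the expectations above: folding c (t=3) into f (t=6) keeps t=6
  assert folddate((6, 0), (3, 0)) == (6, 0)
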
--- a/tests/test-histedit-obsolete.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-obsolete.t	Tue Feb 28 11:13:25 2017 -0800
@@ -136,7 +136,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose <<EOF | grep histedit
   > pick 177f92b77385 2 c
--- a/tests/test-histedit-outgoing.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-histedit-outgoing.t	Tue Feb 28 11:13:25 2017 -0800
@@ -54,7 +54,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ cd ..
 
@@ -88,7 +88,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ cd ..
 
@@ -114,7 +114,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 test to check number of roots in outgoing revisions
--- a/tests/test-hook.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-hook.t	Tue Feb 28 11:13:25 2017 -0800
@@ -832,6 +832,50 @@
   [1]
   $ cd ..
 
+check whether HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+(emulate a concurrently running transaction by copying
+.hg/store/00changelog.i.a in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/store/00changelog.i.a  .hg/store/00changelog.i.a.saved
+  > exit 1 # to avoid adding new revision for subsequent tests
+  > EOF
+  $ cd a
+  $ hg tip -q
+  4:539e4b31b6dc
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
+
+(check (in)visibility of the new changeset while a transaction is
+running in the repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo '@a'
+  > hg -R $TESTTMP/a tip -q
+  > echo '@a/nested'
+  > hg -R $TESTTMP/a/nested tip -q
+  > exit 1 # to avoid adding new revision for subsequent tests
+  > EOF
+  $ hg init nested
+  $ cd nested
+  $ echo a > a
+  $ hg add a
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
+  @a
+  4:539e4b31b6dc
+  @a/nested
+  0:bf5e395ced2c
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+
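
These checks rely on Mercurial exporting HG_PENDING to external hooks while a
transaction is open. A rough sketch of the visibility rule, written as a
hypothetical hook helper in Python (not Mercurial's implementation): pending
data may be read only when HG_PENDING names the root of the repository being
inspected.

  import os

  def canseepending(reporoot):
      # HG_PENDING is set to the root of the repository whose transaction
      # is still open; a hook inspecting any other repository must keep
      # seeing only committed state
      return os.environ.get('HG_PENDING') == reporoot
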
 Hook from untrusted hgrc are reported as failure
 ================================================
 
--- a/tests/test-http-bundle1.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-http-bundle1.t	Tue Feb 28 11:13:25 2017 -0800
@@ -28,11 +28,11 @@
 
 #if windows
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': * (glob)
+  abort: cannot start server at 'localhost:$HGPORT1': * (glob)
   [255]
 #else
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT1': Address already in use
   [255]
 #endif
   $ cd ..
--- a/tests/test-http-protocol.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-http-protocol.t	Tue Feb 28 11:13:25 2017 -0800
@@ -16,9 +16,9 @@
 compression formats are advertised in compression capability
 
 #if zstd
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
 #else
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
 #endif
 
   $ killdaemons.py
@@ -27,7 +27,7 @@
 
   $ hg --config server.compressionengines=none -R server serve -p $HGPORT -d --pid-file hg.pid
   $ cat hg.pid > $DAEMON_PIDS
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
 
   $ killdaemons.py
 
@@ -35,7 +35,7 @@
 
   $ hg --config server.compressionengines=none,zlib -R server serve -p $HGPORT -d --pid-file hg.pid
   $ cat hg.pid > $DAEMON_PIDS
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
 
   $ killdaemons.py
 
@@ -46,7 +46,7 @@
 
 Server should send application/mercurial-0.1 to clients if no Accept is used
 
-  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -55,7 +55,7 @@
 
 Server should send application/mercurial-0.1 when client says it wants it
 
-  $ get-with-headers.py --hgproto '0.1' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -64,14 +64,14 @@
 
 Server should send application/mercurial-0.2 when client says it wants it
 
-  $ get-with-headers.py --hgproto '0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.2
   date: * (glob)
   server: * (glob)
   transfer-encoding: chunked
 
-  $ get-with-headers.py --hgproto '0.1 0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.2
   date: * (glob)
@@ -80,7 +80,7 @@
 
Requesting a compression format that the server doesn't support will fall back to 0.1
 
-  $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -90,7 +90,7 @@
 #if zstd
 zstd is used if available
 
-  $ get-with-headers.py --hgproto '0.2 comp=zstd' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zstd' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 36 --sha1 resp
   resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -101,7 +101,7 @@
 
 application/mercurial-0.2 is not yet used on non-streaming responses
 
-  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=heads' -
+  $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=heads' -
   200 Script output follows
   content-length: 41
   content-type: application/mercurial-0.1
@@ -118,11 +118,11 @@
 
No Accept will send 0.1+zlib, even though "none" is preferred, because "none" isn't supported on 0.1
 
-  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
+  $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
   200 Script output follows
   content-type: application/mercurial-0.1
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 28 --sha1 resp
   resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -130,7 +130,7 @@
 
 Explicit 0.1 will send zlib because "none" isn't supported on 0.1
 
-  $ get-with-headers.py --hgproto '0.1' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.1' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 28 --sha1 resp
   resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -139,7 +139,7 @@
0.2 with no compression will get "none" because that is the server's preference
 (spec says ZL and UN are implicitly supported)
 
-  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 32 --sha1 resp
   resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -147,7 +147,7 @@
 
Client receives the server's preference even if the local order doesn't match
 
-  $ get-with-headers.py --hgproto '0.2 comp=zlib,none' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zlib,none' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 32 --sha1 resp
   resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -155,7 +155,7 @@
 
Client receives the only supported format even if it is not the server's preferred format
 
-  $ get-with-headers.py --hgproto '0.2 comp=zlib' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zlib' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 33 --sha1 resp
   resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
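
The negotiation exercised throughout this file follows the client's
X-HgProto-* request header, which get-with-headers.py fills in from --hgproto.
A sketch of the fallback rule under that reading (illustrative, not the
server's actual code):

  def pickmediatype(hgproto, servercomps):
      # hgproto is the client's advertisement, e.g. '0.2 comp=zstd,zlib'
      tokens = hgproto.split()
      if '0.2' not in tokens:
          return 'application/mercurial-0.1'
      # ZL (zlib) and UN (none) are implicitly supported when the client
      # names no compression engines
      comps = ['zlib', 'none']
      for tok in tokens:
          if tok.startswith('comp='):
              comps = tok[len('comp='):].split(',')
      if any(c in servercomps for c in comps):
          return 'application/mercurial-0.2'
      return 'application/mercurial-0.1'

  assert pickmediatype('0.2 comp=aa', ['zstd', 'zlib']) == 'application/mercurial-0.1'
  assert pickmediatype('0.2', ['none']) == 'application/mercurial-0.2'
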
--- a/tests/test-http.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-http.t	Tue Feb 28 11:13:25 2017 -0800
@@ -23,7 +23,7 @@
   [255]
 #else
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT1': Address already in use
   [255]
 #endif
   $ cd ..
--- a/tests/test-https.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-https.t	Tue Feb 28 11:13:25 2017 -0800
@@ -36,11 +36,11 @@
 
 #if windows
   $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
-  abort: cannot start server at ':$HGPORT':
+  abort: cannot start server at 'localhost:$HGPORT':
   [255]
 #else
   $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
-  abort: cannot start server at ':$HGPORT': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT': Address already in use
   [255]
 #endif
   $ cd ..
@@ -278,17 +278,17 @@
 cacert mismatch
 
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
-  > https://127.0.0.1:$HGPORT/
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
-  abort: 127.0.0.1 certificate error: certificate is for localhost (glob)
-  (set hostsecurity.127.0.0.1:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely) (glob)
+  > https://$LOCALIP:$HGPORT/
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  abort: $LOCALIP certificate error: certificate is for localhost
+  (set hostsecurity.$LOCALIP:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
   [255]
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
-  > https://127.0.0.1:$HGPORT/ --insecure
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
-  warning: connection security to 127.0.0.1 is disabled per current settings; communication is susceptible to eavesdropping and tampering (glob)
+  > https://$LOCALIP:$HGPORT/ --insecure
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  warning: connection security to $LOCALIP is disabled per current settings; communication is susceptible to eavesdropping and tampering
   searching for changes
   no changes found
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-other.pem"
@@ -434,8 +434,8 @@
 
 
 - ignores that certificate doesn't match hostname
-  $ hg -R copy-pull id https://127.0.0.1:$HGPORT/ --config hostfingerprints.127.0.0.1=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  $ hg -R copy-pull id https://$LOCALIP:$HGPORT/ --config hostfingerprints.$LOCALIP=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
   5fed3813f7f5
 
 Ports used by next test. Kill servers.
@@ -571,9 +571,9 @@
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
   searching for changes
   no changes found
-  $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://127.0.0.1:$HGPORT/ --config hostfingerprints.127.0.0.1=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://localhost:$HGPORT/ --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 --trace
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
   searching for changes
   no changes found
 
--- a/tests/test-i18n.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-i18n.t	Tue Feb 28 11:13:25 2017 -0800
@@ -29,14 +29,15 @@
 
 Test keyword search in translated help text:
 
-  $ HGENCODING=UTF-8 LANGUAGE=de hg help -k blättern
+  $ HGENCODING=UTF-8 LANGUAGE=de hg help -k Aktualisiert
   Themen:
   
-   extensions Benutzung erweiterter Funktionen
+   subrepos Unterarchive
   
-  Erweiterungen:
+  Befehle:
   
-   pager Verwendet einen externen Pager zum Bl\xc3\xa4ttern in der Ausgabe von Befehlen (esc)
+   pull   Ruft \xc3\x84nderungen von der angegebenen Quelle ab (esc)
+   update Aktualisiert das Arbeitsverzeichnis (oder wechselt die Version)
 
 #endif
 
--- a/tests/test-largefiles-wireproto.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-largefiles-wireproto.t	Tue Feb 28 11:13:25 2017 -0800
@@ -347,7 +347,7 @@
   searching 2 changesets for largefiles
   verified existence of 2 revisions of 2 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  $LOCALIP - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   $ hg -R batchverifyclone update
   getting changed largefiles
   2 largefiles updated, 0 removed
@@ -384,7 +384,7 @@
   searching 3 changesets for largefiles
   verified existence of 3 revisions of 3 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  $LOCALIP - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
   $ killdaemons.py
 
--- a/tests/test-largefiles.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-largefiles.t	Tue Feb 28 11:13:25 2017 -0800
@@ -192,7 +192,7 @@
 
   $ hg serve -d -p $HGPORT --pid-file ../hg.pid
   $ cat ../hg.pid >> $DAEMON_PIDS
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/tip/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/tip/?style=raw'
   200 Script output follows
   
   
@@ -201,7 +201,7 @@
   -rw-r--r-- 9 normal3
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/tip/sub/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/tip/sub/?style=raw'
   200 Script output follows
   
   
--- a/tests/test-logtoprocess.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-logtoprocess.t	Tue Feb 28 11:13:25 2017 -0800
@@ -1,3 +1,7 @@
+ATTENTION: logtoprocess runs its commands asynchronously. If a test needs to
+see a command's output, append "| cat" to the hg invocation so the shell
+waits for the spawned processes to finish; otherwise the test will be flaky.
+
 Test if logtoprocess correctly captures command-related log calls.
 
   $ hg init
@@ -10,6 +14,7 @@
   > def foo(ui, repo):
   >     ui.log('foo', 'a message: %(bar)s\n', bar='spam')
   > EOF
+  $ cp $HGRCPATH $HGRCPATH.bak
   $ cat >> $HGRCPATH << EOF
   > [extensions]
   > logtoprocess=
@@ -33,9 +38,8 @@
 Running a command triggers both a ui.log('command') and a
 ui.log('commandfinish') call. The foo command also uses ui.log.
 
-Use head to ensure we wait for all lines to be produced, and sort to avoid
-ordering issues between the various processes we spawn:
-  $ hg foo | head -n 17 | sort
+Use sort to avoid ordering issues between the various processes we spawn:
+  $ hg foo | cat | sort
   
   
   
@@ -52,3 +56,18 @@
   logtoprocess commandfinish output:
   logtoprocess foo output:
   spam
+
+Confirm that logging blocked time catches stdio properly:
+  $ cp $HGRCPATH.bak $HGRCPATH
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > logtoprocess=
+  > pager=
+  > [logtoprocess]
+  > uiblocked=echo "\$EVENT stdio \$OPT_STDIO_BLOCKED ms command \$OPT_COMMAND_DURATION ms"
+  > [ui]
+  > logblockedtimes=True
+  > EOF
+
+  $ hg log | cat
+  uiblocked stdio [0-9]+.[0-9]* ms command [0-9]+.[0-9]* ms (re)
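
The command configured for an event receives the ui.log() call's data through
its environment: $EVENT carries the event name and each keyword argument
becomes an $OPT_* variable, as the uiblocked line above shows. The same
consumer sketched in Python instead of shell (a hypothetical script honouring
that contract):

  import os

  # logtoprocess exports the event name as EVENT and keyword arguments of
  # the ui.log() call as OPT_<NAME>, then runs this command asynchronously
  event = os.environ.get('EVENT', '')
  blocked = os.environ.get('OPT_STDIO_BLOCKED', '?')
  duration = os.environ.get('OPT_COMMAND_DURATION', '?')
  print('%s: stdio blocked %s ms of %s ms total' % (event, blocked, duration))
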
--- a/tests/test-mq.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-mq.t	Tue Feb 28 11:13:25 2017 -0800
@@ -25,7 +25,7 @@
   Known patches are represented as patch files in the .hg/patches directory.
   Applied patches are both patch files and changesets.
   
-  Common tasks (use 'hg help command' for more details):
+  Common tasks (use 'hg help COMMAND' for more details):
   
     create new patch                          qnew
     import existing patch                     qimport
--- a/tests/test-pager.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-pager.t	Tue Feb 28 11:13:25 2017 -0800
@@ -26,7 +26,7 @@
   >   hg ci -m "modify a $x"
   > done
 
-By default diff and log are paged, but summary is not:
+By default diff and log are paged, but id is not:
 
   $ hg diff -c 2 --pager=yes
   paged! 'diff -r f4be7687d414 -r bce265549556 a\n'
@@ -50,25 +50,16 @@
   paged! 'summary:     modify a 9\n'
   paged! '\n'
 
-  $ hg summary
-  parent: 10:46106edeeb38 tip
-   modify a 10
-  branch: default
-  commit: (clean)
-  update: (current)
-  phases: 11 draft
+  $ hg id
+  46106edeeb38 tip
 
-We can enable the pager on summary:
+We can enable the pager on id:
 
-  $ hg --config pager.attend-summary=yes summary
-  paged! 'parent: 10:46106edeeb38 tip\n'
-  paged! ' modify a 10\n'
-  paged! 'branch: default\n'
-  paged! 'commit: (clean)\n'
-  paged! 'update: (current)\n'
-  paged! 'phases: 11 draft\n'
+  $ hg --config pager.attend-id=yes id
+  paged! '46106edeeb38 tip\n'
 
-If we completely change the attend list that's respected:
+Setting attend-$COMMAND to a false value works, even with pager in
+core:
 
   $ hg --config pager.attend-diff=no diff -c 2
   diff -r f4be7687d414 -r bce265549556 a
@@ -79,15 +70,6 @@
    a 1
   +a 2
 
-  $ hg --config pager.attend=summary diff -c 2
-  diff -r f4be7687d414 -r bce265549556 a
-  --- a/a	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
-  @@ -1,2 +1,3 @@
-   a
-   a 1
-  +a 2
-
 If 'log' is in attend, then 'history' should also be paged:
   $ hg history --limit 2 --config pager.attend=log
   paged! 'changeset:   10:46106edeeb38\n'
@@ -102,61 +84,17 @@
   paged! 'summary:     modify a 9\n'
   paged! '\n'
 
-Possible bug: history is explicitly ignored in pager config, but
-because log is in the attend list it still gets pager treatment.
-
-  $ hg history --limit 2 --config pager.attend=log \
-  >   --config pager.ignore=history
-  paged! 'changeset:   10:46106edeeb38\n'
-  paged! 'tag:         tip\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 10\n'
-  paged! '\n'
-  paged! 'changeset:   9:6dd8ea7dd621\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 9\n'
-  paged! '\n'
-
-Possible bug: history is explicitly marked as attend-history=no, but
-it doesn't fail to get paged because log is still in the attend list.
-
-  $ hg history --limit 2 --config pager.attend-history=no
-  paged! 'changeset:   10:46106edeeb38\n'
-  paged! 'tag:         tip\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 10\n'
-  paged! '\n'
-  paged! 'changeset:   9:6dd8ea7dd621\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 9\n'
-  paged! '\n'
-
-Possible bug: disabling pager for log but enabling it for history
-doesn't result in history being paged.
-
-  $ hg history --limit 2 --config pager.attend-log=no \
-  > --config pager.attend-history=yes
-  changeset:   10:46106edeeb38
-  tag:         tip
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     modify a 10
-  
-  changeset:   9:6dd8ea7dd621
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     modify a 9
-  
-
 Pager should not start if stdout is not a tty.
 
   $ hg log -l1 -q --config ui.formatted=False
   10:46106edeeb38
 
+Pager should be disabled if pager.pager is empty (otherwise the output
+would be silently lost).
+
+  $ hg log -l1 -q --config pager.pager=
+  10:46106edeeb38
+
 Pager with color enabled allows colors to come through by default,
 even though stdout is no longer a tty.
   $ cat >> $HGRCPATH <<EOF
@@ -207,6 +145,11 @@
   $ A=2 hg --config pager.attend-printa=yes printa
   paged! '2\n'
 
+Something that's explicitly attended is still not paginated if the
+pager is globally set to off using a flag:
+  $ A=2 hg --config pager.attend-printa=yes printa --pager=no
+  2
+
 Pager should not override the exit code of other commands
 
   $ cat >> $TESTTMP/fortytwo.py <<'EOF'
@@ -227,3 +170,61 @@
   $ hg fortytwo --pager=on
   paged! '42\n'
   [42]
+
+A command that asks for paging using ui.pager() directly works:
+  $ hg blame a
+  paged! ' 0: a\n'
+  paged! ' 1: a 1\n'
+  paged! ' 2: a 2\n'
+  paged! ' 3: a 3\n'
+  paged! ' 4: a 4\n'
+  paged! ' 5: a 5\n'
+  paged! ' 6: a 6\n'
+  paged! ' 7: a 7\n'
+  paged! ' 8: a 8\n'
+  paged! ' 9: a 9\n'
+  paged! '10: a 10\n'
+but not with HGPLAIN
+  $ HGPLAIN=1 hg blame a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
+explicit flags work too:
+  $ hg blame --pager=no a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
+
+Put annotate in the ignore list for pager:
+  $ cat >> $HGRCPATH <<EOF
+  > [pager]
+  > ignore = annotate
+  > EOF
+  $ hg blame a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
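
For the ui.pager() case above, a command opts in to paging by calling
ui.pager() with a name that the attend-*/ignore settings and the --pager flag
can then override. A minimal sketch of such an extension, modelled on the
fortytwo.py helper used earlier (the command name is made up):

  from mercurial import cmdutil

  cmdtable = {}
  command = cmdutil.command(cmdtable)

  @command('showlines', [], 'hg showlines')
  def showlines(ui, repo):
      # request a pager; pager.ignore, pager.attend-showlines and
      # --pager=no can still keep it from starting
      ui.pager('showlines')
      for i in range(20):
          ui.write('line %d\n' % i)
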
--- a/tests/test-phases.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-phases.t	Tue Feb 28 11:13:25 2017 -0800
@@ -590,3 +590,47 @@
   crosschecking files in changesets and manifests
   checking files
   7 files, 8 changesets, 7 total revisions
+
+  $ cd ..
+
+check whether HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+(emulate a concurrently running transaction by copying
+.hg/store/phaseroots.pending in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/store/phaseroots.pending  .hg/store/phaseroots.pending.saved
+  > exit 1 # to avoid changing phase for subsequent tests
+  > EOF
+  $ cd push-dest
+  $ hg phase 6
+  6: draft
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" phase -f -s 6
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/store/phaseroots.pending.saved .hg/store/phaseroots.pending
+
+(check (in)visibility of the phaseroot while a transaction is running in the repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo '@initialrepo'
+  > hg -R $TESTTMP/initialrepo phase 7
+  > echo '@push-dest'
+  > hg -R $TESTTMP/push-dest phase 6
+  > exit 1 # to avoid changing phase for subsequent tests
+  > EOF
+  $ cd ../initialrepo
+  $ hg phase 7
+  7: public
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" phase -f -s 7
+  @initialrepo
+  7: secret
+  @push-dest
+  6: draft
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
--- a/tests/test-push-http-bundle1.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-push-http-bundle1.t	Tue Feb 28 11:13:25 2017 -0800
@@ -79,7 +79,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -95,7 +95,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -111,7 +111,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
--- a/tests/test-push-http.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-push-http.t	Tue Feb 28 11:13:25 2017 -0800
@@ -70,7 +70,7 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
-  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -87,7 +87,7 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
-  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -104,7 +104,7 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
-  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -125,7 +125,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   remote: pushkey-abort: prepushkey hook exited with status 1
   remote: transaction abort!
   remote: rollback completed
@@ -145,7 +145,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
--- a/tests/test-revset.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-revset.t	Tue Feb 28 11:13:25 2017 -0800
@@ -40,6 +40,8 @@
   >     cmdutil,
   >     node as nodemod,
   >     revset,
+  >     revsetlang,
+  >     smartset,
   > )
   > cmdtable = {}
   > command = cmdutil.command(cmdtable)
@@ -49,17 +51,18 @@
   > def debugrevlistspec(ui, repo, fmt, *args, **opts):
   >     if opts['bin']:
   >         args = map(nodemod.bin, args)
-  >     expr = revset.formatspec(fmt, list(args))
+  >     expr = revsetlang.formatspec(fmt, list(args))
   >     if ui.verbose:
-  >         tree = revset.parse(expr, lookup=repo.__contains__)
-  >         ui.note(revset.prettyformat(tree), "\n")
+  >         tree = revsetlang.parse(expr, lookup=repo.__contains__)
+  >         ui.note(revsetlang.prettyformat(tree), "\n")
   >         if opts["optimize"]:
-  >             opttree = revset.optimize(revset.analyze(tree))
-  >             ui.note("* optimized:\n", revset.prettyformat(opttree), "\n")
+  >             opttree = revsetlang.optimize(revsetlang.analyze(tree))
+  >             ui.note("* optimized:\n", revsetlang.prettyformat(opttree),
+  >                     "\n")
   >     func = revset.match(ui, expr, repo)
   >     revs = func(repo)
   >     if ui.verbose:
-  >         ui.note("* set:\n", revset.prettyformatset(revs), "\n")
+  >         ui.note("* set:\n", smartset.prettyformat(revs), "\n")
   >     for c in revs:
   >         ui.write("%s\n" % c)
   > EOF
--- a/tests/test-serve.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-serve.t	Tue Feb 28 11:13:25 2017 -0800
@@ -34,13 +34,13 @@
 With -v
 
   $ hgserve
-  listening at http://localhost/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/ (bound to *$LOCALIP*:HGPORT1) (glob)
   % errors
 
 With -v and -p HGPORT2
 
   $ hgserve -p "$HGPORT2"
-  listening at http://localhost/ (bound to 127.0.0.1:HGPORT2) (glob)
+  listening at http://localhost/ (bound to *$LOCALIP*:HGPORT2) (glob)
   % errors
 
 With -v and -p daytime (should fail because low port)
@@ -57,25 +57,25 @@
 With --prefix foo
 
   $ hgserve --prefix foo
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob)
   % errors
 
 With --prefix /foo
 
   $ hgserve --prefix /foo
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob)
   % errors
 
 With --prefix foo/
 
   $ hgserve --prefix foo/
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob)
   % errors
 
 With --prefix /foo/
 
   $ hgserve --prefix /foo/
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob)
   % errors
 
   $ cd ..
--- a/tests/test-share.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-share.t	Tue Feb 28 11:13:25 2017 -0800
@@ -154,6 +154,67 @@
    * bm1                       2:c2e0ac586386
      bm3                       2:c2e0ac586386
 
+check whether HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+In the "hg share" case, another transaction can't run in other
+repositories sharing the same source repository, because starting a
+transaction requires locking the store of the source repository.
+
+Therefore, this test scenario skips checking the visibility of
+.hg/bookmarks.pending in repo2, which shares repo1 without bookmarks.
+
+  $ cat > $TESTTMP/checkbookmarks.sh <<EOF
+  > echo "@repo1"
+  > hg -R $TESTTMP/repo1 bookmarks
+  > echo "@repo2"
+  > hg -R $TESTTMP/repo2 bookmarks
+  > echo "@repo3"
+  > hg -R $TESTTMP/repo3 bookmarks
+  > exit 1 # to avoid adding new bookmark for subsequent tests
+  > EOF
+
+  $ cd ../repo1
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
+  @repo1
+     bm1                       2:c2e0ac586386
+     bm3                       2:c2e0ac586386
+   * bmX                       2:c2e0ac586386
+  @repo2
+   * bm2                       3:0e6e70d1d5f1
+  @repo3
+     bm1                       2:c2e0ac586386
+   * bm3                       2:c2e0ac586386
+     bmX                       2:c2e0ac586386
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ hg book bm1
+
+In contrast to the test above, bmX is invisible in repo1 (the shared
+source), because (1) HG_PENDING refers only to repo3 and (2)
+"bookmarks.pending" is written only into repo3.
+
+  $ cd ../repo3
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
+  @repo1
+   * bm1                       2:c2e0ac586386
+     bm3                       2:c2e0ac586386
+  @repo2
+   * bm2                       3:0e6e70d1d5f1
+  @repo3
+     bm1                       2:c2e0ac586386
+     bm3                       2:c2e0ac586386
+   * bmX                       2:c2e0ac586386
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ hg book bm3
+
+  $ cd ../repo1
+
 test that commits work
 
   $ echo 'shared bookmarks' > a
--- a/tests/test-shelve.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-shelve.t	Tue Feb 28 11:13:25 2017 -0800
@@ -493,7 +493,7 @@
   $ ln -s foo a/a
   $ hg shelve -q -n symlink a/a
   $ hg status a/a
-  $ hg unshelve -q symlink
+  $ hg unshelve -q -n symlink
   $ hg status a/a
   M a/a
   $ hg revert a/a
--- a/tests/test-ssh-bundle1.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-ssh-bundle1.t	Tue Feb 28 11:13:25 2017 -0800
@@ -494,7 +494,7 @@
   Got arguments 1:user@dummy 2:hg -R local serve --stdio
   Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
@@ -504,7 +504,7 @@
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg init 'a repo'
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
@@ -512,7 +512,7 @@
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
 
 remote hook failure is attributed to remote
--- a/tests/test-ssh.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-ssh.t	Tue Feb 28 11:13:25 2017 -0800
@@ -498,7 +498,7 @@
   Got arguments 1:user@dummy 2:hg -R local serve --stdio
   Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
@@ -508,7 +508,7 @@
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg init 'a repo'
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
@@ -516,7 +516,7 @@
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
 
 remote hook failure is attributed to remote
--- a/tests/test-status-color.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-status-color.t	Tue Feb 28 11:13:25 2017 -0800
@@ -1,6 +1,6 @@
   $ cat <<EOF >> $HGRCPATH
-  > [extensions]
-  > color =
+  > [ui]
+  > color = always
   > [color]
   > mode = ansi
   > EOF
@@ -14,7 +14,7 @@
 
 hg status in repo root:
 
-  $ hg status --color=always
+  $ hg status
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
@@ -41,7 +41,7 @@
 
 hg status . in repo root:
 
-  $ hg status --color=always .
+  $ hg status .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
@@ -49,17 +49,17 @@
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
 
-  $ hg status --color=always --cwd a
+  $ hg status --cwd a
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd a .
+  $ hg status --cwd a .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
-  $ hg status --color=always --cwd a ..
+  $ hg status --cwd a ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/1/in_b_1\x1b[0m (esc)
@@ -67,18 +67,18 @@
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
 
-  $ hg status --color=always --cwd b
+  $ hg status --cwd b
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd b .
+  $ hg status --cwd b .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
-  $ hg status --color=always --cwd b ..
+  $ hg status --cwd b ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
@@ -86,43 +86,43 @@
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
 
-  $ hg status --color=always --cwd a/1
+  $ hg status --cwd a/1
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd a/1 .
+  $ hg status --cwd a/1 .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
-  $ hg status --color=always --cwd a/1 ..
+  $ hg status --cwd a/1 ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_a\x1b[0m (esc)
 
-  $ hg status --color=always --cwd b/1
+  $ hg status --cwd b/1
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd b/1 .
+  $ hg status --cwd b/1 .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
-  $ hg status --color=always --cwd b/1 ..
+  $ hg status --cwd b/1 ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
 
-  $ hg status --color=always --cwd b/2
+  $ hg status --cwd b/2
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd b/2 .
+  $ hg status --cwd b/2 .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
-  $ hg status --color=always --cwd b/2 ..
+  $ hg status --cwd b/2 ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
@@ -137,7 +137,7 @@
   ? in_root
 
 Make sure ui.formatted=False works
-  $ hg status --config ui.formatted=False
+  $ hg status --color=auto --config ui.formatted=False
   ? a/1/in_a_1
   ? a/in_a
   ? b/1/in_b_1
@@ -179,7 +179,7 @@
 
 hg status:
 
-  $ hg status --color=always
+  $ hg status
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
   \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
@@ -187,7 +187,7 @@
 
 hg status modified added removed deleted unknown never-existed ignored:
 
-  $ hg status --color=always modified added removed deleted unknown never-existed ignored
+  $ hg status modified added removed deleted unknown never-existed ignored
   never-existed: * (glob)
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
@@ -198,7 +198,7 @@
 
 hg status -C:
 
-  $ hg status --color=always -C
+  $ hg status -C
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
   \x1b[0;0m  modified\x1b[0m (esc)
@@ -208,7 +208,7 @@
 
 hg status -A:
 
-  $ hg status --color=always -A
+  $ hg status -A
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
   \x1b[0;0m  modified\x1b[0m (esc)
@@ -226,7 +226,7 @@
 
   $ mkdir "$TESTTMP/terminfo"
   $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti"
-  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --color=always -A
+  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo -A
   \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1madded\x1b[30m (esc)
   \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1mcopied\x1b[30m (esc)
   \x1b[30m\x1b[30m  modified\x1b[30m (esc)
@@ -245,7 +245,7 @@
   > # We can override what's in the terminfo database, too
   > terminfo.bold = \E[2m
   > EOF
-  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim --color=always -A
+  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim -A
   \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2madded\x1b[30m (esc)
   \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2mcopied\x1b[30m (esc)
   \x1b[30m\x1b[30m  modified\x1b[30m (esc)
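
color.mode=terminfo takes its escape sequences from the terminal's terminfo
entry rather than from hardcoded ANSI codes, which is why the test can
override terminfo.bold from the config. A hedged sketch of that lookup using
the standard curses bindings, assuming an xterm entry exists in the local
terminfo database:

    import curses

    curses.setupterm('xterm')         # assumption: an xterm terminfo entry
    bold = curses.tigetstr('bold')    # capability string, e.g. b'\x1b[1m'
    setaf = curses.tigetstr('setaf')  # parameterized: set foreground color
    if bold and setaf:                # either may be None if unsupported
        green = curses.tparm(setaf, 2)  # instantiate for color 2 (green)
        print((green + bold + b'A added').decode('ascii', 'replace'))
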
@@ -265,11 +265,11 @@
 
 hg status ignoreddir/file:
 
-  $ hg status --color=always ignoreddir/file
+  $ hg status ignoreddir/file
 
 hg status -i ignoreddir/file:
 
-  $ hg status --color=always -i ignoreddir/file
+  $ hg status -i ignoreddir/file
   \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignoreddir/file\x1b[0m (esc)
   $ cd ..
 
@@ -293,7 +293,9 @@
 
 test unknown color
 
-  $ hg --config color.status.modified=periwinkle status --color=always
+  $ hg --config color.status.modified=periwinkle status
+  ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
+  ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
   ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
   M modified
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
@@ -307,8 +309,8 @@
 If result is not as expected, raise error
 
   $ assert() {
-  >     hg status --color=always $1 > ../a
-  >     hg status --color=always $2 > ../b
+  >     hg status $1 > ../a
+  >     hg status $2 > ../b
   >     if diff ../a ../b > /dev/null; then
   >         out=0
   >     else
@@ -367,7 +369,7 @@
 
 hg resolve with one unresolved, one resolved:
 
-  $ hg resolve --color=always -l
+  $ hg resolve -l
   \x1b[0;31;1mU \x1b[0m\x1b[0;31;1ma\x1b[0m (esc)
   \x1b[0;32;1mR \x1b[0m\x1b[0;32;1mb\x1b[0m (esc)
 
--- a/tests/test-ui-color.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-ui-color.py	Tue Feb 28 11:13:25 2017 -0800
@@ -1,16 +1,13 @@
 from __future__ import absolute_import, print_function
 
 import os
-from hgext import (
-    color,
-)
 from mercurial import (
     dispatch,
     ui as uimod,
 )
 
 # ensure errors aren't buffered
-testui = color.colorui()
+testui = uimod.ui()
 testui.pushbuffer()
 testui.write(('buffered\n'))
 testui.warn(('warning\n'))
@@ -35,6 +32,7 @@
     dispatch.dispatch(dispatch.request(['version', '-q'], ui_))
 
 runcmd()
-print("colored? " + str(issubclass(ui_.__class__, color.colorui)))
+print("colored? %s" % (ui_._colormode is not None))
 runcmd()
-print("colored? " + str(issubclass(ui_.__class__, color.colorui)))
+print("colored? %s" % (ui_._colormode is not None))
+
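
With color folded into core, there is no colorui subclass left to test for;
the rewritten assertions read the private ui._colormode attribute instead,
where None means color is disabled. A sketch of the same check in isolation,
mirroring the test's dispatch call:

    from mercurial import dispatch, ui as uimod

    u = uimod.ui()
    dispatch.dispatch(dispatch.request(['version', '-q'], u))
    # _colormode is private API: None means no color; otherwise it names
    # the mode in effect (for example 'ansi', 'win32' or 'terminfo').
    print("colored? %s" % (u._colormode is not None))
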
--- a/tests/test-update-branches.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-update-branches.t	Tue Feb 28 11:13:25 2017 -0800
@@ -177,6 +177,28 @@
 
   $ cd ..
 
+Test updating to null revision
+
+  $ hg init null-repo
+  $ cd null-repo
+  $ echo a > a
+  $ hg add a
+  $ hg ci -m a
+  $ hg up -qC 0
+  $ echo b > b
+  $ hg add b
+  $ hg up null
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg st
+  A b
+  $ hg up -q 0
+  $ hg st
+  A b
+  $ hg up -qC null
+  $ hg st
+  ? b
+  $ cd ..
+
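
The new test pins down a subtle point: a plain update to the null revision
keeps uncommitted adds in the dirstate (b stays 'A'), while a clean update
with -C forgets them, leaving the file on disk as unknown ('?'). A toy model
of that rule, not Mercurial's dirstate code:

    def update_to_null(dirstate, clean=False):
        """Return the entries that survive an update to the null revision.

        dirstate maps path -> state; 'a' marks an uncommitted add. A plain
        update keeps adds; a clean (-C) update drops them, and the file
        itself stays on disk as an unknown file.
        """
        if clean:
            return {}
        return dict((p, s) for p, s in dirstate.items() if s == 'a')

    assert update_to_null({'b': 'a'}) == {'b': 'a'}      # hg up null    -> A b
    assert update_to_null({'b': 'a'}, clean=True) == {}  # hg up -C null -> ? b
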
 Test updating with closed head
 ---------------------------------------------------------------------
 
--- a/tests/test-walk.t	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/test-walk.t	Tue Feb 28 11:13:25 2017 -0800
@@ -112,6 +112,74 @@
   f  beans/navy      ../beans/navy
   f  beans/pinto     ../beans/pinto
   f  beans/turtle    ../beans/turtle
+
+  $ hg debugwalk 'rootfilesin:'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk -I 'rootfilesin:'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk 'rootfilesin:.'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk -I 'rootfilesin:.'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk -X 'rootfilesin:'
+  f  beans/black                     ../beans/black
+  f  beans/borlotti                  ../beans/borlotti
+  f  beans/kidney                    ../beans/kidney
+  f  beans/navy                      ../beans/navy
+  f  beans/pinto                     ../beans/pinto
+  f  beans/turtle                    ../beans/turtle
+  f  mammals/Procyonidae/cacomistle  Procyonidae/cacomistle
+  f  mammals/Procyonidae/coatimundi  Procyonidae/coatimundi
+  f  mammals/Procyonidae/raccoon     Procyonidae/raccoon
+  f  mammals/skunk                   skunk
+  $ hg debugwalk 'rootfilesin:fennel'
+  $ hg debugwalk -I 'rootfilesin:fennel'
+  $ hg debugwalk 'rootfilesin:skunk'
+  $ hg debugwalk -I 'rootfilesin:skunk'
+  $ hg debugwalk 'rootfilesin:beans'
+  f  beans/black     ../beans/black
+  f  beans/borlotti  ../beans/borlotti
+  f  beans/kidney    ../beans/kidney
+  f  beans/navy      ../beans/navy
+  f  beans/pinto     ../beans/pinto
+  f  beans/turtle    ../beans/turtle
+  $ hg debugwalk -I 'rootfilesin:beans'
+  f  beans/black     ../beans/black
+  f  beans/borlotti  ../beans/borlotti
+  f  beans/kidney    ../beans/kidney
+  f  beans/navy      ../beans/navy
+  f  beans/pinto     ../beans/pinto
+  f  beans/turtle    ../beans/turtle
+  $ hg debugwalk 'rootfilesin:mammals'
+  f  mammals/skunk  skunk
+  $ hg debugwalk -I 'rootfilesin:mammals'
+  f  mammals/skunk  skunk
+  $ hg debugwalk 'rootfilesin:mammals/'
+  f  mammals/skunk  skunk
+  $ hg debugwalk -I 'rootfilesin:mammals/'
+  f  mammals/skunk  skunk
+  $ hg debugwalk -X 'rootfilesin:mammals'
+  f  beans/black                     ../beans/black
+  f  beans/borlotti                  ../beans/borlotti
+  f  beans/kidney                    ../beans/kidney
+  f  beans/navy                      ../beans/navy
+  f  beans/pinto                     ../beans/pinto
+  f  beans/turtle                    ../beans/turtle
+  f  fennel                          ../fennel
+  f  fenugreek                       ../fenugreek
+  f  fiddlehead                      ../fiddlehead
+  f  mammals/Procyonidae/cacomistle  Procyonidae/cacomistle
+  f  mammals/Procyonidae/coatimundi  Procyonidae/coatimundi
+  f  mammals/Procyonidae/raccoon     Procyonidae/raccoon
+
   $ hg debugwalk .
   f  mammals/Procyonidae/cacomistle  Procyonidae/cacomistle
   f  mammals/Procyonidae/coatimundi  Procyonidae/coatimundi
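
The cases added above document the rootfilesin: pattern: it matches files
directly inside the named directory (the repository root when the path is
empty or '.'), and unlike path: it never recurses into subdirectories. A
small model of that semantics; the helper is hypothetical, not the matcher
implementation:

    import posixpath

    def rootfilesin(dirname, files):
        """Yield paths whose parent directory is exactly dirname.

        An empty dirname (or '.') stands for the repository root, covering
        the 'rootfilesin:' and 'rootfilesin:.' cases above; a trailing
        slash is ignored, matching 'rootfilesin:mammals/'.
        """
        dirname = dirname.strip('/')
        if dirname == '.':
            dirname = ''
        for f in files:
            if posixpath.dirname(f) == dirname:
                yield f

    files = ['fennel', 'beans/black', 'beans/turtle',
             'mammals/skunk', 'mammals/Procyonidae/raccoon']
    assert list(rootfilesin('', files)) == ['fennel']
    assert list(rootfilesin('beans', files)) == ['beans/black', 'beans/turtle']
    assert list(rootfilesin('mammals/', files)) == ['mammals/skunk']
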
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-xdg.t	Tue Feb 28 11:13:25 2017 -0800
@@ -0,0 +1,11 @@
+#if no-windows no-osx
+
+  $ mkdir -p xdgconf/hg
+  $ echo '[ui]' > xdgconf/hg/hgrc
+  $ echo 'username = foobar' >> xdgconf/hg/hgrc
+  $ XDG_CONFIG_HOME="`pwd`/xdgconf" ; export XDG_CONFIG_HOME
+  $ unset HGRCPATH
+  $ hg config ui.username
+  foobar
+
+#endif
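
The new test reads the per-user hgrc from the XDG location. Under the XDG
Base Directory specification, $XDG_CONFIG_HOME defaults to ~/.config, so the
file consulted is $XDG_CONFIG_HOME/hg/hgrc. A sketch of that resolution (the
helper name is hypothetical):

    import os

    def xdg_hgrc():
        """Return the XDG path of the per-user hgrc, as the test sets it up."""
        base = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
        return os.path.join(base, 'hg', 'hgrc')

    # With XDG_CONFIG_HOME=$PWD/xdgconf this yields xdgconf/hg/hgrc, the
    # file the test writes ui.username = foobar into.
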
--- a/tests/tinyproxy.py	Sat Feb 25 12:48:50 2017 +0900
+++ b/tests/tinyproxy.py	Tue Feb 28 11:13:25 2017 -0800
@@ -26,6 +26,11 @@
 urlparse = util.urlparse
 socketserver = util.socketserver
 
+if os.environ.get('HGIPV6', '0') == '1':
+    family = socket.AF_INET6
+else:
+    family = socket.AF_INET
+
 class ProxyHandler (httpserver.basehttprequesthandler):
     __base = httpserver.basehttprequesthandler
     __base_handle = __base.handle
@@ -65,7 +70,7 @@
         return 1
 
     def do_CONNECT(self):
-        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        soc = socket.socket(family, socket.SOCK_STREAM)
         try:
             if self._connect_to(self.path, soc):
                 self.log_request(200)
@@ -85,7 +90,7 @@
         if scm != 'http' or fragment or not netloc:
             self.send_error(400, "bad url %s" % self.path)
             return
-        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        soc = socket.socket(family, socket.SOCK_STREAM)
         try:
             if self._connect_to(netloc, soc):
                 self.log_request()
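
The module-level family switch above lets the test runner flip the proxy to
IPv6 by exporting HGIPV6=1, without touching the request-handling logic:
every outbound socket the handlers create then uses the same address family.
Standalone, the selection looks like this (the throwaway loopback listener is
only illustrative and assumes an IPv6 loopback is available when HGIPV6=1):

    import os
    import socket

    family = (socket.AF_INET6 if os.environ.get('HGIPV6', '0') == '1'
              else socket.AF_INET)
    host = '::1' if family == socket.AF_INET6 else '127.0.0.1'
    s = socket.socket(family, socket.SOCK_STREAM)
    s.bind((host, 0))   # port 0: let the OS pick a free port
    s.listen(1)
    print('listening on %s:%d' % s.getsockname()[:2])
    s.close()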