changeset 30839:d7f5a35ac6cd stable

freeze: merge default into stable for 4.1 code freeze
author Augie Fackler <augie@google.com>
date Wed, 18 Jan 2017 11:43:36 -0500
parents 4a6ecc5d6d3c (current diff) eb78ec9e97b7 (diff)
children a1dd2c0c479e
files hgext/chgserver.py hgext/fsmonitor/pywatchman/msc_stdint.h mercurial/help/multirevs.txt mercurial/help/revsets.txt mercurial/py3kcompat.py mercurial/strutil.py setup_bdiff_cffi.py setup_mpatch_cffi.py setup_osutil_cffi.py tests/test-bdiff.py.out tests/test-push-hook-lock.t tests/test-push-r.t tests/test-push-validation.t
diffstat 387 files changed, 38130 insertions(+), 6380 deletions(-)
--- a/.hgignore	Wed Jan 04 10:51:37 2017 -0600
+++ b/.hgignore	Wed Jan 18 11:43:36 2017 -0500
@@ -49,6 +49,7 @@
 tags
 cscope.*
 .idea/*
+.asv/*
 i18n/hg.pot
 locale/*/LC_MESSAGES/hg.mo
 hgext/__index__.py
--- a/Makefile	Wed Jan 04 10:51:37 2017 -0600
+++ b/Makefile	Wed Jan 18 11:43:36 2017 -0500
@@ -195,18 +195,18 @@
 docker-ubuntu-trusty-ppa: contrib/docker/ubuntu-trusty
 	contrib/dockerdeb ubuntu trusty --source-only
 
-docker-ubuntu-wily: contrib/docker/ubuntu-wily
-	contrib/dockerdeb ubuntu wily
-
-docker-ubuntu-wily-ppa: contrib/docker/ubuntu-wily
-	contrib/dockerdeb ubuntu wily --source-only
-
 docker-ubuntu-xenial: contrib/docker/ubuntu-xenial
 	contrib/dockerdeb ubuntu xenial
 
 docker-ubuntu-xenial-ppa: contrib/docker/ubuntu-xenial
 	contrib/dockerdeb ubuntu xenial --source-only
 
+docker-ubuntu-yakkety: contrib/docker/ubuntu-yakkety
+	contrib/dockerdeb ubuntu yakkety
+
+docker-ubuntu-yakkety-ppa: contrib/docker/ubuntu-yakkety
+	contrib/dockerdeb ubuntu yakkety --source-only
+
 fedora20:
 	mkdir -p packages/fedora20
 	contrib/buildrpm
--- a/contrib/all-revsets.txt	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/all-revsets.txt	Wed Jan 18 11:43:36 2017 -0500
@@ -8,7 +8,7 @@
 #
 # Please update this file with any revsets you use for benchmarking a change so
 # that future contributors can easily find and retest it when doing further
-# modification. Feel free to highlight interresting variants if needed.
+# modification. Feel free to highlight interesting variants if needed.
 
 
 ## Revset from this section are all extracted from changelog when this file was
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/asv.conf.json	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,13 @@
+{
+    "version": 1,
+    "project": "mercurial",
+    "project_url": "https://mercurial-scm.org/",
+    "repo": "..",
+    "branches": ["default", "stable"],
+    "environment_type": "virtualenv",
+    "show_commit_url": "https://www.mercurial-scm.org/repo/hg/rev/",
+    "benchmark_dir": "benchmarks",
+    "env_dir": "../.asv/env",
+    "results_dir": "../.asv/results",
+    "html_dir": "../.asv/html"
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/benchmarks/__init__.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,113 @@
+# __init__.py - asv benchmark suite
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# "historical portability" policy of contrib/benchmarks:
+#
+# We have to make this code work correctly with the current Mercurial stable
+# branch and, if possible at reasonable cost, with early Mercurial versions.
+
+'''ASV (https://asv.readthedocs.io) benchmark suite
+
+Benchmarks are parameterized against reference repositories found in the
+directory pointed to by the REPOS_DIR environment variable.
+
+Invocation example:
+
+    $ export REPOS_DIR=~/hgperf/repos
+    # run suite on given revision
+    $ asv --config contrib/asv.conf.json run REV
+    # run suite on new changesets found in stable and default branch
+    $ asv --config contrib/asv.conf.json run NEW
+    # display a comparative result table of benchmark results between two given
+    # revisions
+    $ asv --config contrib/asv.conf.json compare REV1 REV2
+    # compute regression detection and generate ASV static website
+    $ asv --config contrib/asv.conf.json publish
+    # serve the static website
+    $ asv --config contrib/asv.conf.json preview
+'''
+
+from __future__ import absolute_import
+
+import functools
+import os
+import re
+
+from mercurial import (
+    extensions,
+    hg,
+    ui as uimod,
+    util,
+)
+
+basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                          os.path.pardir, os.path.pardir))
+reposdir = os.environ['REPOS_DIR']
+reposnames = [name for name in os.listdir(reposdir)
+              if os.path.isdir(os.path.join(reposdir, name, ".hg"))]
+if not reposnames:
+    raise ValueError("No repositories found in $REPOS_DIR")
+outputre = re.compile((r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
+                       r'\d+.\d+ \(best of \d+\)'))
+
+def runperfcommand(reponame, command, *args, **kwargs):
+    os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
+    # for "historical portability"
+    # ui.load() has been available since d83ca85
+    if util.safehasattr(uimod.ui, "load"):
+        ui = uimod.ui.load()
+    else:
+        ui = uimod.ui()
+    repo = hg.repository(ui, os.path.join(reposdir, reponame))
+    perfext = extensions.load(ui, 'perfext',
+                              os.path.join(basedir, 'contrib', 'perf.py'))
+    cmd = getattr(perfext, command)
+    ui.pushbuffer()
+    cmd(ui, repo, *args, **kwargs)
+    output = ui.popbuffer()
+    match = outputre.search(output)
+    if not match:
+        raise ValueError("Invalid output {0}".format(output))
+    return float(match.group(1))
+
+def perfbench(repos=reposnames, name=None, params=None):
+    """decorator to declare an ASV benchmark based on contrib/perf.py
+
+    An ASV benchmark is a Python function with the following attributes:
+
+    __name__: should start with track_, time_ or mem_ to be collected by ASV
+    params and param_names: parameter matrix used to display multiple
+    graphs on the same page.
+    pretty_name: if defined, it is displayed in the web UI instead of
+    __name__ (useful for revsets).
+    The module name is prepended to the benchmark name and displayed as
+    "category" in the web UI.
+
+    Benchmarks are automatically parameterized with repositories found in the
+    REPOS_DIR environment variable.
+
+    `params` is the param matrix in the form of a list of tuples
+    (param_name, [value0, value1]).
+
+    For example, [(x, [a, b]), (y, [c, d])] declares benchmarks for
+    (a, c), (a, d), (b, c) and (b, d).
+    """
+    params = list(params or [])
+    params.insert(0, ("repo", repos))
+
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapped(repo, *args):
+            def perf(command, *a, **kw):
+                return runperfcommand(repo, command, *a, **kw)
+            return func(perf, *args)
+
+        wrapped.params = [p[1] for p in params]
+        wrapped.param_names = [p[0] for p in params]
+        wrapped.pretty_name = name
+        return wrapped
+    return decorator
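
For illustration, a minimal sketch (not part of the patch) of how a benchmark
can be declared with the perfbench decorator above; the perf command name and
parameter values mirror the real examples in contrib/benchmarks/perf.py below:

    from . import perfbench

    @perfbench(params=[('rev', ['1000', 'tip'])])
    def track_example_manifest(perf, rev):
        # collected by ASV because the name starts with "track_"; the
        # "repo" parameter is prepended to the matrix by perfbench itself
        return perf("perfmanifest", rev)
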
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/benchmarks/perf.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,26 @@
+# perf.py - asv benchmarks using contrib/perf.py extension
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from . import perfbench
+
+@perfbench()
+def track_tags(perf):
+    return perf("perftags")
+
+@perfbench()
+def track_status(perf):
+    return perf("perfstatus", unknown=False)
+
+@perfbench(params=[('rev', ['1000', '10000', 'tip'])])
+def track_manifest(perf, rev):
+    return perf("perfmanifest", rev)
+
+@perfbench()
+def track_heads(perf):
+    return perf("perfheads")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/benchmarks/revset.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,53 @@
+# revset.py - asv revset benchmarks
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''ASV revset benchmarks generated from contrib/base-revsets.txt
+
+Each revset benchmark is parameterized with variants (first, last, sort, ...)
+'''
+
+from __future__ import absolute_import
+
+import os
+import string
+import sys
+
+from . import basedir, perfbench
+
+def createrevsetbenchmark(baseset, variants=None):
+    if variants is None:
+        # Default variants
+        variants = ["plain", "first", "last", "sort", "sort+first",
+                    "sort+last"]
+    fname = "track_" + "_".join("".join([
+        c if c in string.digits + string.letters else " "
+        for c in baseset
+    ]).split())
+
+    def wrap(fname, baseset):
+        @perfbench(name=baseset, params=[("variant", variants)])
+        def f(perf, variant):
+            revset = baseset
+            if variant != "plain":
+                for var in variant.split("+"):
+                    revset = "%s(%s)" % (var, revset)
+            return perf("perfrevset", revset)
+        f.__name__ = fname
+        return f
+    return wrap(fname, baseset)
+
+def initializerevsetbenchmarks():
+    mod = sys.modules[__name__]
+    with open(os.path.join(basedir, 'contrib', 'base-revsets.txt'),
+              'rb') as fh:
+        for line in fh:
+            baseset = line.strip()
+            if baseset and not baseset.startswith('#'):
+                func = createrevsetbenchmark(baseset)
+                setattr(mod, func.__name__, func)
+
+initializerevsetbenchmarks()
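
To make the generated functions concrete: assuming base-revsets.txt contained
the line ``all()`` (a hypothetical entry, for illustration only),
createrevsetbenchmark would produce roughly the following benchmark:

    @perfbench(name="all()", params=[("variant", ["plain", "first", "last",
                                                  "sort", "sort+first",
                                                  "sort+last"])])
    def track_all(perf, variant):
        revset = "all()"
        if variant != "plain":
            for var in variant.split("+"):
                revset = "%s(%s)" % (var, revset)  # e.g. "first(all())"
        return perf("perfrevset", revset)
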
--- a/contrib/check-code.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/check-code.py	Wed Jan 18 11:43:36 2017 -0500
@@ -142,7 +142,8 @@
     (r'\|&', "don't use |&, use 2>&1"),
     (r'\w =  +\w', "only one space after = allowed"),
     (r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
-    (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'")
+    (r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'"),
+    (r'cp.* -r ', "don't use 'cp -r', use 'cp -R'"),
   ],
   # warnings
   [
@@ -324,6 +325,7 @@
     # XXX only catch mutable arguments on the first line of the definition
     (r'def.*[( ]\w+=\{\}', "don't use mutable default arguments"),
     (r'\butil\.Abort\b', "directly use error.Abort"),
+    (r'^@(\w*\.)?cachefunc', "module-level @cachefunc is risky, please avoid"),
     (r'^import Queue', "don't use Queue, use util.queue + util.empty"),
     (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
     (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
@@ -455,8 +457,27 @@
   [],
 ]
 
+py3pats = [
+  [
+    (r'os\.environ', "use encoding.environ instead (py3)"),
+    (r'os\.name', "use pycompat.osname instead (py3)"),
+    (r'os\.getcwd', "use pycompat.getcwd instead (py3)"),
+    (r'os\.sep', "use pycompat.ossep instead (py3)"),
+    (r'os\.pathsep', "use pycompat.ospathsep instead (py3)"),
+    (r'os\.altsep', "use pycompat.osaltsep instead (py3)"),
+    (r'sys\.platform', "use pycompat.sysplatform instead (py3)"),
+    (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
+    (r'os\.getenv', "use encoding.environ.get instead"),
+    (r'os\.setenv', "modifying the environ dict is not preferred"),
+  ],
+  # warnings
+  [],
+]
+
 checks = [
     ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
+    ('python 3', r'.*(hgext|mercurial).*(?<!pycompat)\.py', '',
+            pyfilters, py3pats),
     ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
     ('c', r'.*\.[ch]$', '', cfilters, cpats),
     ('unified test', r'.*\.t$', '', utestfilters, utestpats),
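
As a sketch of what the new 'python 3' checks catch (the snippet is
hypothetical; only the module names are real), code like the following would
be flagged, with the preferred spellings shown underneath:

    import os

    path = os.environ.get('PATH')  # flagged: use encoding.environ (py3)
    sep = os.sep                   # flagged: use pycompat.ossep (py3)

    # preferred spellings under the new rules:
    from mercurial import encoding, pycompat

    path = encoding.environ.get(b'PATH')
    sep = pycompat.ossep
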
--- a/contrib/chg/Makefile	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/chg/Makefile	Wed Jan 18 11:43:36 2017 -0500
@@ -1,7 +1,7 @@
 HG = $(CURDIR)/../../hg
 
 TARGET = chg
-SRCS = chg.c hgclient.c util.c
+SRCS = chg.c hgclient.c procutil.c util.c
 OBJS = $(SRCS:.c=.o)
 
 CFLAGS ?= -O2 -Wall -Wextra -pedantic -g
@@ -24,8 +24,9 @@
 $(TARGET): $(OBJS)
 	$(CC) $(LDFLAGS) -o $@ $(OBJS)
 
-chg.o: hgclient.h util.h
-hgclient.o: hgclient.h util.h
+chg.o: hgclient.h procutil.h util.h
+hgclient.o: hgclient.h procutil.h util.h
+procutil.o: procutil.h util.h
 util.o: util.h
 
 .PHONY: install
@@ -40,7 +41,6 @@
 	[ -d $(CHGSOCKDIR) ] || ( umask 077; mkdir $(CHGSOCKDIR) )
 	$(HG) serve --cwd / --cmdserver chgunix \
 		--address $(CHGSOCKNAME) \
-		--config extensions.chgserver= \
 		--config cmdserver.log=/dev/stderr
 
 .PHONY: clean
--- a/contrib/chg/chg.c	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/chg/chg.c	Wed Jan 18 11:43:36 2017 -0500
@@ -23,37 +23,29 @@
 #include <unistd.h>
 
 #include "hgclient.h"
+#include "procutil.h"
 #include "util.h"
 
-#ifndef UNIX_PATH_MAX
-#define UNIX_PATH_MAX (sizeof(((struct sockaddr_un *)NULL)->sun_path))
+#ifndef PATH_MAX
+#define PATH_MAX 4096
 #endif
 
 struct cmdserveropts {
-	char sockname[UNIX_PATH_MAX];
-	char redirectsockname[UNIX_PATH_MAX];
-	char lockfile[UNIX_PATH_MAX];
+	char sockname[PATH_MAX];
+	char initsockname[PATH_MAX];
+	char redirectsockname[PATH_MAX];
 	size_t argsize;
 	const char **args;
-	int lockfd;
-	int sockdirfd;
 };
 
 static void initcmdserveropts(struct cmdserveropts *opts) {
 	memset(opts, 0, sizeof(struct cmdserveropts));
-	opts->lockfd = -1;
-	opts->sockdirfd = -1;
 }
 
 static void freecmdserveropts(struct cmdserveropts *opts) {
 	free(opts->args);
 	opts->args = NULL;
 	opts->argsize = 0;
-	assert(opts->lockfd == -1 && "should be closed by unlockcmdserver()");
-	if (opts->sockdirfd >= 0) {
-		close(opts->sockdirfd);
-		opts->sockdirfd = -1;
-	}
 }
 
 /*
@@ -136,68 +128,46 @@
 		abortmsg("insecure sockdir %s", sockdir);
 }
 
+static void getdefaultsockdir(char sockdir[], size_t size)
+{
+	/* by default, put socket file in secure directory
+	 * (${XDG_RUNTIME_DIR}/chg, or /${TMPDIR:-tmp}/chg$UID)
+	 * (${XDG_RUNTIME_DIR}/chg, or ${TMPDIR:-/tmp}/chg$UID)
+	const char *runtimedir = getenv("XDG_RUNTIME_DIR");
+	int r;
+	if (runtimedir) {
+		r = snprintf(sockdir, size, "%s/chg", runtimedir);
+	} else {
+		const char *tmpdir = getenv("TMPDIR");
+		if (!tmpdir)
+			tmpdir = "/tmp";
+		r = snprintf(sockdir, size, "%s/chg%d", tmpdir, geteuid());
+	}
+	if (r < 0 || (size_t)r >= size)
+		abortmsg("too long TMPDIR (r = %d)", r);
+}
+
 static void setcmdserveropts(struct cmdserveropts *opts)
 {
 	int r;
-	char sockdir[UNIX_PATH_MAX];
+	char sockdir[PATH_MAX];
 	const char *envsockname = getenv("CHGSOCKNAME");
 	if (!envsockname) {
-		/* by default, put socket file in secure directory
-		 * (permission of socket file may be ignored on some Unices) */
-		const char *tmpdir = getenv("TMPDIR");
-		if (!tmpdir)
-			tmpdir = "/tmp";
-		r = snprintf(sockdir, sizeof(sockdir), "%s/chg%d",
-			     tmpdir, geteuid());
-		if (r < 0 || (size_t)r >= sizeof(sockdir))
-			abortmsg("too long TMPDIR (r = %d)", r);
+		getdefaultsockdir(sockdir, sizeof(sockdir));
 		preparesockdir(sockdir);
 	}
 
 	const char *basename = (envsockname) ? envsockname : sockdir;
 	const char *sockfmt = (envsockname) ? "%s" : "%s/server";
-	const char *lockfmt = (envsockname) ? "%s.lock" : "%s/lock";
 	r = snprintf(opts->sockname, sizeof(opts->sockname), sockfmt, basename);
 	if (r < 0 || (size_t)r >= sizeof(opts->sockname))
 		abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r);
-	r = snprintf(opts->lockfile, sizeof(opts->lockfile), lockfmt, basename);
-	if (r < 0 || (size_t)r >= sizeof(opts->lockfile))
+	r = snprintf(opts->initsockname, sizeof(opts->initsockname),
+			"%s.%u", opts->sockname, (unsigned)getpid());
+	if (r < 0 || (size_t)r >= sizeof(opts->initsockname))
 		abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r);
 }
 
-/*
- * Acquire a file lock that indicates a client is trying to start and connect
- * to a server, before executing a command. The lock is released upon exit or
- * explicit unlock. Will block if the lock is held by another process.
- */
-static void lockcmdserver(struct cmdserveropts *opts)
-{
-	if (opts->lockfd == -1) {
-		opts->lockfd = open(opts->lockfile,
-				    O_RDWR | O_CREAT | O_NOFOLLOW, 0600);
-		if (opts->lockfd == -1)
-			abortmsgerrno("cannot create lock file %s",
-				      opts->lockfile);
-		fsetcloexec(opts->lockfd);
-	}
-	int r = flock(opts->lockfd, LOCK_EX);
-	if (r == -1)
-		abortmsgerrno("cannot acquire lock");
-}
-
-/*
- * Release the file lock held by calling lockcmdserver. Will do nothing if
- * lockcmdserver is not called.
- */
-static void unlockcmdserver(struct cmdserveropts *opts)
-{
-	if (opts->lockfd == -1)
-		return;
-	flock(opts->lockfd, LOCK_UN);
-	close(opts->lockfd);
-	opts->lockfd = -1;
-}
-
 static const char *gethgcmd(void)
 {
 	static const char *hgcmd = NULL;
@@ -223,9 +193,8 @@
 		hgcmd,
 		"serve",
 		"--cmdserver", "chgunix",
-		"--address", opts->sockname,
+		"--address", opts->initsockname,
 		"--daemon-postexec", "chdir:/",
-		"--config", "extensions.chgserver=",
 	};
 	size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
 	size_t argsize = baseargvsize + opts->argsize + 1;
@@ -248,7 +217,7 @@
 	static const struct timespec sleepreq = {0, 10 * 1000000};
 	int pst = 0;
 
-	debugmsg("try connect to %s repeatedly", opts->sockname);
+	debugmsg("try connect to %s repeatedly", opts->initsockname);
 
 	unsigned int timeoutsec = 60;  /* default: 60 seconds */
 	const char *timeoutenv = getenv("CHGTIMEOUT");
@@ -256,9 +225,15 @@
 		sscanf(timeoutenv, "%u", &timeoutsec);
 
 	for (unsigned int i = 0; !timeoutsec || i < timeoutsec * 100; i++) {
-		hgclient_t *hgc = hgc_open(opts->sockname);
-		if (hgc)
+		hgclient_t *hgc = hgc_open(opts->initsockname);
+		if (hgc) {
+			debugmsg("rename %s to %s", opts->initsockname,
+					opts->sockname);
+			int r = rename(opts->initsockname, opts->sockname);
+			if (r != 0)
+				abortmsgerrno("cannot rename");
 			return hgc;
+		}
 
 		if (pid > 0) {
 			/* collect zombie if child process fails to start */
@@ -270,7 +245,7 @@
 		nanosleep(&sleepreq, NULL);
 	}
 
-	abortmsg("timed out waiting for cmdserver %s", opts->sockname);
+	abortmsg("timed out waiting for cmdserver %s", opts->initsockname);
 	return NULL;
 
 cleanup:
@@ -298,14 +273,6 @@
 	if (hgc)
 		return hgc;
 
-	lockcmdserver(opts);
-	hgc = hgc_open(sockname);
-	if (hgc) {
-		unlockcmdserver(opts);
-		debugmsg("cmdserver is started by another process");
-		return hgc;
-	}
-
 	/* prevent us from being connected to an outdated server: we were
 	 * told by a server to redirect to opts->redirectsockname and that
 	 * address does not work. we do not want to connect to the server
@@ -313,7 +280,7 @@
 	if (sockname == opts->redirectsockname)
 		unlink(opts->sockname);
 
-	debugmsg("start cmdserver at %s", opts->sockname);
+	debugmsg("start cmdserver at %s", opts->initsockname);
 
 	pid_t pid = fork();
 	if (pid < 0)
@@ -324,7 +291,6 @@
 		hgc = retryconnectcmdserver(opts, pid);
 	}
 
-	unlockcmdserver(opts);
 	return hgc;
 }
 
@@ -338,215 +304,6 @@
 	}
 }
 
-static pid_t pagerpid = 0;
-static pid_t peerpgid = 0;
-static pid_t peerpid = 0;
-
-static void forwardsignal(int sig)
-{
-	assert(peerpid > 0);
-	if (kill(peerpid, sig) < 0)
-		abortmsgerrno("cannot kill %d", peerpid);
-	debugmsg("forward signal %d", sig);
-}
-
-static void forwardsignaltogroup(int sig)
-{
-	/* prefer kill(-pgid, sig), fallback to pid if pgid is invalid */
-	pid_t killpid = peerpgid > 1 ? -peerpgid : peerpid;
-	if (kill(killpid, sig) < 0)
-		abortmsgerrno("cannot kill %d", killpid);
-	debugmsg("forward signal %d to %d", sig, killpid);
-}
-
-static void handlestopsignal(int sig)
-{
-	sigset_t unblockset, oldset;
-	struct sigaction sa, oldsa;
-	if (sigemptyset(&unblockset) < 0)
-		goto error;
-	if (sigaddset(&unblockset, sig) < 0)
-		goto error;
-	memset(&sa, 0, sizeof(sa));
-	sa.sa_handler = SIG_DFL;
-	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
-		goto error;
-
-	forwardsignal(sig);
-	if (raise(sig) < 0)  /* resend to self */
-		goto error;
-	if (sigaction(sig, &sa, &oldsa) < 0)
-		goto error;
-	if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0)
-		goto error;
-	/* resent signal will be handled before sigprocmask() returns */
-	if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
-		goto error;
-	if (sigaction(sig, &oldsa, NULL) < 0)
-		goto error;
-	return;
-
-error:
-	abortmsgerrno("failed to handle stop signal");
-}
-
-static void handlechildsignal(int sig UNUSED_)
-{
-	if (peerpid == 0 || pagerpid == 0)
-		return;
-	/* if pager exits, notify the server with SIGPIPE immediately.
-	 * otherwise the server won't get SIGPIPE if it does not write
-	 * anything. (issue5278) */
-	if (waitpid(pagerpid, NULL, WNOHANG) == pagerpid)
-		kill(peerpid, SIGPIPE);
-}
-
-static void setupsignalhandler(const hgclient_t *hgc)
-{
-	pid_t pid = hgc_peerpid(hgc);
-	if (pid <= 0)
-		return;
-	peerpid = pid;
-
-	pid_t pgid = hgc_peerpgid(hgc);
-	peerpgid = (pgid <= 1 ? 0 : pgid);
-
-	struct sigaction sa;
-	memset(&sa, 0, sizeof(sa));
-	sa.sa_handler = forwardsignaltogroup;
-	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
-		goto error;
-
-	if (sigaction(SIGHUP, &sa, NULL) < 0)
-		goto error;
-	if (sigaction(SIGINT, &sa, NULL) < 0)
-		goto error;
-
-	/* terminate frontend by double SIGTERM in case of server freeze */
-	sa.sa_handler = forwardsignal;
-	sa.sa_flags |= SA_RESETHAND;
-	if (sigaction(SIGTERM, &sa, NULL) < 0)
-		goto error;
-
-	/* notify the worker about window resize events */
-	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGWINCH, &sa, NULL) < 0)
-		goto error;
-	/* propagate job control requests to worker */
-	sa.sa_handler = forwardsignal;
-	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGCONT, &sa, NULL) < 0)
-		goto error;
-	sa.sa_handler = handlestopsignal;
-	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGTSTP, &sa, NULL) < 0)
-		goto error;
-	/* get notified when pager exits */
-	sa.sa_handler = handlechildsignal;
-	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGCHLD, &sa, NULL) < 0)
-		goto error;
-
-	return;
-
-error:
-	abortmsgerrno("failed to set up signal handlers");
-}
-
-static void restoresignalhandler()
-{
-	struct sigaction sa;
-	memset(&sa, 0, sizeof(sa));
-	sa.sa_handler = SIG_DFL;
-	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
-		goto error;
-
-	if (sigaction(SIGHUP, &sa, NULL) < 0)
-		goto error;
-	if (sigaction(SIGTERM, &sa, NULL) < 0)
-		goto error;
-	if (sigaction(SIGWINCH, &sa, NULL) < 0)
-		goto error;
-	if (sigaction(SIGCONT, &sa, NULL) < 0)
-		goto error;
-	if (sigaction(SIGTSTP, &sa, NULL) < 0)
-		goto error;
-	if (sigaction(SIGCHLD, &sa, NULL) < 0)
-		goto error;
-
-	/* ignore Ctrl+C while shutting down to make pager exits cleanly */
-	sa.sa_handler = SIG_IGN;
-	if (sigaction(SIGINT, &sa, NULL) < 0)
-		goto error;
-
-	peerpid = 0;
-	return;
-
-error:
-	abortmsgerrno("failed to restore signal handlers");
-}
-
-/* This implementation is based on hgext/pager.py (post 369741ef7253)
- * Return 0 if pager is not started, or pid of the pager */
-static pid_t setuppager(hgclient_t *hgc, const char *const args[],
-		       size_t argsize)
-{
-	const char *pagercmd = hgc_getpager(hgc, args, argsize);
-	if (!pagercmd)
-		return 0;
-
-	int pipefds[2];
-	if (pipe(pipefds) < 0)
-		return 0;
-	pid_t pid = fork();
-	if (pid < 0)
-		goto error;
-	if (pid > 0) {
-		close(pipefds[0]);
-		if (dup2(pipefds[1], fileno(stdout)) < 0)
-			goto error;
-		if (isatty(fileno(stderr))) {
-			if (dup2(pipefds[1], fileno(stderr)) < 0)
-				goto error;
-		}
-		close(pipefds[1]);
-		hgc_attachio(hgc);  /* reattach to pager */
-		return pid;
-	} else {
-		dup2(pipefds[0], fileno(stdin));
-		close(pipefds[0]);
-		close(pipefds[1]);
-
-		int r = execlp("/bin/sh", "/bin/sh", "-c", pagercmd, NULL);
-		if (r < 0) {
-			abortmsgerrno("cannot start pager '%s'", pagercmd);
-		}
-		return 0;
-	}
-
-error:
-	close(pipefds[0]);
-	close(pipefds[1]);
-	abortmsgerrno("failed to prepare pager");
-	return 0;
-}
-
-static void waitpager(pid_t pid)
-{
-	/* close output streams to notify the pager its input ends */
-	fclose(stdout);
-	fclose(stderr);
-	while (1) {
-		pid_t ret = waitpid(pid, NULL, 0);
-		if (ret == -1 && errno == EINTR)
-			continue;
-		break;
-	}
-}
-
 /* Run instructions sent from the server like unlink and set redirect path
  * Return 1 if reconnect is needed, otherwise 0 */
 static int runinstructions(struct cmdserveropts *opts, const char **insts)
@@ -671,14 +428,12 @@
 				 gethgcmd());
 	}
 
-	setupsignalhandler(hgc);
-	pagerpid = setuppager(hgc, argv + 1, argc - 1);
+	setupsignalhandler(hgc_peerpid(hgc), hgc_peerpgid(hgc));
 	int exitcode = hgc_runcommand(hgc, argv + 1, argc - 1);
 	restoresignalhandler();
 	hgc_close(hgc);
 	freecmdserveropts(&opts);
-	if (pagerpid)
-		waitpager(pagerpid);
+	waitpager();
 
 	return exitcode;
 }
--- a/contrib/chg/hgclient.c	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/chg/hgclient.c	Wed Jan 18 11:43:36 2017 -0500
@@ -23,6 +23,7 @@
 #include <unistd.h>
 
 #include "hgclient.h"
+#include "procutil.h"
 #include "util.h"
 
 enum {
@@ -31,10 +32,10 @@
 	/* cHg extension: */
 	CAP_ATTACHIO = 0x0100,
 	CAP_CHDIR = 0x0200,
-	CAP_GETPAGER = 0x0400,
 	CAP_SETENV = 0x0800,
 	CAP_SETUMASK = 0x1000,
 	CAP_VALIDATE = 0x2000,
+	CAP_SETPROCNAME = 0x4000,
 };
 
 typedef struct {
@@ -47,10 +48,10 @@
 	{"runcommand", CAP_RUNCOMMAND},
 	{"attachio", CAP_ATTACHIO},
 	{"chdir", CAP_CHDIR},
-	{"getpager", CAP_GETPAGER},
 	{"setenv", CAP_SETENV},
 	{"setumask", CAP_SETUMASK},
 	{"validate", CAP_VALIDATE},
+	{"setprocname", CAP_SETPROCNAME},
 	{NULL, 0},  /* terminator */
 };
 
@@ -71,6 +72,8 @@
 
 static const size_t defaultdatasize = 4096;
 
+static void attachio(hgclient_t *hgc);
+
 static void initcontext(context_t *ctx)
 {
 	ctx->ch = '\0';
@@ -237,16 +240,27 @@
 	ctx->data[ctx->datasize] = '\0';  /* terminate last string */
 
 	const char **args = unpackcmdargsnul(ctx);
-	if (!args[0] || !args[1])
-		abortmsg("missing command or cwd in system request");
-	debugmsg("run '%s' at '%s'", args[0], args[1]);
-	int32_t r = runshellcmd(args[0], args + 2, args[1]);
-	free(args);
+	if (!args[0] || !args[1] || !args[2])
+		abortmsg("missing type or command or cwd in system request");
+	if (strcmp(args[0], "system") == 0) {
+		debugmsg("run '%s' at '%s'", args[1], args[2]);
+		int32_t r = runshellcmd(args[1], args + 3, args[2]);
+		free(args);
 
-	uint32_t r_n = htonl(r);
-	memcpy(ctx->data, &r_n, sizeof(r_n));
-	ctx->datasize = sizeof(r_n);
-	writeblock(hgc);
+		uint32_t r_n = htonl(r);
+		memcpy(ctx->data, &r_n, sizeof(r_n));
+		ctx->datasize = sizeof(r_n);
+		writeblock(hgc);
+	} else if (strcmp(args[0], "pager") == 0) {
+		setuppager(args[1]);
+		if (hgc->capflags & CAP_ATTACHIO)
+			attachio(hgc);
+		/* unblock the server */
+		static const char emptycmd[] = "\n";
+		sendall(hgc->sockfd, emptycmd, sizeof(emptycmd) - 1);
+	} else {
+		abortmsg("unknown type in system request: %s", args[0]);
+	}
 }
 
 /* Read response of command execution until receiving 'r'-esult */
@@ -350,6 +364,16 @@
 	debugmsg("capflags=0x%04x, pid=%d", hgc->capflags, hgc->pid);
 }
 
+static void updateprocname(hgclient_t *hgc)
+{
+	int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize,
+			"chg[worker/%d]", (int)getpid());
+	if (r < 0 || (size_t)r >= hgc->ctx.maxdatasize)
+		abortmsg("insufficient buffer to write procname (r = %d)", r);
+	hgc->ctx.datasize = (size_t)r;
+	writeblockrequest(hgc, "setprocname");
+}
+
 static void attachio(hgclient_t *hgc)
 {
 	debugmsg("request attachio");
@@ -425,15 +449,49 @@
 
 	struct sockaddr_un addr;
 	addr.sun_family = AF_UNIX;
-	strncpy(addr.sun_path, sockname, sizeof(addr.sun_path));
+
+	/* use chdir to work around small sizeof(sun_path) */
+	int bakfd = -1;
+	const char *basename = sockname;
+	{
+		const char *split = strrchr(sockname, '/');
+		if (split && split != sockname) {
+			if (split[1] == '\0')
+				abortmsg("sockname cannot end with a slash");
+			size_t len = split - sockname;
+			char sockdir[len + 1];
+			memcpy(sockdir, sockname, len);
+			sockdir[len] = '\0';
+
+			bakfd = open(".", O_DIRECTORY);
+			if (bakfd == -1)
+				abortmsgerrno("cannot open cwd");
+
+			int r = chdir(sockdir);
+			if (r != 0)
+				abortmsgerrno("cannot chdir %s", sockdir);
+
+			basename = split + 1;
+		}
+	}
+	if (strlen(basename) >= sizeof(addr.sun_path))
+		abortmsg("sockname is too long: %s", basename);
+	strncpy(addr.sun_path, basename, sizeof(addr.sun_path));
 	addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
 
+	/* real connect */
 	int r = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 	if (r < 0) {
+		if (errno != ENOENT && errno != ECONNREFUSED)
+			abortmsgerrno("cannot connect to %s", sockname);
+	}
+	if (bakfd != -1) {
+		fchdirx(bakfd);
+		close(bakfd);
+	}
+	if (r < 0) {
 		close(fd);
-		if (errno == ENOENT || errno == ECONNREFUSED)
-			return NULL;
-		abortmsgerrno("cannot connect to %s", addr.sun_path);
+		return NULL;
 	}
 	debugmsg("connected to %s", addr.sun_path);
 
@@ -445,6 +503,8 @@
 	readhello(hgc);
 	if (!(hgc->capflags & CAP_RUNCOMMAND))
 		abortmsg("insufficient capability: runcommand");
+	if (hgc->capflags & CAP_SETPROCNAME)
+		updateprocname(hgc);
 	if (hgc->capflags & CAP_ATTACHIO)
 		attachio(hgc);
 	if (hgc->capflags & CAP_CHDIR)
@@ -545,31 +605,6 @@
 }
 
 /*!
- * Get pager command for the given Mercurial command args
- *
- * If no pager enabled, returns NULL. The return value becomes invalid
- * once you run another request to hgc.
- */
-const char *hgc_getpager(hgclient_t *hgc, const char *const args[],
-			 size_t argsize)
-{
-	assert(hgc);
-
-	if (!(hgc->capflags & CAP_GETPAGER))
-		return NULL;
-
-	packcmdargs(&hgc->ctx, args, argsize);
-	writeblockrequest(hgc, "getpager");
-	handleresponse(hgc);
-
-	if (hgc->ctx.datasize < 1 || hgc->ctx.data[0] == '\0')
-		return NULL;
-	enlargecontext(&hgc->ctx, hgc->ctx.datasize + 1);
-	hgc->ctx.data[hgc->ctx.datasize] = '\0';
-	return hgc->ctx.data;
-}
-
-/*!
  * Update server's environment variables
  *
  * @param envp  list of environment variables in "NAME=VALUE" format,
--- a/contrib/chg/hgclient.h	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/chg/hgclient.h	Wed Jan 18 11:43:36 2017 -0500
@@ -25,8 +25,6 @@
 			  size_t argsize);
 int hgc_runcommand(hgclient_t *hgc, const char *const args[], size_t argsize);
 void hgc_attachio(hgclient_t *hgc);
-const char *hgc_getpager(hgclient_t *hgc, const char *const args[],
-			 size_t argsize);
 void hgc_setenv(hgclient_t *hgc, const char *const envp[]);
 
 #endif  /* HGCLIENT_H_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/chg/procutil.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,227 @@
+/*
+ * Utilities for process handling - signals and subprocesses (e.g. pager)
+ *
+ * Copyright (c) 2011 Yuya Nishihara <yuya@tcha.org>
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU General Public License version 2 or any later version.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "procutil.h"
+#include "util.h"
+
+static pid_t pagerpid = 0;
+static pid_t peerpgid = 0;
+static pid_t peerpid = 0;
+
+static void forwardsignal(int sig)
+{
+	assert(peerpid > 0);
+	if (kill(peerpid, sig) < 0)
+		abortmsgerrno("cannot kill %d", peerpid);
+	debugmsg("forward signal %d", sig);
+}
+
+static void forwardsignaltogroup(int sig)
+{
+	/* prefer kill(-pgid, sig), fallback to pid if pgid is invalid */
+	pid_t killpid = peerpgid > 1 ? -peerpgid : peerpid;
+	if (kill(killpid, sig) < 0)
+		abortmsgerrno("cannot kill %d", killpid);
+	debugmsg("forward signal %d to %d", sig, killpid);
+}
+
+static void handlestopsignal(int sig)
+{
+	sigset_t unblockset, oldset;
+	struct sigaction sa, oldsa;
+	if (sigemptyset(&unblockset) < 0)
+		goto error;
+	if (sigaddset(&unblockset, sig) < 0)
+		goto error;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sa.sa_flags = SA_RESTART;
+	if (sigemptyset(&sa.sa_mask) < 0)
+		goto error;
+
+	forwardsignal(sig);
+	if (raise(sig) < 0)  /* resend to self */
+		goto error;
+	if (sigaction(sig, &sa, &oldsa) < 0)
+		goto error;
+	if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0)
+		goto error;
+	/* resent signal will be handled before sigprocmask() returns */
+	if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
+		goto error;
+	if (sigaction(sig, &oldsa, NULL) < 0)
+		goto error;
+	return;
+
+error:
+	abortmsgerrno("failed to handle stop signal");
+}
+
+static void handlechildsignal(int sig UNUSED_)
+{
+	if (peerpid == 0 || pagerpid == 0)
+		return;
+	/* if pager exits, notify the server with SIGPIPE immediately.
+	 * otherwise the server won't get SIGPIPE if it does not write
+	 * anything. (issue5278) */
+	if (waitpid(pagerpid, NULL, WNOHANG) == pagerpid)
+		kill(peerpid, SIGPIPE);
+}
+
+void setupsignalhandler(pid_t pid, pid_t pgid)
+{
+	if (pid <= 0)
+		return;
+	peerpid = pid;
+	peerpgid = (pgid <= 1 ? 0 : pgid);
+
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = forwardsignaltogroup;
+	sa.sa_flags = SA_RESTART;
+	if (sigemptyset(&sa.sa_mask) < 0)
+		goto error;
+
+	if (sigaction(SIGHUP, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGINT, &sa, NULL) < 0)
+		goto error;
+
+	/* terminate frontend by double SIGTERM in case of server freeze */
+	sa.sa_handler = forwardsignal;
+	sa.sa_flags |= SA_RESETHAND;
+	if (sigaction(SIGTERM, &sa, NULL) < 0)
+		goto error;
+
+	/* notify the worker about window resize events */
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+		goto error;
+	/* propagate job control requests to worker */
+	sa.sa_handler = forwardsignal;
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGCONT, &sa, NULL) < 0)
+		goto error;
+	sa.sa_handler = handlestopsignal;
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+		goto error;
+	/* get notified when pager exits */
+	sa.sa_handler = handlechildsignal;
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGCHLD, &sa, NULL) < 0)
+		goto error;
+
+	return;
+
+error:
+	abortmsgerrno("failed to set up signal handlers");
+}
+
+void restoresignalhandler(void)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sa.sa_flags = SA_RESTART;
+	if (sigemptyset(&sa.sa_mask) < 0)
+		goto error;
+
+	if (sigaction(SIGHUP, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGTERM, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGCONT, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGCHLD, &sa, NULL) < 0)
+		goto error;
+
+	/* ignore Ctrl+C while shutting down so the pager exits cleanly */
+	sa.sa_handler = SIG_IGN;
+	if (sigaction(SIGINT, &sa, NULL) < 0)
+		goto error;
+
+	peerpid = 0;
+	return;
+
+error:
+	abortmsgerrno("failed to restore signal handlers");
+}
+
+/* This implementation is based on hgext/pager.py (post 369741ef7253)
+ * Return 0 if pager is not started, or pid of the pager */
+pid_t setuppager(const char *pagercmd)
+{
+	assert(pagerpid == 0);
+	if (!pagercmd)
+		return 0;
+
+	int pipefds[2];
+	if (pipe(pipefds) < 0)
+		return 0;
+	pid_t pid = fork();
+	if (pid < 0)
+		goto error;
+	if (pid > 0) {
+		close(pipefds[0]);
+		if (dup2(pipefds[1], fileno(stdout)) < 0)
+			goto error;
+		if (isatty(fileno(stderr))) {
+			if (dup2(pipefds[1], fileno(stderr)) < 0)
+				goto error;
+		}
+		close(pipefds[1]);
+		pagerpid = pid;
+		return pid;
+	} else {
+		dup2(pipefds[0], fileno(stdin));
+		close(pipefds[0]);
+		close(pipefds[1]);
+
+		int r = execlp("/bin/sh", "/bin/sh", "-c", pagercmd, NULL);
+		if (r < 0) {
+			abortmsgerrno("cannot start pager '%s'", pagercmd);
+		}
+		return 0;
+	}
+
+error:
+	close(pipefds[0]);
+	close(pipefds[1]);
+	abortmsgerrno("failed to prepare pager");
+	return 0;
+}
+
+void waitpager(void)
+{
+	if (pagerpid == 0)
+		return;
+
+	/* close output streams to notify the pager its input ends */
+	fclose(stdout);
+	fclose(stderr);
+	while (1) {
+		pid_t ret = waitpid(pagerpid, NULL, 0);
+		if (ret == -1 && errno == EINTR)
+			continue;
+		break;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/chg/procutil.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,21 @@
+/*
+ * Utilities for process handling - signals and subprocesses (e.g. pager)
+ *
+ * Copyright (c) 2011 Yuya Nishihara <yuya@tcha.org>
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU General Public License version 2 or any later version.
+ */
+
+#ifndef PROCUTIL_H_
+#define PROCUTIL_H_
+
+#include <unistd.h>
+
+void restoresignalhandler(void);
+void setupsignalhandler(pid_t pid, pid_t pgid);
+
+pid_t setuppager(const char *pagercmd);
+void waitpager(void);
+
+#endif /* PROCUTIL_H_ */
--- a/contrib/debugshell.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/debugshell.py	Wed Jan 18 11:43:36 2017 -0500
@@ -18,7 +18,7 @@
         'mercurial': mercurial,
         'repo': repo,
         'cl': repo.changelog,
-        'mf': repo.manifest,
+        'mf': repo.manifestlog,
     }
 
     code.interact(msg, local=objects)
@@ -27,7 +27,7 @@
     import IPython
 
     cl = repo.changelog
-    mf = repo.manifest
+    mf = repo.manifestlog
     cl, mf # use variables to appease pyflakes
 
     IPython.embed()
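
The debugshell change follows the manifest-to-manifestlog API migration in
this series; a minimal sketch of the old and new spellings (assuming an
existing repo object):

    ctx = repo['tip']
    node = ctx.manifestnode()
    # before: mf = repo.manifest.read(node)
    mf = repo.manifestlog[node].read()  # manifestlog is the new entry point
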
--- a/contrib/import-checker.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/import-checker.py	Wed Jan 18 11:43:36 2017 -0500
@@ -391,6 +391,7 @@
     seennonsymbollocal = False
     # The last name to be imported (for sorting).
     lastname = None
+    laststdlib = None
     # Relative import levels encountered so far.
     seenlevels = set()
 
@@ -412,16 +413,18 @@
             name = node.names[0].name
             asname = node.names[0].asname
 
+            stdlib = name in stdlib_modules
+
             # Ignore sorting rules on imports inside blocks.
             if node.col_offset == root_col_offset:
-                if lastname and name < lastname:
+                if lastname and name < lastname and laststdlib == stdlib:
                     yield msg('imports not lexically sorted: %s < %s',
                               name, lastname)
 
-                lastname = name
+            lastname = name
+            laststdlib = stdlib
 
             # stdlib imports should be before local imports.
-            stdlib = name in stdlib_modules
             if stdlib and seenlocal and node.col_offset == root_col_offset:
                 yield msg('stdlib import "%s" follows local import: %s',
                           name, seenlocal)
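
With laststdlib tracked, the lexical-sort check only compares consecutive
imports within the same group (stdlib vs. local). A hypothetical layout that
the old rule would have mis-flagged:

    import sys      # stdlib group, sorted
    import zlib     # stdlib group, sorted
    import common   # local helper: 'common' < 'zlib', but it starts a new
                    # non-stdlib group, so no sort warning is emitted now
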
--- a/contrib/memory.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/memory.py	Wed Jan 18 11:43:36 2017 -0500
@@ -25,8 +25,8 @@
             key = parts[0][2:-1].lower()
             if key in result:
                 result[key] = int(parts[1])
-    ui.write_err(", ".join(["%s: %.1f MiB" % (key, value / 1024.0)
-                            for key, value in result.iteritems()]) + "\n")
+    ui.write_err(", ".join(["%s: %.1f MiB" % (k, v / 1024.0)
+                            for k, v in result.iteritems()]) + "\n")
 
 def extsetup(ui):
     atexit.register(memusage, ui)
--- a/contrib/perf.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/perf.py	Wed Jan 18 11:43:36 2017 -0500
@@ -25,6 +25,7 @@
 import sys
 import time
 from mercurial import (
+    bdiff,
     changegroup,
     cmdutil,
     commands,
@@ -33,7 +34,6 @@
     extensions,
     mdiff,
     merge,
-    revlog,
     util,
 )
 
@@ -135,13 +135,14 @@
 
     if opts is None:
         opts = {}
-    # redirect all to stderr
-    ui = ui.copy()
-    uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
-    if uifout:
-        # for "historical portability":
-        # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
-        uifout.set(ui.ferr)
+    # redirect all to stderr unless buffer api is in use
+    if not ui._buffers:
+        ui = ui.copy()
+        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
+        if uifout:
+            # for "historical portability":
+            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
+            uifout.set(ui.ferr)
 
     # get a formatter
     uiformatter = getattr(ui, 'formatter', None)
@@ -565,8 +566,8 @@
     ctx = scmutil.revsingle(repo, rev, rev)
     t = ctx.manifestnode()
     def d():
-        repo.manifest.clearcaches()
-        repo.manifest.read(t)
+        repo.manifestlog.clearcaches()
+        repo.manifestlog[t].read()
     timer(d)
     fm.end()
 
@@ -746,6 +747,64 @@
     timer(d)
     fm.end()
 
+@command('perfbdiff', revlogopts + formatteropts + [
+    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
+    ('', 'alldata', False, 'test bdiffs for all associated revisions')],
+    '-c|-m|FILE REV')
+def perfbdiff(ui, repo, file_, rev=None, count=None, **opts):
+    """benchmark a bdiff between revisions
+
+    By default, benchmark a bdiff between a revision and its delta parent.
+
+    With ``--count``, benchmark bdiffs between delta parents and self for N
+    revisions starting at the specified revision.
+
+    With ``--alldata``, assume the requested revision is a changeset and
+    measure bdiffs for all changes related to that changeset (manifest
+    and filelogs).
+    """
+    if opts['alldata']:
+        opts['changelog'] = True
+
+    if opts.get('changelog') or opts.get('manifest'):
+        file_, rev = None, file_
+    elif rev is None:
+        raise error.CommandError('perfbdiff', 'invalid arguments')
+
+    textpairs = []
+
+    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)
+
+    startrev = r.rev(r.lookup(rev))
+    for rev in range(startrev, min(startrev + count, len(r) - 1)):
+        if opts['alldata']:
+            # Load revisions associated with changeset.
+            ctx = repo[rev]
+            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+            for pctx in ctx.parents():
+                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+                textpairs.append((pman, mtext))
+
+            # Load filelog revisions by iterating manifest delta.
+            man = ctx.manifest()
+            pman = ctx.p1().manifest()
+            for filename, change in pman.diff(man).items():
+                fctx = repo.file(filename)
+                f1 = fctx.revision(change[0][0] or -1)
+                f2 = fctx.revision(change[1][0] or -1)
+                textpairs.append((f1, f2))
+        else:
+            dp = r.deltaparent(rev)
+            textpairs.append((r.revision(dp), r.revision(rev)))
+
+    def d():
+        for pair in textpairs:
+            bdiff.bdiff(*pair)
+
+    timer, fm = gettimer(ui, opts)
+    timer(d)
+    fm.end()
+
 @command('perfdiffwd', formatteropts)
 def perfdiffwd(ui, repo, **opts):
     """Profile diff of working directory changes"""
@@ -799,6 +858,116 @@
     timer(d)
     fm.end()
 
+@command('perfrevlogchunks', revlogopts + formatteropts +
+         [('e', 'engines', '', 'compression engines to use'),
+          ('s', 'startrev', 0, 'revision to start at')],
+         '-c|-m|FILE')
+def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
+    """Benchmark operations on revlog chunks.
+
+    Logically, each revlog is a collection of fulltext revisions. However,
+    stored within each revlog are "chunks" of possibly compressed data. This
+    data needs to be read and decompressed or compressed and written.
+
+    This command measures the time it takes to read+decompress and recompress
+    chunks in a revlog. It effectively isolates I/O and compression performance.
+    For measurements of higher-level operations like resolving revisions,
+    see ``perfrevlog`` and ``perfrevlogrevision``.
+    """
+    rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
+
+    # Verify engines argument.
+    if engines:
+        engines = set(e.strip() for e in engines.split(','))
+        for engine in engines:
+            try:
+                util.compressionengines[engine]
+            except KeyError:
+                raise error.Abort('unknown compression engine: %s' % engine)
+    else:
+        engines = []
+        for e in util.compengines:
+            engine = util.compengines[e]
+            try:
+                if engine.available():
+                    engine.revlogcompressor().compress('dummy')
+                    engines.append(e)
+            except NotImplementedError:
+                pass
+
+    revs = list(rl.revs(startrev, len(rl) - 1))
+
+    def rlfh(rl):
+        if rl._inline:
+            return getsvfs(repo)(rl.indexfile)
+        else:
+            return getsvfs(repo)(rl.datafile)
+
+    def doread():
+        rl.clearcaches()
+        for rev in revs:
+            rl._chunkraw(rev, rev)
+
+    def doreadcachedfh():
+        rl.clearcaches()
+        fh = rlfh(rl)
+        for rev in revs:
+            rl._chunkraw(rev, rev, df=fh)
+
+    def doreadbatch():
+        rl.clearcaches()
+        rl._chunkraw(revs[0], revs[-1])
+
+    def doreadbatchcachedfh():
+        rl.clearcaches()
+        fh = rlfh(rl)
+        rl._chunkraw(revs[0], revs[-1], df=fh)
+
+    def dochunk():
+        rl.clearcaches()
+        fh = rlfh(rl)
+        for rev in revs:
+            rl._chunk(rev, df=fh)
+
+    chunks = [None]
+
+    def dochunkbatch():
+        rl.clearcaches()
+        fh = rlfh(rl)
+        # Save chunks as a side-effect.
+        chunks[0] = rl._chunks(revs, df=fh)
+
+    def docompress(compressor):
+        rl.clearcaches()
+
+        try:
+            # Swap in the requested compression engine.
+            oldcompressor = rl._compressor
+            rl._compressor = compressor
+            for chunk in chunks[0]:
+                rl.compress(chunk)
+        finally:
+            rl._compressor = oldcompressor
+
+    benches = [
+        (lambda: doread(), 'read'),
+        (lambda: doreadcachedfh(), 'read w/ reused fd'),
+        (lambda: doreadbatch(), 'read batch'),
+        (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
+        (lambda: dochunk(), 'chunk'),
+        (lambda: dochunkbatch(), 'chunk batch'),
+    ]
+
+    for engine in sorted(engines):
+        compressor = util.compengines[engine].revlogcompressor()
+        benches.append((functools.partial(docompress, compressor),
+                        'compress w/ %s' % engine))
+
+    for fn, title in benches:
+        timer, fm = gettimer(ui, opts)
+        timer(fn, title=title)
+        fm.end()
+
 @command('perfrevlogrevision', revlogopts + formatteropts +
          [('', 'cache', False, 'use caches instead of clearing')],
          '-c|-m|FILE REV')
@@ -851,7 +1020,7 @@
                 chunkstart += (rev + 1) * iosize
             chunklength = length(rev)
             b = buffer(data, chunkstart - offset, chunklength)
-            revlog.decompress(b)
+            r.decompress(b)
 
     def dopatch(text, bins):
         if not cache:
@@ -861,7 +1030,7 @@
     def dohash(text):
         if not cache:
             r.clearcaches()
-        r._checkhash(text, node, rev)
+        r.checkhash(text, node, rev=rev)
 
     def dorevision():
         if not cache:
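
At its core, the new perfbdiff command added above times
mercurial.bdiff.bdiff() over pairs of fulltexts. A minimal sketch of the
measured operation (the texts are illustrative):

    from mercurial import bdiff

    old = b"line one\nline two\nline three\n"
    new = b"line one\nline 2\nline three\n"
    delta = bdiff.bdiff(old, new)  # opaque binary delta, as stored in revlogs
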
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/LICENSE	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,27 @@
+Copyright (c) 2016, Gregory Szorc
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/MANIFEST.in	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,5 @@
+graft c-ext
+graft zstd
+include make_cffi.py
+include setup_zstd.py
+include zstd.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/NEWS.rst	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,90 @@
+Version History
+===============
+
+0.6.0 (released 2017-01-14)
+---------------------------
+
+* Support for legacy zstd protocols (build time opt in feature).
+* Automation improvements to test against Python 3.6, latest versions
+  of Tox, more deterministic AppVeyor behavior.
+* CFFI "parser" improved to use a compiler preprocessor instead of rewriting
+  source code manually.
+* Vendored version of zstd updated to 1.1.2.
+* Documentation improvements.
+* Introduce a bench.py script for performing (crude) benchmarks.
+* ZSTD_CCtx instances are now reused across multiple compress() operations.
+* ZstdCompressor.write_to() now has a flush() method.
+* ZstdCompressor.compressobj()'s flush() method now accepts an argument to
+  flush a block (as opposed to ending the stream).
+* Disallow compress(b'') when writing content sizes by default (issue #11).
+
+0.5.2 (released 2016-11-12)
+---------------------------
+
+* more packaging fixes for source distribution
+
+0.5.1 (released 2016-11-12)
+---------------------------
+
+* setup_zstd.py is included in the source distribution
+
+0.5.0 (released 2016-11-10)
+---------------------------
+
+* Vendored version of zstd updated to 1.1.1.
+* Continuous integration for Python 3.6 and 3.7
+* Continuous integration for Conda
+* Added compression and decompression APIs providing similar interfaces
+  to the standard library ``zlib`` and ``bz2`` modules. This allows
+  coding to a common interface.
+* ``zstd.__version__`` is now defined.
+* ``read_from()`` on various APIs now accepts objects implementing the buffer
+  protocol.
+* ``read_from()`` has gained a ``skip_bytes`` argument. This allows callers
+  to pass in an existing buffer with a header without having to create a
+  slice or a new object.
+* Implemented ``ZstdCompressionDict.as_bytes()``.
+* Python's memory allocator is now used instead of ``malloc()``.
+* Low-level zstd data structures are reused in more instances, cutting down
+  on overhead for certain operations.
+* ``distutils`` boilerplate for obtaining an ``Extension`` instance
+  has now been refactored into a standalone ``setup_zstd.py`` file. This
+  allows other projects with ``setup.py`` files to reuse the
+  ``distutils`` code for this project without copying code.
+* The monolithic ``zstd.c`` file has been split into a header file defining
+  types and separate ``.c`` source files for the implementation.
+
+History of the Project
+======================
+
+2016-08-31 - Zstandard 1.0.0 is released and Gregory starts hacking on a
+Python extension for use by the Mercurial project. A very hacky prototype
+is sent to the mercurial-devel list for RFC.
+
+2016-09-03 - Most functionality from Zstandard C API implemented. Source
+code published on https://github.com/indygreg/python-zstandard. Travis-CI
+automation configured. 0.0.1 release on PyPI.
+
+2016-09-05 - After the API was rounded out a bit and support for Python
+2.6 and 2.7 was added, version 0.1 was released to PyPI.
+
+2016-09-05 - After the compressor and decompressor APIs were changed, 0.2
+was released to PyPI.
+
+2016-09-10 - 0.3 is released with a bunch of new features. ZstdCompressor
+now accepts arguments controlling frame parameters. The source size can now
+be declared when performing streaming compression. ZstdDecompressor.decompress()
+is implemented. Compression dictionaries are now cached when using the simple
+compression and decompression APIs. Memory size APIs added.
+ZstdCompressor.read_from() and ZstdDecompressor.read_from() have been
+implemented. This rounds out the major compression/decompression APIs planned
+by the author.
+
+2016-10-02 - 0.3.3 is released with a bug fix for read_from not fully
+decoding a zstd frame (issue #2).
+
+2016-10-02 - 0.4.0 is released with zstd 1.1.0, support for custom read and
+write buffer sizes, and a few bug fixes involving failure to read/write
+all data when buffer sizes were too small to hold remaining data.
+
+2016-11-10 - 0.5.0 is released with zstd 1.1.1 and other enhancements.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/README.rst	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,829 @@
+================
+python-zstandard
+================
+
+This project provides Python bindings for interfacing with the
+`Zstandard <http://www.zstd.net>`_ compression library. A C extension
+and CFFI interface is provided.
+
+The primary goal of the extension is to provide a Pythonic interface to
+the underlying C API. This means exposing most of the features and flexibility
+of the C API while not sacrificing usability or safety that Python provides.
+
+The canonical home for this project is
+https://github.com/indygreg/python-zstandard.
+
+|  |ci-status| |win-ci-status|
+
+State of Project
+================
+
+The project is officially in beta state. The author is reasonably satisfied
+with the current API and believes that functionality works as advertised.
+There may be some backwards-incompatible changes before 1.0, though the
+author does not intend to make any major changes to the Python API.
+
+There is continuous integration for Python versions 2.6, 2.7, and 3.3+
+on Linux x86_64 and Windows x86 and x86_64. The author is reasonably
+confident the extension is stable and works as advertised on these
+platforms.
+
+Expected Changes
+----------------
+
+The author is reasonably confident in the current state of what's
+implemented on the ``ZstdCompressor`` and ``ZstdDecompressor`` types.
+Those APIs likely won't change significantly. Some low-level behavior
+(such as naming and types expected by arguments) may change.
+
+There will likely be arguments added to control the input and output
+buffer sizes (currently, certain operations read and write in chunk
+sizes using zstd's preferred defaults).
+
+There should be an API that accepts an object that conforms to the buffer
+interface and returns an iterator over compressed or decompressed output.
+
+The author is on the fence as to whether to support the extremely
+low level compression and decompression APIs. It could be useful to
+support compression without the framing headers. But the author doesn't
+believe it is a high priority at this time.
+
+The CFFI bindings are half-baked and need to be finished.
+
+Requirements
+============
+
+This extension is designed to run with Python 2.6, 2.7, 3.3, 3.4, and 3.5
+on common platforms (Linux, Windows, and OS X). Only x86_64 is currently
+well-tested as an architecture.
+
+Installing
+==========
+
+This package is uploaded to PyPI at https://pypi.python.org/pypi/zstandard.
+So, to install this package::
+
+   $ pip install zstandard
+
+Binary wheels are made available for some platforms. If you need to
+install from a source distribution, all you should need is a working C
+compiler and the Python development headers/libraries. On many Linux
+distributions, you can install a ``python-dev`` or ``python-devel``
+package to provide these dependencies.
+
+Packages are also uploaded to Anaconda Cloud at
+https://anaconda.org/indygreg/zstandard. See that URL for how to install
+this package with ``conda``.
+
+Performance
+===========
+
+Very crude and non-scientific benchmarking (most benchmarks fall in this
+category because proper benchmarking is hard) shows that the Python bindings
+perform within 10% of the native C implementation.
+
+The following table compares the performance of compressing and decompressing
+a 1.1 GB tar file comprised of the files in a Firefox source checkout. Values
+obtained with the ``zstd`` program are on the left. The remaining columns detail
+performance of various compression APIs in the Python bindings.
+
++-------+-----------------+-----------------+-----------------+---------------+
+| Level | Native          | Simple          | Stream In       | Stream Out    |
+|       | Comp / Decomp   | Comp / Decomp   | Comp / Decomp   | Comp          |
++=======+=================+=================+=================+===============+
+|   1   | 490 / 1338 MB/s | 458 / 1266 MB/s | 407 / 1156 MB/s |  405 MB/s     |
++-------+-----------------+-----------------+-----------------+---------------+
+|   2   | 412 / 1288 MB/s | 381 / 1203 MB/s | 345 / 1128 MB/s |  349 MB/s     |
++-------+-----------------+-----------------+-----------------+---------------+
+|   3   | 342 / 1312 MB/s | 319 / 1182 MB/s | 285 / 1165 MB/s |  287 MB/s     |
++-------+-----------------+-----------------+-----------------+---------------+
+|  11   |  64 / 1506 MB/s |  66 / 1436 MB/s |  56 / 1342 MB/s |   57 MB/s     |
++-------+-----------------+-----------------+-----------------+---------------+
+
+Again, these are very unscientific. But they show that Python is capable of
+compressing at several hundred MB/s and decompressing at over 1 GB/s.
+
+Comparison to Other Python Bindings
+===================================
+
+https://pypi.python.org/pypi/zstd is an alternative Python binding to
+Zstandard. At the time this was written, the latest release of that
+package (1.0.0.2) had the following significant differences from this package:
+
+* It only exposes the simple API for compression and decompression operations.
+  This extension exposes the streaming API, dictionary training, and more.
+* It adds a custom framing header to compressed data and there is no way to
+  disable it. This means that data produced with that module cannot be used by
+  other Zstandard implementations.
+
+Bundling of Zstandard Source Code
+=================================
+
+The source repository for this project contains a vendored copy of the
+Zstandard source code. This is done for a few reasons.
+
+First, Zstandard is relatively new and not yet widely available as a system
+package. Providing a copy of the source code enables the Python C extension
+to be compiled without requiring the user to obtain the Zstandard source code
+separately.
+
+Second, Zstandard has both a stable *public* API and an *experimental* API.
+The *experimental* API is actually quite useful (contains functionality for
+training dictionaries for example), so it is something we wish to expose to
+Python. However, the *experimental* API is only available via static linking.
+Furthermore, the *experimental* API can change at any time. So, control over
+the exact version of the Zstandard library linked against is important to
+ensure known behavior.
+
+Instructions for Building and Testing
+=====================================
+
+Once you have the source code, the extension can be built via setup.py::
+
+   $ python setup.py build_ext
+
+We recommend testing with ``nose``::
+
+   $ nosetests
+
+A Tox configuration is present to test against multiple Python versions::
+
+   $ tox
+
+Tests use the ``hypothesis`` Python package to perform fuzzing. If you
+don't have it, those tests won't run.
+
+There is also an experimental CFFI module. You need the ``cffi`` Python
+package installed to build and test that.
+
+To create a virtualenv with all development dependencies, do something
+like the following::
+
+  # Python 2
+  $ virtualenv venv
+
+  # Python 3
+  $ python3 -m venv venv
+
+  $ source venv/bin/activate
+  $ pip install cffi hypothesis nose tox
+
+API
+===
+
+The compiled C extension provides a ``zstd`` Python module. This module
+exposes the following interfaces.
+
+ZstdCompressor
+--------------
+
+The ``ZstdCompressor`` class provides an interface for performing
+compression operations.
+
+Each instance is associated with parameters that control compression
+behavior. These come from the following named arguments (all optional):
+
+level
+   Integer compression level. Valid values are between 1 and 22.
+dict_data
+   Compression dictionary to use.
+
+   Note: When using dictionary data and ``compress()`` is called multiple
+   times, the ``CompressionParameters`` derived from an integer compression
+   ``level`` and the first compressed data's size will be reused for all
+   subsequent operations. This may not be desirable if source data size
+   varies significantly.
+compression_params
+   A ``CompressionParameters`` instance (overrides the ``level`` value).
+write_checksum
+   Whether a 4 byte checksum should be written with the compressed data.
+   Defaults to False. If True, the decompressor can verify that decompressed
+   data matches the original input data.
+write_content_size
+   Whether the size of the uncompressed data will be written into the
+   header of compressed data. Defaults to False. The data will only be
+   written if the compressor knows the size of the input data. This is
+   likely not true for streaming compression.
+write_dict_id
+   Whether to write the dictionary ID into the compressed data.
+   Defaults to True. The dictionary ID is only written if a dictionary
+   is being used.
+
+Unless specified otherwise, assume that no two methods of ``ZstdCompressor``
+instances can be called from multiple Python threads simultaneously. In other
+words, assume instances are not thread safe unless stated otherwise.
+
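+For illustration, a compressor can be configured with several of these
+options at once (a sketch; ``my_dict_data`` is a hypothetical, previously
+constructed ``ZstdCompressionDict``)::
+
+   cctx = zstd.ZstdCompressor(level=12,
+                              dict_data=my_dict_data,
+                              write_checksum=True,
+                              write_content_size=True)
+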
+Simple API
+^^^^^^^^^^
+
+``compress(data)`` compresses and returns data as a one-shot operation::
+
+   cctx = zstd.ZstdCompressor()
+   compressed = cctx.compress(b'data to compress')
+
+Unless ``compression_params`` or ``dict_data`` are passed to the
+``ZstdCompressor``, each invocation of ``compress()`` will calculate the
+optimal compression parameters for the configured compression ``level`` and
+input data size (some parameters are fine-tuned for small input sizes).
+
+If a compression dictionary is being used, the compression parameters
+determined from the first input's size will be reused for subsequent
+operations.
+
+There is currently a deficiency in zstd's C APIs that makes it difficult
+to round trip empty inputs when ``write_content_size=True``. Attempting
+this will raise a ``ValueError`` unless ``allow_empty=True`` is passed
+to ``compress()``.
+
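+For example, compressing an empty input with content sizes enabled::
+
+   cctx = zstd.ZstdCompressor(write_content_size=True)
+   compressed = cctx.compress(b'', allow_empty=True)
+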
+Streaming Input API
+^^^^^^^^^^^^^^^^^^^
+
+``write_to(fh)`` (which behaves as a context manager) allows you to *stream*
+data into a compressor::
+
+   cctx = zstd.ZstdCompressor(level=10)
+   with cctx.write_to(fh) as compressor:
+       compressor.write(b'chunk 0')
+       compressor.write(b'chunk 1')
+       ...
+
+The argument to ``write_to()`` must have a ``write(data)`` method. As
+compressed data is available, ``write()`` will be called with the compressed
+data as its argument. Many common Python types implement ``write()``, including
+open file handles and ``io.BytesIO``.
+
+``write_to()`` returns an object representing a streaming compressor instance.
+It **must** be used as a context manager. That object's ``write(data)`` method
+is used to feed data into the compressor.
+
+A ``flush()`` method can be called to evict whatever data remains within the
+compressor's internal state into the output object. This may result in 0 or
+more ``write()`` calls to the output object.
+
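+For example (a sketch building on the snippet above)::
+
+   cctx = zstd.ZstdCompressor()
+   with cctx.write_to(fh) as compressor:
+       compressor.write(b'chunk 0')
+       compressor.flush()
+       compressor.write(b'chunk 1')
+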
+If the size of the data being fed to this streaming compressor is known,
+you can declare it before compression begins::
+
+   cctx = zstd.ZstdCompressor()
+   with cctx.write_to(fh, size=data_len) as compressor:
+       compressor.write(chunk0)
+       compressor.write(chunk1)
+       ...
+
+Declaring the size of the source data allows compression parameters to
+be tuned. And if ``write_content_size`` is used, it also results in the
+content size being written into the frame header of the output data.
+
+The size of chunks written to the destination can be specified::
+
+    cctx = zstd.ZstdCompressor()
+    with cctx.write_to(fh, write_size=32768) as compressor:
+        ...
+
+To see how much memory is being used by the streaming compressor::
+
+    cctx = zstd.ZstdCompressor()
+    with cctx.write_to(fh) as compressor:
+        ...
+        byte_size = compressor.memory_size()
+
+Streaming Output API
+^^^^^^^^^^^^^^^^^^^^
+
+``read_from(reader)`` provides a mechanism to stream data out of a compressor
+as an iterator of data chunks::
+
+   cctx = zstd.ZstdCompressor()
+   for chunk in cctx.read_from(fh):
+        # Do something with emitted data.
+
+``read_from()`` accepts an object that has a ``read(size)`` method or conforms
+to the buffer protocol. (``bytes`` and ``memoryview`` are 2 common types that
+provide the buffer protocol.)
+
+Uncompressed data is fetched from the source either by calling ``read(size)``
+or by fetching a slice of data from the object directly (in the case where
+the buffer protocol is being used). The returned iterator consists of chunks
+of compressed data.
+
+If reading from the source via ``read()``, ``read()`` will be called until
+it raises or returns an empty bytes (``b''``). It is perfectly valid for
+the source to deliver fewer bytes than were requested by ``read(size)``.
+
+Like ``write_to()``, ``read_from()`` also accepts a ``size`` argument
+declaring the size of the input stream::
+
+    cctx = zstd.ZstdCompressor()
+    for chunk in cctx.read_from(fh, size=some_int):
+        pass
+
+You can also control the size that data is ``read()`` from the source and
+the ideal size of output chunks::
+
+    cctx = zstd.ZstdCompressor()
+    for chunk in cctx.read_from(fh, read_size=16384, write_size=8192):
+        pass
+
+Unlike ``write_to()``, ``read_from()`` does not give direct control over the
+sizes of chunks fed into the compressor. Instead, chunk sizes will be whatever
+the object being read from delivers. These will often be of a uniform size.
+
+Stream Copying API
+^^^^^^^^^^^^^^^^^^
+
+``copy_stream(ifh, ofh)`` can be used to copy data between 2 streams while
+compressing it::
+
+   cctx = zstd.ZstdCompressor()
+   cctx.copy_stream(ifh, ofh)
+
+For example, say you wish to compress a file::
+
+   cctx = zstd.ZstdCompressor()
+   with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
+       cctx.copy_stream(ifh, ofh)
+
+It is also possible to declare the size of the source stream::
+
+   cctx = zstd.ZstdCompressor()
+   cctx.copy_stream(ifh, ofh, size=len_of_input)
+
+You can also specify the size of the chunks that are ``read()`` from
+and ``write()`` to the streams::
+
+   cctx = zstd.ZstdCompressor()
+   cctx.copy_stream(ifh, ofh, read_size=32768, write_size=16384)
+
+The stream copier returns a 2-tuple of bytes read and written::
+
+   cctx = zstd.ZstdCompressor()
+   read_count, write_count = cctx.copy_stream(ifh, ofh)
+
+Compressor API
+^^^^^^^^^^^^^^
+
+``compressobj()`` returns an object that exposes ``compress(data)`` and
+``flush()`` methods. Each returns compressed data or an empty bytes.
+
+The purpose of ``compressobj()`` is to provide an API-compatible interface
+with ``zlib.compressobj`` and ``bz2.BZ2Compressor``. This allows callers to
+swap in different compressor objects while using the same API.
+
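+Because the interface is compatible, code can be written against the
+generic interface (a sketch; ``compress_chunks`` is a hypothetical helper)::
+
+   def compress_chunks(cobj, chunks):
+       # Works with zlib.compressobj(), bz2.BZ2Compressor() and
+       # zstd compressobj() instances alike.
+       out = [cobj.compress(chunk) for chunk in chunks]
+       out.append(cobj.flush())
+       return b''.join(out)
+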
+``flush()`` accepts an optional argument indicating how to end the stream.
+``zstd.COMPRESSOBJ_FLUSH_FINISH`` (the default) ends the compression stream.
+Once this type of flush is performed, ``compress()`` and ``flush()`` can
+no longer be called. This type of flush **must** be called to end the
+compression context. If not called, returned data may be incomplete.
+
+A ``zstd.COMPRESSOBJ_FLUSH_BLOCK`` argument to ``flush()`` will flush a
+zstd block. Flushes of this type can be performed multiple times. The next
+call to ``compress()`` will begin a new zstd block.
+
+Here is how this API should be used::
+
+   cctx = zstd.ZstdCompressor()
+   cobj = cctx.compressobj()
+   data = cobj.compress(b'raw input 0')
+   data = cobj.compress(b'raw input 1')
+   data = cobj.flush()
+
+Or to flush blocks::
+
+   cctx = zstd.ZstdCompressor()
+   cobj = cctx.compressobj()
+   data = cobj.compress(b'chunk in first block')
+   data = cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
+   data = cobj.compress(b'chunk in second block')
+   data = cobj.flush()
+
+For best performance, keep input chunks under 256KB. This avoids
+extra allocations for a large output object.
+
+It is possible to declare the input size of the data that will be fed into
+the compressor::
+
+   cctx = zstd.ZstdCompressor()
+   cobj = cctx.compressobj(size=6)
+   data = cobj.compress(b'foobar')
+   data = cobj.flush()
+
+ZstdDecompressor
+----------------
+
+The ``ZstdDecompressor`` class provides an interface for performing
+decompression.
+
+Each instance is associated with parameters that control decompression. These
+come from the following named arguments (all optional):
+
+dict_data
+   Compression dictionary to use.
+
+The interface of this class is very similar to ``ZstdCompressor`` (by design).
+
+Unless specified otherwise, assume that no two methods of ``ZstdDecompressor``
+instances can be called from multiple Python threads simultaneously. In other
+words, assume instances are not thread safe unless stated otherwise.
+
+Simple API
+^^^^^^^^^^
+
+``decompress(data)`` can be used to decompress an entire compressed zstd
+frame in a single operation::
+
+    dctx = zstd.ZstdDecompressor()
+    decompressed = dctx.decompress(data)
+
+By default, ``decompress(data)`` will only work on data written with the content
+size encoded in its header. This can be achieved by creating a
+``ZstdCompressor`` with ``write_content_size=True``. If compressed data without
+an embedded content size is seen, ``zstd.ZstdError`` will be raised.
+
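+For example, a round trip with the content size embedded::
+
+    cctx = zstd.ZstdCompressor(write_content_size=True)
+    compressed = cctx.compress(b'data to compress')
+
+    dctx = zstd.ZstdDecompressor()
+    decompressed = dctx.decompress(compressed)
+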
+If the compressed data doesn't have its content size embedded within it,
+decompression can be attempted by specifying the ``max_output_size``
+argument::
+
+    dctx = zstd.ZstdDecompressor()
+    uncompressed = dctx.decompress(data, max_output_size=1048576)
+
+Ideally, ``max_output_size`` will be identical to the decompressed output
+size.
+
+If ``max_output_size`` is too small to hold the decompressed data,
+``zstd.ZstdError`` will be raised.
+
+If ``max_output_size`` is larger than the decompressed data, the allocated
+output buffer will be resized to only use the space required.
+
+Please note that an allocation of the requested ``max_output_size`` will be
+performed every time the method is called. Setting to a very large value could
+result in a lot of work for the memory allocator and may result in
+``MemoryError`` being raised if the allocation fails.
+
+If the exact size of decompressed data is unknown, it is **strongly**
+recommended to use a streaming API.
+
+Streaming Input API
+^^^^^^^^^^^^^^^^^^^
+
+``write_to(fh)`` can be used to incrementally send compressed data to a
+decompressor::
+
+    dctx = zstd.ZstdDecompressor()
+    with dctx.write_to(fh) as decompressor:
+        decompressor.write(compressed_data)
+
+This behaves similarly to ``zstd.ZstdCompressor``: compressed data is written to
+the decompressor by calling ``write(data)`` and decompressed output is written
+to the output object by calling its ``write(data)`` method.
+
+The size of chunks written to the destination can be specified::
+
+    dctx = zstd.ZstdDecompressor()
+    with dctx.write_to(fh, write_size=16384) as decompressor:
+        pass
+
+You can see how much memory is being used by the decompressor::
+
+    dctx = zstd.ZstdDecompressor()
+    with dctx.write_to(fh) as decompressor:
+        byte_size = decompressor.memory_size()
+
+Streaming Output API
+^^^^^^^^^^^^^^^^^^^^
+
+``read_from(fh)`` provides a mechanism to stream decompressed data out of a
+compressed source as an iterator of data chunks::
+
+    dctx = zstd.ZstdDecompressor()
+    for chunk in dctx.read_from(fh):
+        # Do something with original data.
+
+``read_from()`` accepts either a) an object with a ``read(size)`` method that
+will return compressed bytes or b) an object conforming to the buffer protocol
+that can expose its data as a contiguous range of bytes. The ``bytes`` and
+``memoryview`` types expose this buffer protocol.
+
+``read_from()`` returns an iterator whose elements are chunks of the
+decompressed data.
+
+The size of requested ``read()`` from the source can be specified::
+
+    dctx = zstd.ZstdDecompressor()
+    for chunk in dctx.read_from(fh, read_size=16384):
+        pass
+
+It is also possible to skip leading bytes in the input data::
+
+    dctx = zstd.ZstdDecompressor()
+    for chunk in dctx.read_from(fh, skip_bytes=1):
+        pass
+
+Skipping leading bytes is useful if the source data contains extra
+*header* data but you want to avoid the overhead of making a buffer copy
+or allocating a new ``memoryview`` object in order to decompress the data.
+
+Similarly to ``ZstdCompressor.read_from()``, the consumer of the iterator
+controls when data is decompressed. If the iterator isn't consumed,
+decompression is put on hold.
+
+When ``read_from()`` is passed an object conforming to the buffer protocol,
+the behavior may seem similar to what occurs when the simple decompression
+API is used. However, this API works when the decompressed size is unknown.
+Furthermore, if feeding large inputs, the decompressor will work in chunks
+instead of performing a single operation.
+
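+For example, a ``bytes`` instance holding a compressed frame can be fed in
+directly (a sketch; ``compressed`` is assumed to hold complete frame data)::
+
+    dctx = zstd.ZstdDecompressor()
+    for chunk in dctx.read_from(compressed):
+        pass
+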
+Stream Copying API
+^^^^^^^^^^^^^^^^^^
+
+``copy_stream(ifh, ofh)`` can be used to copy data across 2 streams while
+performing decompression::
+
+    dctx = zstd.ZstdDecompressor()
+    dctx.copy_stream(ifh, ofh)
+
+e.g. to decompress a file to another file::
+
+    dctx = zstd.ZstdDecompressor()
+    with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
+        dctx.copy_stream(ifh, ofh)
+
+The size of chunks being ``read()`` and ``write()`` from and to the streams
+can be specified::
+
+    dctx = zstd.ZstdDecompressor()
+    dctx.copy_stream(ifh, ofh, read_size=8192, write_size=16384)
+
+Decompressor API
+^^^^^^^^^^^^^^^^
+
+``decompressobj()`` returns an object that exposes a ``decompress(data)``
+method. Compressed data chunks are fed into ``decompress(data)`` and
+uncompressed output (or an empty bytes) is returned. Output from subsequent
+calls needs to be concatenated to reassemble the full decompressed byte
+sequence.
+
+The purpose of ``decompressobj()`` is to provide an API-compatible interface
+with ``zlib.decompressobj`` and ``bz2.BZ2Decompressor``. This allows callers
+to swap in different decompressor objects while using the same API.
+
+Each object is single use: once an input frame is decoded, ``decompress()``
+can no longer be called.
+
+Here is how this API should be used::
+
+   dctx = zstd.ZstdDecompressor()
+   dobj = dctx.decompressobj()
+   data = dobj.decompress(compressed_chunk_0)
+   data = dobj.decompress(compressed_chunk_1)
+
+Choosing an API
+---------------
+
+Various forms of compression and decompression APIs are provided because each
+is suitable for different use cases.
+
+The simple/one-shot APIs are useful for small data, when the decompressed
+data size is known (either recorded in the zstd frame header via
+``write_content_size`` or known via an out-of-band mechanism, such as a file
+size).
+
+A limitation of the simple APIs is that input or output data must fit in memory.
+And unless using advanced tricks with Python *buffer objects*, both input and
+output must fit in memory simultaneously.
+
+Another limitation is that compression or decompression is performed as a single
+operation. So if you feed large input, it could take a long time for the
+function to return.
+
+The streaming APIs do not have the limitations of the simple API. The cost
+is that they are more complex to use than a single function call.
+
+The streaming APIs put the caller in control of compression and decompression
+behavior by allowing them to directly control either the input or output side
+of the operation.
+
+With the streaming input APIs, the caller feeds data into the compressor or
+decompressor as they see fit. Output data will only be written after the caller
+has explicitly written data.
+
+With the streaming output APIs, the caller consumes output from the compressor
+or decompressor as they see fit. The compressor or decompressor will only
+consume data from the source when the caller is ready to receive it.
+
+One end of the streaming APIs involves a file-like object that must
+``write()`` output data or ``read()`` input data. Depending on what the
+backing storage for these objects is, those operations may not complete quickly.
+For example, when streaming compressed data to a file, the ``write()`` into
+a streaming compressor could result in a ``write()`` to the filesystem, which
+may take a long time to finish due to slow I/O on the filesystem. So, there
+may be overhead in streaming APIs beyond the compression and decompression
+operations.
+
+Dictionary Creation and Management
+----------------------------------
+
+Zstandard allows *dictionaries* to be used when compressing and
+decompressing data. The idea is that if you are compressing a lot of similar
+data, you can precompute common properties of that data (such as recurring
+byte sequences) to achieve better compression ratios.
+
+In Python, compression dictionaries are represented as the
+``ZstdCompressionDict`` type.
+
+Instances can be constructed from bytes::
+
+   dict_data = zstd.ZstdCompressionDict(data)
+
+More interestingly, instances can be created by *training* on sample data::
+
+   dict_data = zstd.train_dictionary(size, samples)
+
+This takes a list of bytes instances and creates and returns a
+``ZstdCompressionDict``.
+
+You can see how many bytes are in the dictionary by calling ``len()``::
+
+   dict_data = zstd.train_dictionary(size, samples)
+   dict_size = len(dict_data)  # will not be larger than ``size``
+
+Once you have a dictionary, you can pass it to the objects performing
+compression and decompression::
+
+   dict_data = zstd.train_dictionary(16384, samples)
+
+   cctx = zstd.ZstdCompressor(dict_data=dict_data)
+   for source_data in input_data:
+       compressed = cctx.compress(source_data)
+       # Do something with compressed data.
+
+   dctx = zstd.ZstdDecompressor(dict_data=dict_data)
+   for compressed_data in input_data:
+       buffer = io.BytesIO()
+       with dctx.write_to(buffer) as decompressor:
+           decompressor.write(compressed_data)
+       # Do something with raw data in ``buffer``.
+
+Dictionaries have unique integer IDs. You can retrieve this ID via::
+
+   dict_id = zstd.dictionary_id(dict_data)
+
+You can obtain the raw data in the dict (useful for persisting and constructing
+a ``ZstdCompressionDict`` later) via ``as_bytes()``::
+
+   dict_data = zstd.train_dictionary(size, samples)
+   raw_data = dict_data.as_bytes()
+
+Explicit Compression Parameters
+-------------------------------
+
+Zstandard's integer compression levels along with the input size and dictionary
+size are converted into a data structure defining multiple parameters to tune
+behavior of the compression algorithm. It is possible to define this
+data structure explicitly to have lower-level control over compression behavior.
+
+The ``zstd.CompressionParameters`` type represents this data structure.
+You can see how Zstandard converts compression levels to this data structure
+by calling ``zstd.get_compression_parameters()``. e.g.::
+
+    params = zstd.get_compression_parameters(5)
+
+This function also accepts the uncompressed data size and dictionary size
+to adjust parameters::
+
+    params = zstd.get_compression_parameters(3, source_size=len(data), dict_size=len(dict_data))
+
+You can also construct compression parameters from their low-level components::
+
+    params = zstd.CompressionParameters(20, 6, 12, 5, 4, 10, zstd.STRATEGY_FAST)
+
+You can then configure a compressor to use the custom parameters::
+
+    cctx = zstd.ZstdCompressor(compression_params=params)
+
+The members of the ``CompressionParameters`` tuple are as follows:
+
+* 0 - Window log
+* 1 - Chain log
+* 2 - Hash log
+* 3 - Search log
+* 4 - Search length
+* 5 - Target length
+* 6 - Strategy (one of the ``zstd.STRATEGY_`` constants)
+
+You'll need to read the Zstandard documentation for what these parameters
+do.
+
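+Because the type behaves as a sequence, individual members can be read by
+index (a sketch)::
+
+    params = zstd.get_compression_parameters(5)
+    window_log = params[0]
+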
+Misc Functionality
+------------------
+
+estimate_compression_context_size(CompressionParameters)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given a ``CompressionParameters`` struct, estimate the memory size required
+to perform compression.
+
+estimate_decompression_context_size()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Estimate the memory size requirements for a decompressor instance.
+
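+A short usage sketch of both functions::
+
+    params = zstd.get_compression_parameters(3)
+    compress_size = zstd.estimate_compression_context_size(params)
+    decompress_size = zstd.estimate_decompression_context_size()
+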
+Constants
+---------
+
+The following module constants/attributes are exposed:
+
+ZSTD_VERSION
+    This module attribute exposes a 3-tuple of the Zstandard version. e.g.
+    ``(1, 0, 0)``
+MAX_COMPRESSION_LEVEL
+    Integer max compression level accepted by compression functions
+COMPRESSION_RECOMMENDED_INPUT_SIZE
+    Recommended chunk size to feed to compressor functions (see the
+    example after this list)
+COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+    Recommended chunk size for compression output
+DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+    Recommended chunk size to feed into decompressor functions
+DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+    Recommended chunk size for decompression output
+
+FRAME_HEADER
+    Bytes containing the header of a Zstandard frame
+MAGIC_NUMBER
+    Frame header magic number as an integer
+
+WINDOWLOG_MIN
+    Minimum value for the window log compression parameter
+WINDOWLOG_MAX
+    Maximum value for the window log compression parameter
+CHAINLOG_MIN
+    Minimum value for the chain log compression parameter
+CHAINLOG_MAX
+    Maximum value for the chain log compression parameter
+HASHLOG_MIN
+    Minimum value for the hash log compression parameter
+HASHLOG_MAX
+    Maximum value for the hash log compression parameter
+SEARCHLOG_MIN
+    Minimum value for the search log compression parameter
+SEARCHLOG_MAX
+    Maximum value for the search log compression parameter
+SEARCHLENGTH_MIN
+    Minimum value for the search length compression parameter
+SEARCHLENGTH_MAX
+    Maximum value for the search length compression parameter
+TARGETLENGTH_MIN
+    Minimum value for the target length compression parameter
+TARGETLENGTH_MAX
+    Maximum value for the target length compression parameter
+STRATEGY_FAST
+    Compression strategy
+STRATEGY_DFAST
+    Compression strategy
+STRATEGY_GREEDY
+    Compression strategy
+STRATEGY_LAZY
+    Compression strategy
+STRATEGY_LAZY2
+    Compression strategy
+STRATEGY_BTLAZY2
+    Compression strategy
+STRATEGY_BTOPT
+    Compression strategy
+
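+For example, the recommended input size can drive a chunked compression
+loop (a sketch; ``fh`` is assumed to be an open binary file)::
+
+    cctx = zstd.ZstdCompressor()
+    cobj = cctx.compressobj()
+    chunks = []
+    while True:
+        chunk = fh.read(zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE)
+        if not chunk:
+            break
+        chunks.append(cobj.compress(chunk))
+    chunks.append(cobj.flush())
+    compressed = b''.join(chunks)
+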
+Note on Zstandard's *Experimental* API
+======================================
+
+Many of the Zstandard APIs used by this module are marked as *experimental*
+within the Zstandard project. This includes a large number of useful
+features, such as compression and frame parameters and parts of dictionary
+compression.
+
+It is unclear how Zstandard's C API will evolve over time, especially with
+regards to this *experimental* functionality. We will try to maintain
+backwards compatibility at the Python API level. However, we cannot
+guarantee this for things not under our control.
+
+Since a copy of the Zstandard source code is distributed with this
+module and since we compile against it, the behavior of a specific
+version of this module should be constant for all of time. So if you
+pin the version of this module used in your projects (which is a Python
+best practice), you should be insulated from unwanted future changes.
+
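+For example, a pip ``requirements.txt`` entry pinning an exact version
+(the version number here is illustrative)::
+
+    zstandard==0.6.0
+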
+Donate
+======
+
+A lot of time has been invested into this project by the author.
+
+If you find this project useful and would like to thank the author for
+their work, consider donating some money. Any amount is appreciated.
+
+.. image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif
+    :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=gregory%2eszorc%40gmail%2ecom&lc=US&item_name=python%2dzstandard&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donate_LG%2egif%3aNonHosted
+    :alt: Donate via PayPal
+
+.. |ci-status| image:: https://travis-ci.org/indygreg/python-zstandard.svg?branch=master
+    :target: https://travis-ci.org/indygreg/python-zstandard
+
+.. |win-ci-status| image:: https://ci.appveyor.com/api/projects/status/github/indygreg/python-zstandard?svg=true
+    :target: https://ci.appveyor.com/project/indygreg/python-zstandard
+    :alt: Windows build status
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/compressiondict.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,247 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+ZstdCompressionDict* train_dictionary(PyObject* self, PyObject* args, PyObject* kwargs) {
+	static char *kwlist[] = { "dict_size", "samples", "parameters", NULL };
+	size_t capacity;
+	PyObject* samples;
+	Py_ssize_t samplesLen;
+	PyObject* parameters = NULL;
+	ZDICT_params_t zparams;
+	Py_ssize_t sampleIndex;
+	Py_ssize_t sampleSize;
+	PyObject* sampleItem;
+	size_t zresult;
+	void* sampleBuffer;
+	void* sampleOffset;
+	size_t samplesSize = 0;
+	size_t* sampleSizes;
+	void* dict;
+	ZstdCompressionDict* result;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|O!", kwlist,
+		&capacity,
+		&PyList_Type, &samples,
+		(PyObject*)&DictParametersType, &parameters)) {
+		return NULL;
+	}
+
+	/* Validate parameters first since it is easiest. */
+	zparams.selectivityLevel = 0;
+	zparams.compressionLevel = 0;
+	zparams.notificationLevel = 0;
+	zparams.dictID = 0;
+	zparams.reserved[0] = 0;
+	zparams.reserved[1] = 0;
+
+	if (parameters) {
+		/* TODO validate data ranges */
+		zparams.selectivityLevel = PyLong_AsUnsignedLong(PyTuple_GetItem(parameters, 0));
+		zparams.compressionLevel = PyLong_AsLong(PyTuple_GetItem(parameters, 1));
+		zparams.notificationLevel = PyLong_AsUnsignedLong(PyTuple_GetItem(parameters, 2));
+		zparams.dictID = PyLong_AsUnsignedLong(PyTuple_GetItem(parameters, 3));
+	}
+
+	/* Figure out the size of the raw samples */
+	samplesLen = PyList_Size(samples);
+	for (sampleIndex = 0; sampleIndex < samplesLen; sampleIndex++) {
+		sampleItem = PyList_GetItem(samples, sampleIndex);
+		if (!PyBytes_Check(sampleItem)) {
+			PyErr_SetString(PyExc_ValueError, "samples must be bytes");
+			/* TODO probably need to perform DECREF here */
+			return NULL;
+		}
+		samplesSize += PyBytes_GET_SIZE(sampleItem);
+	}
+
+	/* Now that we know the total size of the raw samples, we can allocate
+	a buffer for the raw data */
+	sampleBuffer = PyMem_Malloc(samplesSize);
+	if (!sampleBuffer) {
+		PyErr_NoMemory();
+		return NULL;
+	}
+	sampleSizes = PyMem_Malloc(samplesLen * sizeof(size_t));
+	if (!sampleSizes) {
+		PyMem_Free(sampleBuffer);
+		PyErr_NoMemory();
+		return NULL;
+	}
+
+	sampleOffset = sampleBuffer;
+	/* Now iterate again and assemble the samples in the buffer */
+	for (sampleIndex = 0; sampleIndex < samplesLen; sampleIndex++) {
+		sampleItem = PyList_GetItem(samples, sampleIndex);
+		sampleSize = PyBytes_GET_SIZE(sampleItem);
+		sampleSizes[sampleIndex] = sampleSize;
+		memcpy(sampleOffset, PyBytes_AS_STRING(sampleItem), sampleSize);
+		sampleOffset = (char*)sampleOffset + sampleSize;
+	}
+
+	dict = PyMem_Malloc(capacity);
+	if (!dict) {
+		PyMem_Free(sampleSizes);
+		PyMem_Free(sampleBuffer);
+		PyErr_NoMemory();
+		return NULL;
+	}
+
+	zresult = ZDICT_trainFromBuffer_advanced(dict, capacity,
+		sampleBuffer, sampleSizes, (unsigned int)samplesLen,
+		zparams);
+	if (ZDICT_isError(zresult)) {
+		PyErr_Format(ZstdError, "Cannot train dict: %s", ZDICT_getErrorName(zresult));
+		PyMem_Free(dict);
+		PyMem_Free(sampleSizes);
+		PyMem_Free(sampleBuffer);
+		return NULL;
+	}
+
+	result = PyObject_New(ZstdCompressionDict, &ZstdCompressionDictType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->dictData = dict;
+	result->dictSize = zresult;
+	return result;
+}
+
+
+PyDoc_STRVAR(ZstdCompressionDict__doc__,
+"ZstdCompressionDict(data) - Represents a computed compression dictionary\n"
+"\n"
+"This type holds the results of a computed Zstandard compression dictionary.\n"
+"Instances are obtained by calling ``train_dictionary()`` or by passing bytes\n"
+"obtained from another source into the constructor.\n"
+);
+
+static int ZstdCompressionDict_init(ZstdCompressionDict* self, PyObject* args) {
+	const char* source;
+	Py_ssize_t sourceSize;
+
+	self->dictData = NULL;
+	self->dictSize = 0;
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+#else
+	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+#endif
+		return -1;
+	}
+
+	self->dictData = PyMem_Malloc(sourceSize);
+	if (!self->dictData) {
+		PyErr_NoMemory();
+		return -1;
+	}
+
+	memcpy(self->dictData, source, sourceSize);
+	self->dictSize = sourceSize;
+
+	return 0;
+}
+
+static void ZstdCompressionDict_dealloc(ZstdCompressionDict* self) {
+	if (self->dictData) {
+		PyMem_Free(self->dictData);
+		self->dictData = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdCompressionDict_dict_id(ZstdCompressionDict* self) {
+	unsigned dictID = ZDICT_getDictID(self->dictData, self->dictSize);
+
+	return PyLong_FromLong(dictID);
+}
+
+static PyObject* ZstdCompressionDict_as_bytes(ZstdCompressionDict* self) {
+	return PyBytes_FromStringAndSize(self->dictData, self->dictSize);
+}
+
+static PyMethodDef ZstdCompressionDict_methods[] = {
+	{ "dict_id", (PyCFunction)ZstdCompressionDict_dict_id, METH_NOARGS,
+	PyDoc_STR("dict_id() -- obtain the numeric dictionary ID") },
+	{ "as_bytes", (PyCFunction)ZstdCompressionDict_as_bytes, METH_NOARGS,
+	PyDoc_STR("as_bytes() -- obtain the raw bytes constituting the dictionary data") },
+	{ NULL, NULL }
+};
+
+static Py_ssize_t ZstdCompressionDict_length(ZstdCompressionDict* self) {
+	return self->dictSize;
+}
+
+static PySequenceMethods ZstdCompressionDict_sq = {
+	(lenfunc)ZstdCompressionDict_length, /* sq_length */
+	0,                                   /* sq_concat */
+	0,                                   /* sq_repeat */
+	0,                                   /* sq_item */
+	0,                                   /* sq_ass_item */
+	0,                                   /* sq_contains */
+	0,                                   /* sq_inplace_concat */
+	0                                    /* sq_inplace_repeat */
+};
+
+PyTypeObject ZstdCompressionDictType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressionDict",     /* tp_name */
+	sizeof(ZstdCompressionDict),    /* tp_basicsize */
+	0,                              /* tp_itemsize */
+	(destructor)ZstdCompressionDict_dealloc, /* tp_dealloc */
+	0,                              /* tp_print */
+	0,                              /* tp_getattr */
+	0,                              /* tp_setattr */
+	0,                              /* tp_compare */
+	0,                              /* tp_repr */
+	0,                              /* tp_as_number */
+	&ZstdCompressionDict_sq,        /* tp_as_sequence */
+	0,                              /* tp_as_mapping */
+	0,                              /* tp_hash */
+	0,                              /* tp_call */
+	0,                              /* tp_str */
+	0,                              /* tp_getattro */
+	0,                              /* tp_setattro */
+	0,                              /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdCompressionDict__doc__,     /* tp_doc */
+	0,                              /* tp_traverse */
+	0,                              /* tp_clear */
+	0,                              /* tp_richcompare */
+	0,                              /* tp_weaklistoffset */
+	0,                              /* tp_iter */
+	0,                              /* tp_iternext */
+	ZstdCompressionDict_methods,    /* tp_methods */
+	0,                              /* tp_members */
+	0,                              /* tp_getset */
+	0,                              /* tp_base */
+	0,                              /* tp_dict */
+	0,                              /* tp_descr_get */
+	0,                              /* tp_descr_set */
+	0,                              /* tp_dictoffset */
+	(initproc)ZstdCompressionDict_init, /* tp_init */
+	0,                              /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void compressiondict_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdCompressionDictType) = &PyType_Type;
+	if (PyType_Ready(&ZstdCompressionDictType) < 0) {
+		return;
+	}
+
+	Py_INCREF((PyObject*)&ZstdCompressionDictType);
+	PyModule_AddObject(mod, "ZstdCompressionDict",
+		(PyObject*)&ZstdCompressionDictType);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/compressionparams.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,226 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+void ztopy_compression_parameters(CompressionParametersObject* params, ZSTD_compressionParameters* zparams) {
+	zparams->windowLog = params->windowLog;
+	zparams->chainLog = params->chainLog;
+	zparams->hashLog = params->hashLog;
+	zparams->searchLog = params->searchLog;
+	zparams->searchLength = params->searchLength;
+	zparams->targetLength = params->targetLength;
+	zparams->strategy = params->strategy;
+}
+
+CompressionParametersObject* get_compression_parameters(PyObject* self, PyObject* args) {
+	int compressionLevel;
+	unsigned PY_LONG_LONG sourceSize = 0;
+	Py_ssize_t dictSize = 0;
+	ZSTD_compressionParameters params;
+	CompressionParametersObject* result;
+
+	if (!PyArg_ParseTuple(args, "i|Kn", &compressionLevel, &sourceSize, &dictSize)) {
+		return NULL;
+	}
+
+	params = ZSTD_getCParams(compressionLevel, sourceSize, dictSize);
+
+	result = PyObject_New(CompressionParametersObject, &CompressionParametersType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->windowLog = params.windowLog;
+	result->chainLog = params.chainLog;
+	result->hashLog = params.hashLog;
+	result->searchLog = params.searchLog;
+	result->searchLength = params.searchLength;
+	result->targetLength = params.targetLength;
+	result->strategy = params.strategy;
+
+	return result;
+}
+
+PyObject* estimate_compression_context_size(PyObject* self, PyObject* args) {
+	CompressionParametersObject* params;
+	ZSTD_compressionParameters zparams;
+	PyObject* result;
+
+	if (!PyArg_ParseTuple(args, "O!", &CompressionParametersType, &params)) {
+		return NULL;
+	}
+
+	ztopy_compression_parameters(params, &zparams);
+	result = PyLong_FromSize_t(ZSTD_estimateCCtxSize(zparams));
+	return result;
+}
+
+PyDoc_STRVAR(CompressionParameters__doc__,
+"CompressionParameters: low-level control over zstd compression");
+
+static PyObject* CompressionParameters_new(PyTypeObject* subtype, PyObject* args, PyObject* kwargs) {
+	CompressionParametersObject* self;
+	unsigned windowLog;
+	unsigned chainLog;
+	unsigned hashLog;
+	unsigned searchLog;
+	unsigned searchLength;
+	unsigned targetLength;
+	unsigned strategy;
+
+	if (!PyArg_ParseTuple(args, "IIIIIII", &windowLog, &chainLog, &hashLog, &searchLog,
+		&searchLength, &targetLength, &strategy)) {
+		return NULL;
+	}
+
+	if (windowLog < ZSTD_WINDOWLOG_MIN || windowLog > ZSTD_WINDOWLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid window log value");
+		return NULL;
+	}
+
+	if (chainLog < ZSTD_CHAINLOG_MIN || chainLog > ZSTD_CHAINLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid chain log value");
+		return NULL;
+	}
+
+	if (hashLog < ZSTD_HASHLOG_MIN || hashLog > ZSTD_HASHLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid hash log value");
+		return NULL;
+	}
+
+	if (searchLog < ZSTD_SEARCHLOG_MIN || searchLog > ZSTD_SEARCHLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid search log value");
+		return NULL;
+	}
+
+	if (searchLength < ZSTD_SEARCHLENGTH_MIN || searchLength > ZSTD_SEARCHLENGTH_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid search length value");
+		return NULL;
+	}
+
+	if (targetLength < ZSTD_TARGETLENGTH_MIN || targetLength > ZSTD_TARGETLENGTH_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid target length value");
+		return NULL;
+	}
+
+	if (strategy < ZSTD_fast || strategy > ZSTD_btopt) {
+		PyErr_SetString(PyExc_ValueError, "invalid strategy value");
+		return NULL;
+	}
+
+	self = (CompressionParametersObject*)subtype->tp_alloc(subtype, 1);
+	if (!self) {
+		return NULL;
+	}
+
+	self->windowLog = windowLog;
+	self->chainLog = chainLog;
+	self->hashLog = hashLog;
+	self->searchLog = searchLog;
+	self->searchLength = searchLength;
+	self->targetLength = targetLength;
+	self->strategy = strategy;
+
+	return (PyObject*)self;
+}
+
+static void CompressionParameters_dealloc(PyObject* self) {
+	PyObject_Del(self);
+}
+
+static Py_ssize_t CompressionParameters_length(PyObject* self) {
+	return 7;
+}
+
+static PyObject* CompressionParameters_item(PyObject* o, Py_ssize_t i) {
+	CompressionParametersObject* self = (CompressionParametersObject*)o;
+
+	switch (i) {
+	case 0:
+		return PyLong_FromLong(self->windowLog);
+	case 1:
+		return PyLong_FromLong(self->chainLog);
+	case 2:
+		return PyLong_FromLong(self->hashLog);
+	case 3:
+		return PyLong_FromLong(self->searchLog);
+	case 4:
+		return PyLong_FromLong(self->searchLength);
+	case 5:
+		return PyLong_FromLong(self->targetLength);
+	case 6:
+		return PyLong_FromLong(self->strategy);
+	default:
+		PyErr_SetString(PyExc_IndexError, "index out of range");
+		return NULL;
+	}
+}
+
+static PySequenceMethods CompressionParameters_sq = {
+	CompressionParameters_length, /* sq_length */
+	0,                            /* sq_concat */
+	0,                            /* sq_repeat */
+	CompressionParameters_item,   /* sq_item */
+	0,                            /* sq_ass_item */
+	0,                            /* sq_contains */
+	0,                            /* sq_inplace_concat */
+	0                             /* sq_inplace_repeat */
+};
+
+PyTypeObject CompressionParametersType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"CompressionParameters", /* tp_name */
+	sizeof(CompressionParametersObject), /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)CompressionParameters_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	&CompressionParameters_sq, /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	CompressionParameters__doc__, /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	0,                         /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	0,                         /* tp_init */
+	0,                         /* tp_alloc */
+	CompressionParameters_new, /* tp_new */
+};
+
+void compressionparams_module_init(PyObject* mod) {
+	Py_TYPE(&CompressionParametersType) = &PyType_Type;
+	if (PyType_Ready(&CompressionParametersType) < 0) {
+		return;
+	}
+
+	Py_INCREF((PyObject*)&CompressionParametersType);
+	PyModule_AddObject(mod, "CompressionParameters",
+		(PyObject*)&CompressionParametersType);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,288 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(ZstdCompressionWriter__doc__,
+"A context manager used for writing compressed output to a writer.\n"
+);
+
+static void ZstdCompressionWriter_dealloc(ZstdCompressionWriter* self) {
+	Py_XDECREF(self->compressor);
+	Py_XDECREF(self->writer);
+
+	if (self->cstream) {
+		ZSTD_freeCStream(self->cstream);
+		self->cstream = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdCompressionWriter_enter(ZstdCompressionWriter* self) {
+	if (self->entered) {
+		PyErr_SetString(ZstdError, "cannot __enter__ multiple times");
+		return NULL;
+	}
+
+	self->cstream = CStream_from_ZstdCompressor(self->compressor, self->sourceSize);
+	if (!self->cstream) {
+		return NULL;
+	}
+
+	self->entered = 1;
+
+	Py_INCREF(self);
+	return (PyObject*)self;
+}
+
+static PyObject* ZstdCompressionWriter_exit(ZstdCompressionWriter* self, PyObject* args) {
+	PyObject* exc_type;
+	PyObject* exc_value;
+	PyObject* exc_tb;
+	size_t zresult;
+
+	ZSTD_outBuffer output;
+	PyObject* res;
+
+	if (!PyArg_ParseTuple(args, "OOO", &exc_type, &exc_value, &exc_tb)) {
+		return NULL;
+	}
+
+	self->entered = 0;
+
+	if (self->cstream && exc_type == Py_None && exc_value == Py_None &&
+		exc_tb == Py_None) {
+
+		output.dst = PyMem_Malloc(self->outSize);
+		if (!output.dst) {
+			return PyErr_NoMemory();
+		}
+		output.size = self->outSize;
+		output.pos = 0;
+
+		while (1) {
+			zresult = ZSTD_endStream(self->cstream, &output);
+			if (ZSTD_isError(zresult)) {
+				PyErr_Format(ZstdError, "error ending compression stream: %s",
+					ZSTD_getErrorName(zresult));
+				PyMem_Free(output.dst);
+				return NULL;
+			}
+
+			if (output.pos) {
+#if PY_MAJOR_VERSION >= 3
+				res = PyObject_CallMethod(self->writer, "write", "y#",
+#else
+				res = PyObject_CallMethod(self->writer, "write", "s#",
+#endif
+					output.dst, output.pos);
+				Py_XDECREF(res);
+			}
+
+			if (!zresult) {
+				break;
+			}
+
+			output.pos = 0;
+		}
+
+		PyMem_Free(output.dst);
+		ZSTD_freeCStream(self->cstream);
+		self->cstream = NULL;
+	}
+
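+	/* Returning False from __exit__ means any pending exception propagates. */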
+	Py_RETURN_FALSE;
+}
+
+static PyObject* ZstdCompressionWriter_memory_size(ZstdCompressionWriter* self) {
+	if (!self->cstream) {
+		PyErr_SetString(ZstdError, "cannot determine size of an inactive compressor; "
+			"call when a context manager is active");
+		return NULL;
+	}
+
+	return PyLong_FromSize_t(ZSTD_sizeof_CStream(self->cstream));
+}
+
+static PyObject* ZstdCompressionWriter_write(ZstdCompressionWriter* self, PyObject* args) {
+	const char* source;
+	Py_ssize_t sourceSize;
+	size_t zresult;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	PyObject* res;
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+#else
+	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+#endif
+		return NULL;
+	}
+
+	if (!self->entered) {
+		PyErr_SetString(ZstdError, "compress must be called from an active context manager");
+		return NULL;
+	}
+
+	output.dst = PyMem_Malloc(self->outSize);
+	if (!output.dst) {
+		return PyErr_NoMemory();
+	}
+	output.size = self->outSize;
+	output.pos = 0;
+
+	input.src = source;
+	input.size = sourceSize;
+	input.pos = 0;
+
+	while ((ssize_t)input.pos < sourceSize) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_compressStream(self->cstream, &output, &input);
+		Py_END_ALLOW_THREADS
+
+		if (ZSTD_isError(zresult)) {
+			PyMem_Free(output.dst);
+			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		/* Copy data from output buffer to writer. */
+		if (output.pos) {
+#if PY_MAJOR_VERSION >= 3
+			res = PyObject_CallMethod(self->writer, "write", "y#",
+#else
+			res = PyObject_CallMethod(self->writer, "write", "s#",
+#endif
+				output.dst, output.pos);
+			Py_XDECREF(res);
+		}
+		output.pos = 0;
+	}
+
+	PyMem_Free(output.dst);
+
+	/* TODO return bytes written */
+	Py_RETURN_NONE;
+}
+
+static PyObject* ZstdCompressionWriter_flush(ZstdCompressionWriter* self, PyObject* args) {
+	size_t zresult;
+	ZSTD_outBuffer output;
+	PyObject* res;
+
+	if (!self->entered) {
+		PyErr_SetString(ZstdError, "flush must be called from an active context manager");
+		return NULL;
+	}
+
+	output.dst = PyMem_Malloc(self->outSize);
+	if (!output.dst) {
+		return PyErr_NoMemory();
+	}
+	output.size = self->outSize;
+	output.pos = 0;
+
+	while (1) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_flushStream(self->cstream, &output);
+		Py_END_ALLOW_THREADS
+
+		if (ZSTD_isError(zresult)) {
+			PyMem_Free(output.dst);
+			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		if (!output.pos) {
+			break;
+		}
+
+		/* Copy data from output buffer to writer. */
+		if (output.pos) {
+#if PY_MAJOR_VERSION >= 3
+			res = PyObject_CallMethod(self->writer, "write", "y#",
+#else
+			res = PyObject_CallMethod(self->writer, "write", "s#",
+#endif
+				output.dst, output.pos);
+			Py_XDECREF(res);
+		}
+		output.pos = 0;
+	}
+
+	PyMem_Free(output.dst);
+
+	/* TODO return bytes written */
+	Py_RETURN_NONE;
+}
+
+static PyMethodDef ZstdCompressionWriter_methods[] = {
+	{ "__enter__", (PyCFunction)ZstdCompressionWriter_enter, METH_NOARGS,
+	PyDoc_STR("Enter a compression context.") },
+	{ "__exit__", (PyCFunction)ZstdCompressionWriter_exit, METH_VARARGS,
+	PyDoc_STR("Exit a compression context.") },
+	{ "memory_size", (PyCFunction)ZstdCompressionWriter_memory_size, METH_NOARGS,
+	PyDoc_STR("Obtain the memory size of the underlying compressor") },
+	{ "write", (PyCFunction)ZstdCompressionWriter_write, METH_VARARGS,
+	PyDoc_STR("Compress data") },
+	{ "flush", (PyCFunction)ZstdCompressionWriter_flush, METH_NOARGS,
+	PyDoc_STR("Flush data and finish a zstd frame") },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdCompressionWriterType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressionWriter",  /* tp_name */
+	sizeof(ZstdCompressionWriter),  /* tp_basicsize */
+	0,                              /* tp_itemsize */
+	(destructor)ZstdCompressionWriter_dealloc, /* tp_dealloc */
+	0,                              /* tp_print */
+	0,                              /* tp_getattr */
+	0,                              /* tp_setattr */
+	0,                              /* tp_compare */
+	0,                              /* tp_repr */
+	0,                              /* tp_as_number */
+	0,                              /* tp_as_sequence */
+	0,                              /* tp_as_mapping */
+	0,                              /* tp_hash */
+	0,                              /* tp_call */
+	0,                              /* tp_str */
+	0,                              /* tp_getattro */
+	0,                              /* tp_setattro */
+	0,                              /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdCompressionWriter__doc__,   /* tp_doc */
+	0,                              /* tp_traverse */
+	0,                              /* tp_clear */
+	0,                              /* tp_richcompare */
+	0,                              /* tp_weaklistoffset */
+	0,                              /* tp_iter */
+	0,                              /* tp_iternext */
+	ZstdCompressionWriter_methods,  /* tp_methods */
+	0,                              /* tp_members */
+	0,                              /* tp_getset */
+	0,                              /* tp_base */
+	0,                              /* tp_dict */
+	0,                              /* tp_descr_get */
+	0,                              /* tp_descr_set */
+	0,                              /* tp_dictoffset */
+	0,                              /* tp_init */
+	0,                              /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void compressionwriter_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdCompressionWriterType) = &PyType_Type;
+	if (PyType_Ready(&ZstdCompressionWriterType) < 0) {
+		return;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/compressobj.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,250 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(ZstdCompressionObj__doc__,
+"Perform compression using a standard library compatible API.\n"
+);
+
+static void ZstdCompressionObj_dealloc(ZstdCompressionObj* self) {
+	PyMem_Free(self->output.dst);
+	self->output.dst = NULL;
+
+	if (self->cstream) {
+		ZSTD_freeCStream(self->cstream);
+		self->cstream = NULL;
+	}
+
+	Py_XDECREF(self->compressor);
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdCompressionObj_compress(ZstdCompressionObj* self, PyObject* args) {
+	const char* source;
+	Py_ssize_t sourceSize;
+	ZSTD_inBuffer input;
+	size_t zresult;
+	PyObject* result = NULL;
+	Py_ssize_t resultSize = 0;
+
+	if (self->finished) {
+		PyErr_SetString(ZstdError, "cannot call compress() after compressor finished");
+		return NULL;
+	}
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+#else
+	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+#endif
+		return NULL;
+	}
+
+	input.src = source;
+	input.size = sourceSize;
+	input.pos = 0;
+
+	while ((ssize_t)input.pos < sourceSize) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_compressStream(self->cstream, &self->output, &input);
+		Py_END_ALLOW_THREADS
+
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		if (self->output.pos) {
+			if (result) {
+				resultSize = PyBytes_GET_SIZE(result);
+				if (-1 == _PyBytes_Resize(&result, resultSize + self->output.pos)) {
+					return NULL;
+				}
+
+				memcpy(PyBytes_AS_STRING(result) + resultSize,
+					self->output.dst, self->output.pos);
+			}
+			else {
+				result = PyBytes_FromStringAndSize(self->output.dst, self->output.pos);
+				if (!result) {
+					return NULL;
+				}
+			}
+
+			self->output.pos = 0;
+		}
+	}
+
+	if (result) {
+		return result;
+	}
+	else {
+		return PyBytes_FromString("");
+	}
+}
+
+static PyObject* ZstdCompressionObj_flush(ZstdCompressionObj* self, PyObject* args) {
+	int flushMode = compressorobj_flush_finish;
+	size_t zresult;
+	PyObject* result = NULL;
+	Py_ssize_t resultSize = 0;
+
+	if (!PyArg_ParseTuple(args, "|i", &flushMode)) {
+		return NULL;
+	}
+
+	if (flushMode != compressorobj_flush_finish && flushMode != compressorobj_flush_block) {
+		PyErr_SetString(PyExc_ValueError, "flush mode not recognized");
+		return NULL;
+	}
+
+	if (self->finished) {
+		PyErr_SetString(ZstdError, "compressor object already finished");
+		return NULL;
+	}
+
+	assert(self->output.pos == 0);
+
+	if (flushMode == compressorobj_flush_block) {
+		/* The output buffer is of size ZSTD_CStreamOutSize(), which is 
+		   guaranteed to hold a full block. */
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_flushStream(self->cstream, &self->output);
+		Py_END_ALLOW_THREADS
+
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		/* Output buffer is guaranteed to hold full block. */
+		assert(zresult == 0);
+
+		if (self->output.pos) {
+			result = PyBytes_FromStringAndSize(self->output.dst, self->output.pos);
+			if (!result) {
+				return NULL;
+			}
+		}
+
+		self->output.pos = 0;
+
+		if (result) {
+			return result;
+		}
+		else {
+			return PyBytes_FromString("");
+		}
+	}
+
+	assert(flushMode == compressorobj_flush_finish);
+	self->finished = 1;
+
+	while (1) {
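+		/* ZSTD_endStream() returns the number of bytes still to be
+		   flushed; 0 means the frame is finished. */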
+		zresult = ZSTD_endStream(self->cstream, &self->output);
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "error ending compression stream: %s",
+				ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		if (self->output.pos) {
+			if (result) {
+				resultSize = PyBytes_GET_SIZE(result);
+				if (-1 == _PyBytes_Resize(&result, resultSize + self->output.pos)) {
+					return NULL;
+				}
+
+				memcpy(PyBytes_AS_STRING(result) + resultSize,
+					self->output.dst, self->output.pos);
+			}
+			else {
+				result = PyBytes_FromStringAndSize(self->output.dst, self->output.pos);
+				if (!result) {
+					return NULL;
+				}
+			}
+
+			self->output.pos = 0;
+		}
+
+		if (!zresult) {
+			break;
+		}
+	}
+
+	ZSTD_freeCStream(self->cstream);
+	self->cstream = NULL;
+
+	if (result) {
+		return result;
+	}
+	else {
+		return PyBytes_FromString("");
+	}
+}
+
+static PyMethodDef ZstdCompressionObj_methods[] = {
+	{ "compress", (PyCFunction)ZstdCompressionObj_compress, METH_VARARGS,
+	PyDoc_STR("compress data") },
+	{ "flush", (PyCFunction)ZstdCompressionObj_flush, METH_VARARGS,
+	PyDoc_STR("finish compression operation") },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdCompressionObjType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressionObj",      /* tp_name */
+	sizeof(ZstdCompressionObj),     /* tp_basicsize */
+	0,                              /* tp_itemsize */
+	(destructor)ZstdCompressionObj_dealloc, /* tp_dealloc */
+	0,                              /* tp_print */
+	0,                              /* tp_getattr */
+	0,                              /* tp_setattr */
+	0,                              /* tp_compare */
+	0,                              /* tp_repr */
+	0,                              /* tp_as_number */
+	0,                              /* tp_as_sequence */
+	0,                              /* tp_as_mapping */
+	0,                              /* tp_hash */
+	0,                              /* tp_call */
+	0,                              /* tp_str */
+	0,                              /* tp_getattro */
+	0,                              /* tp_setattro */
+	0,                              /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdCompressionObj__doc__,      /* tp_doc */
+	0,                              /* tp_traverse */
+	0,                              /* tp_clear */
+	0,                              /* tp_richcompare */
+	0,                              /* tp_weaklistoffset */
+	0,                              /* tp_iter */
+	0,                              /* tp_iternext */
+	ZstdCompressionObj_methods,     /* tp_methods */
+	0,                              /* tp_members */
+	0,                              /* tp_getset */
+	0,                              /* tp_base */
+	0,                              /* tp_dict */
+	0,                              /* tp_descr_get */
+	0,                              /* tp_descr_set */
+	0,                              /* tp_dictoffset */
+	0,                              /* tp_init */
+	0,                              /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void compressobj_module_init(PyObject* module) {
+	Py_TYPE(&ZstdCompressionObjType) = &PyType_Type;
+	if (PyType_Ready(&ZstdCompressionObjType) < 0) {
+		return;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/compressor.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,791 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+int populate_cdict(ZstdCompressor* compressor, void* dictData, size_t dictSize, ZSTD_parameters* zparams) {
+	ZSTD_customMem zmem;
+	assert(!compressor->cdict);
+	Py_BEGIN_ALLOW_THREADS
+	memset(&zmem, 0, sizeof(zmem));
+	compressor->cdict = ZSTD_createCDict_advanced(compressor->dict->dictData,
+		compressor->dict->dictSize, *zparams, zmem);
+	Py_END_ALLOW_THREADS
+
+	if (!compressor->cdict) {
+		PyErr_SetString(ZstdError, "could not create compression dictionary");
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+* Initialize a zstd CStream from a ZstdCompressor instance.
+*
+* Returns a ZSTD_CStream on success or NULL on failure. If NULL, a Python
+* exception will be set.
+*/
+ZSTD_CStream* CStream_from_ZstdCompressor(ZstdCompressor* compressor, Py_ssize_t sourceSize) {
+	ZSTD_CStream* cstream;
+	ZSTD_parameters zparams;
+	void* dictData = NULL;
+	size_t dictSize = 0;
+	size_t zresult;
+
+	cstream = ZSTD_createCStream();
+	if (!cstream) {
+		PyErr_SetString(ZstdError, "cannot create CStream");
+		return NULL;
+	}
+
+	if (compressor->dict) {
+		dictData = compressor->dict->dictData;
+		dictSize = compressor->dict->dictSize;
+	}
+
+	memset(&zparams, 0, sizeof(zparams));
+	if (compressor->cparams) {
+		ztopy_compression_parameters(compressor->cparams, &zparams.cParams);
+		/* Do NOT call ZSTD_adjustCParams() here because the compression params
+		come from the user. */
+	}
+	else {
+		zparams.cParams = ZSTD_getCParams(compressor->compressionLevel, sourceSize, dictSize);
+	}
+
+	zparams.fParams = compressor->fparams;
+
+	zresult = ZSTD_initCStream_advanced(cstream, dictData, dictSize, zparams, sourceSize);
+
+	if (ZSTD_isError(zresult)) {
+		ZSTD_freeCStream(cstream);
+		PyErr_Format(ZstdError, "cannot init CStream: %s", ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	return cstream;
+}
+
+PyDoc_STRVAR(ZstdCompressor__doc__,
+"ZstdCompressor(level=None, dict_data=None, compression_params=None)\n"
+"\n"
+"Create an object used to perform Zstandard compression.\n"
+"\n"
+"An instance can compress data in various ways. Instances can be used multiple\n"
+"times. Each compression operation will use the compression parameters\n"
+"defined at construction time.\n"
+"\n"
+"Compression can be configured via the following named arguments:\n"
+"\n"
+"level\n"
+"   Integer compression level.\n"
+"dict_data\n"
+"   A ``ZstdCompressionDict`` to be used to compress with dictionary data.\n"
+"compression_params\n"
+"   A ``CompressionParameters`` instance defining low-level compression\n"
+"   parameters. If defined, this will override the ``level`` argument.\n"
+"write_checksum\n"
+"   If True, a 4 byte content checksum will be written with the compressed\n"
+"   data, allowing the decompressor to perform content verification.\n"
+"write_content_size\n"
+"   If True, the decompressed content size will be included in the header of\n"
+"   the compressed data. This data will only be written if the compressor\n"
+"   knows the size of the input data.\n"
+"write_dict_id\n"
+"   Determines whether the dictionary ID will be written into the compressed\n"
+"   data. Defaults to True. Only adds content to the compressed data if\n"
+"   a dictionary is being used.\n"
+);
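+
+/* Example (Python): a minimal construction sketch for the arguments
+ * documented above; the level and sample data are placeholders:
+ *
+ *     import zstd
+ *     cctx = zstd.ZstdCompressor(level=10, write_checksum=True,
+ *                                write_content_size=True)
+ *     frame = cctx.compress(b'data to compress')
+ */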
+
+static int ZstdCompressor_init(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"level",
+		"dict_data",
+		"compression_params",
+		"write_checksum",
+		"write_content_size",
+		"write_dict_id",
+		NULL
+	};
+
+	int level = 3;
+	ZstdCompressionDict* dict = NULL;
+	CompressionParametersObject* params = NULL;
+	PyObject* writeChecksum = NULL;
+	PyObject* writeContentSize = NULL;
+	PyObject* writeDictID = NULL;
+
+	self->cctx = NULL;
+	self->dict = NULL;
+	self->cparams = NULL;
+	self->cdict = NULL;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO!O!OOO", kwlist,
+		&level, &ZstdCompressionDictType, &dict,
+		&CompressionParametersType, &params,
+		&writeChecksum, &writeContentSize, &writeDictID)) {
+		return -1;
+	}
+
+	if (level < 1) {
+		PyErr_SetString(PyExc_ValueError, "level must be greater than 0");
+		return -1;
+	}
+
+	if (level > ZSTD_maxCLevel()) {
+		PyErr_Format(PyExc_ValueError, "level must be less than %d",
+			ZSTD_maxCLevel() + 1);
+		return -1;
+	}
+
+	/* We create a ZSTD_CCtx for reuse among multiple operations to reduce the
+	   overhead of each compression operation. */
+	self->cctx = ZSTD_createCCtx();
+	if (!self->cctx) {
+		PyErr_NoMemory();
+		return -1;
+	}
+
+	self->compressionLevel = level;
+
+	if (dict) {
+		self->dict = dict;
+		Py_INCREF(dict);
+	}
+
+	if (params) {
+		self->cparams = params;
+		Py_INCREF(params);
+	}
+
+	memset(&self->fparams, 0, sizeof(self->fparams));
+
+	if (writeChecksum && PyObject_IsTrue(writeChecksum)) {
+		self->fparams.checksumFlag = 1;
+	}
+	if (writeContentSize && PyObject_IsTrue(writeContentSize)) {
+		self->fparams.contentSizeFlag = 1;
+	}
+	if (writeDictID && PyObject_Not(writeDictID)) {
+		self->fparams.noDictIDFlag = 1;
+	}
+
+	return 0;
+}
+
+static void ZstdCompressor_dealloc(ZstdCompressor* self) {
+	Py_XDECREF(self->cparams);
+	Py_XDECREF(self->dict);
+
+	if (self->cdict) {
+		ZSTD_freeCDict(self->cdict);
+		self->cdict = NULL;
+	}
+
+	if (self->cctx) {
+		ZSTD_freeCCtx(self->cctx);
+		self->cctx = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+PyDoc_STRVAR(ZstdCompressor_copy_stream__doc__,
+"copy_stream(ifh, ofh[, size=0, read_size=default, write_size=default])\n"
+"compress data between streams\n"
+"\n"
+"Data will be read from ``ifh``, compressed, and written to ``ofh``.\n"
+"``ifh`` must have a ``read(size)`` method. ``ofh`` must have a ``write(data)``\n"
+"method.\n"
+"\n"
+"An optional ``size`` argument specifies the size of the source stream.\n"
+"If defined, compression parameters will be tuned based on the size.\n"
+"\n"
+"Optional arguments ``read_size`` and ``write_size`` define the chunk sizes\n"
+"of ``read()`` and ``write()`` operations, respectively. By default, they use\n"
+"the default compression stream input and output sizes, respectively.\n"
+);
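+
+/* Example (Python): streaming between file objects as described above;
+ * the file names are placeholders:
+ *
+ *     import zstd
+ *     cctx = zstd.ZstdCompressor()
+ *     with open('in.bin', 'rb') as ifh, open('out.zst', 'wb') as ofh:
+ *         read_count, written_count = cctx.copy_stream(ifh, ofh)
+ */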
+
+static PyObject* ZstdCompressor_copy_stream(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"ifh",
+		"ofh",
+		"size",
+		"read_size",
+		"write_size",
+		NULL
+	};
+
+	PyObject* source;
+	PyObject* dest;
+	Py_ssize_t sourceSize = 0;
+	size_t inSize = ZSTD_CStreamInSize();
+	size_t outSize = ZSTD_CStreamOutSize();
+	ZSTD_CStream* cstream;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	Py_ssize_t totalRead = 0;
+	Py_ssize_t totalWrite = 0;
+	char* readBuffer;
+	Py_ssize_t readSize;
+	PyObject* readResult;
+	PyObject* res = NULL;
+	size_t zresult;
+	PyObject* writeResult;
+	PyObject* totalReadPy;
+	PyObject* totalWritePy;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|nkk", kwlist, &source, &dest, &sourceSize,
+		&inSize, &outSize)) {
+		return NULL;
+	}
+
+	if (!PyObject_HasAttrString(source, "read")) {
+		PyErr_SetString(PyExc_ValueError, "first argument must have a read() method");
+		return NULL;
+	}
+
+	if (!PyObject_HasAttrString(dest, "write")) {
+		PyErr_SetString(PyExc_ValueError, "second argument must have a write() method");
+		return NULL;
+	}
+
+	/* Prevent free on uninitialized memory in finally. */
+	output.dst = NULL;
+
+	cstream = CStream_from_ZstdCompressor(self, sourceSize);
+	if (!cstream) {
+		res = NULL;
+		goto finally;
+	}
+
+	output.dst = PyMem_Malloc(outSize);
+	if (!output.dst) {
+		PyErr_NoMemory();
+		res = NULL;
+		goto finally;
+	}
+	output.size = outSize;
+	output.pos = 0;
+
+	while (1) {
+		/* Try to read from source stream. */
+		readResult = PyObject_CallMethod(source, "read", "n", inSize);
+		if (!readResult) {
+			PyErr_SetString(ZstdError, "could not read() from source");
+			goto finally;
+		}
+
+		PyBytes_AsStringAndSize(readResult, &readBuffer, &readSize);
+
+		/* If no data was read, we're at EOF. */
+		if (0 == readSize) {
+			break;
+		}
+
+		totalRead += readSize;
+
+		/* Send data to compressor */
+		input.src = readBuffer;
+		input.size = readSize;
+		input.pos = 0;
+
+		while (input.pos < input.size) {
+			Py_BEGIN_ALLOW_THREADS
+			zresult = ZSTD_compressStream(cstream, &output, &input);
+			Py_END_ALLOW_THREADS
+
+			if (ZSTD_isError(zresult)) {
+				res = NULL;
+				PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+				goto finally;
+			}
+
+			if (output.pos) {
+#if PY_MAJOR_VERSION >= 3
+				writeResult = PyObject_CallMethod(dest, "write", "y#",
+#else
+				writeResult = PyObject_CallMethod(dest, "write", "s#",
+#endif
+					output.dst, output.pos);
+				Py_XDECREF(writeResult);
+				totalWrite += output.pos;
+				output.pos = 0;
+			}
+		}
+	}
+
+	/* We've finished reading. Now flush the compressor stream. */
+	while (1) {
+		zresult = ZSTD_endStream(cstream, &output);
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "error ending compression stream: %s",
+				ZSTD_getErrorName(zresult));
+			res = NULL;
+			goto finally;
+		}
+
+		if (output.pos) {
+#if PY_MAJOR_VERSION >= 3
+			writeResult = PyObject_CallMethod(dest, "write", "y#",
+#else
+			writeResult = PyObject_CallMethod(dest, "write", "s#",
+#endif
+				output.dst, output.pos);
+			totalWrite += output.pos;
+			Py_XDECREF(writeResult);
+			output.pos = 0;
+		}
+
+		if (!zresult) {
+			break;
+		}
+	}
+
+	ZSTD_freeCStream(cstream);
+	cstream = NULL;
+
+	totalReadPy = PyLong_FromSsize_t(totalRead);
+	totalWritePy = PyLong_FromSsize_t(totalWrite);
+	res = PyTuple_Pack(2, totalReadPy, totalWritePy);
+	Py_DecRef(totalReadPy);
+	Py_DecRef(totalWritePy);
+
+finally:
+	if (output.dst) {
+		PyMem_Free(output.dst);
+	}
+
+	if (cstream) {
+		ZSTD_freeCStream(cstream);
+	}
+
+	return res;
+}
+
+PyDoc_STRVAR(ZstdCompressor_compress__doc__,
+"compress(data, allow_empty=False)\n"
+"\n"
+"Compress data in a single operation.\n"
+"\n"
+"This is the simplest mechanism to perform compression: simply pass in a\n"
+"value and get a compressed value back. It is also the most prone to abuse.\n"
+"The input and output values must fit in memory, so passing in very large\n"
+"values can result in excessive memory usage. For this reason, one of the\n"
+"streaming based APIs is preferred for larger values.\n"
+);
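+
+/* Example (Python): the one-shot API described above, sketched with
+ * placeholder data:
+ *
+ *     import zstd
+ *     compressed = zstd.ZstdCompressor(level=3).compress(b'some data')
+ */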
+
+static PyObject* ZstdCompressor_compress(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"data",
+		"allow_empty",
+		NULL
+	};
+
+	const char* source;
+	Py_ssize_t sourceSize;
+	PyObject* allowEmpty = NULL;
+	size_t destSize;
+	PyObject* output;
+	char* dest;
+	void* dictData = NULL;
+	size_t dictSize = 0;
+	size_t zresult;
+	ZSTD_parameters zparams;
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|O",
+#else
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|O",
+#endif
+		kwlist, &source, &sourceSize, &allowEmpty)) {
+		return NULL;
+	}
+
+	/* Limitation in zstd C API doesn't let decompression side distinguish
+	   between content size of 0 and unknown content size. This can make round
+	   tripping via Python difficult. Until this is fixed, require a flag
+	   to fire the footgun.
+	   https://github.com/indygreg/python-zstandard/issues/11 */
+	if (0 == sourceSize && self->fparams.contentSizeFlag
+		&& (!allowEmpty || PyObject_Not(allowEmpty))) {
+		PyErr_SetString(PyExc_ValueError, "cannot write empty inputs when writing content sizes");
+		return NULL;
+	}
+
+	destSize = ZSTD_compressBound(sourceSize);
+	output = PyBytes_FromStringAndSize(NULL, destSize);
+	if (!output) {
+		return NULL;
+	}
+
+	dest = PyBytes_AsString(output);
+
+	if (self->dict) {
+		dictData = self->dict->dictData;
+		dictSize = self->dict->dictSize;
+	}
+
+	memset(&zparams, 0, sizeof(zparams));
+	if (!self->cparams) {
+		zparams.cParams = ZSTD_getCParams(self->compressionLevel, sourceSize, dictSize);
+	}
+	else {
+		ztopy_compression_parameters(self->cparams, &zparams.cParams);
+		/* Do NOT call ZSTD_adjustCParams() here because the compression params
+		come from the user. */
+	}
+
+	zparams.fParams = self->fparams;
+
+	/* The raw dict data has to be processed before it can be used. Since this
+	adds overhead - especially if multiple dictionary compression operations
+	are performed on the same ZstdCompressor instance - we create a
+	ZSTD_CDict once and reuse it for all operations.
+
+	Note: the compression parameters used for the first invocation (possibly
+	derived from the source size) will be reused on all subsequent invocations.
+	https://github.com/facebook/zstd/issues/358 contains more info. We could
+	potentially add an argument somewhere to control this behavior.
+	*/
+	if (dictData && !self->cdict) {
+		if (populate_cdict(self, dictData, dictSize, &zparams)) {
+			Py_DECREF(output);
+			return NULL;
+		}
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	/* By avoiding ZSTD_compress(), we don't necessarily write out content
+	   size. This means the argument to ZstdCompressor to control frame
+	   parameters is honored. */
+	if (self->cdict) {
+		zresult = ZSTD_compress_usingCDict(self->cctx, dest, destSize,
+			source, sourceSize, self->cdict);
+	}
+	else {
+		zresult = ZSTD_compress_advanced(self->cctx, dest, destSize,
+			source, sourceSize, dictData, dictSize, zparams);
+	}
+	Py_END_ALLOW_THREADS
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "cannot compress: %s", ZSTD_getErrorName(zresult));
+		Py_CLEAR(output);
+		return NULL;
+	}
+	else {
+		Py_SIZE(output) = zresult;
+	}
+
+	return output;
+}
+
+PyDoc_STRVAR(ZstdCompressionObj__doc__,
+"compressobj()\n"
+"\n"
+"Return an object exposing ``compress(data)`` and ``flush()`` methods.\n"
+"\n"
+"The returned object exposes an API similar to ``zlib.compressobj`` and\n"
+"``bz2.BZ2Compressor`` so that callers can swap in the zstd compressor\n"
+"without changing how compression is performed.\n"
+);
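+
+/* Example (Python): sketch of the zlib/bz2-style incremental API
+ * described above; the chunks are placeholders:
+ *
+ *     import zstd
+ *     cobj = zstd.ZstdCompressor().compressobj()
+ *     data = cobj.compress(b'chunk 0')
+ *     data += cobj.compress(b'chunk 1')
+ *     data += cobj.flush()  # or flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
+ */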
+
+static ZstdCompressionObj* ZstdCompressor_compressobj(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"size",
+		NULL
+	};
+
+	Py_ssize_t inSize = 0;
+	size_t outSize = ZSTD_CStreamOutSize();
+	ZstdCompressionObj* result;
+
+	/* Parse arguments before allocating so a bad call does not leak the
+	   new object. */
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|n", kwlist, &inSize)) {
+		return NULL;
+	}
+
+	result = PyObject_New(ZstdCompressionObj, &ZstdCompressionObjType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->cstream = CStream_from_ZstdCompressor(self, inSize);
+	if (!result->cstream) {
+		Py_DECREF(result);
+		return NULL;
+	}
+
+	result->output.dst = PyMem_Malloc(outSize);
+	if (!result->output.dst) {
+		PyErr_NoMemory();
+		Py_DECREF(result);
+		return NULL;
+	}
+	result->output.size = outSize;
+	result->output.pos = 0;
+
+	result->compressor = self;
+	Py_INCREF(result->compressor);
+
+	result->finished = 0;
+
+	return result;
+}
+
+PyDoc_STRVAR(ZstdCompressor_read_from__doc__,
+"read_from(reader, [size=0, read_size=default, write_size=default])\n"
+"Read uncompressed data from a reader and return an iterator\n"
+"\n"
+"Returns an iterator of compressed data produced from reading from ``reader``.\n"
+"\n"
+"Uncompressed data will be obtained from ``reader`` by calling its\n"
+"``read(size)`` method. The source data will be streamed into a\n"
+"compressor. As compressed data is available, it will be exposed to the\n"
+"iterator.\n"
+"\n"
+"Data is read from the source in chunks of ``read_size``. Compressed chunks\n"
+"are at most ``write_size`` bytes. Both values default to the zstd input and\n"
+"output defaults, respectively.\n"
+"\n"
+"The caller is partially in control of how fast data is fed into the\n"
+"compressor by how it consumes the returned iterator. The compressor will\n"
+"not consume from the reader unless the caller consumes from the iterator.\n"
+);
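+
+/* Example (Python): iterating compressed chunks as described above;
+ * the input file name is a placeholder:
+ *
+ *     import zstd
+ *     cctx = zstd.ZstdCompressor()
+ *     with open('in.bin', 'rb') as fh:
+ *         for chunk in cctx.read_from(fh):
+ *             pass  # consume compressed chunk
+ */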
+
+static ZstdCompressorIterator* ZstdCompressor_read_from(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"reader",
+		"size",
+		"read_size",
+		"write_size",
+		NULL
+	};
+
+	PyObject* reader;
+	Py_ssize_t sourceSize = 0;
+	size_t inSize = ZSTD_CStreamInSize();
+	size_t outSize = ZSTD_CStreamOutSize();
+	ZstdCompressorIterator* result;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nkk", kwlist, &reader, &sourceSize,
+		&inSize, &outSize)) {
+		return NULL;
+	}
+
+	result = PyObject_New(ZstdCompressorIterator, &ZstdCompressorIteratorType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->compressor = NULL;
+	result->reader = NULL;
+	result->buffer = NULL;
+	result->cstream = NULL;
+	result->input.src = NULL;
+	result->output.dst = NULL;
+	result->readResult = NULL;
+
+	if (PyObject_HasAttrString(reader, "read")) {
+		result->reader = reader;
+		Py_INCREF(result->reader);
+	}
+	else if (1 == PyObject_CheckBuffer(reader)) {
+		result->buffer = PyMem_Malloc(sizeof(Py_buffer));
+		if (!result->buffer) {
+			goto except;
+		}
+
+		memset(result->buffer, 0, sizeof(Py_buffer));
+
+		if (0 != PyObject_GetBuffer(reader, result->buffer, PyBUF_CONTIG_RO)) {
+			goto except;
+		}
+
+		result->bufferOffset = 0;
+		sourceSize = result->buffer->len;
+	}
+	else {
+		PyErr_SetString(PyExc_ValueError,
+			"must pass an object with a read() method or one that conforms to the buffer protocol");
+		goto except;
+	}
+
+	result->compressor = self;
+	Py_INCREF(result->compressor);
+
+	result->sourceSize = sourceSize;
+	result->cstream = CStream_from_ZstdCompressor(self, sourceSize);
+	if (!result->cstream) {
+		goto except;
+	}
+
+	result->inSize = inSize;
+	result->outSize = outSize;
+
+	result->output.dst = PyMem_Malloc(outSize);
+	if (!result->output.dst) {
+		PyErr_NoMemory();
+		goto except;
+	}
+	result->output.size = outSize;
+	result->output.pos = 0;
+
+	result->input.src = NULL;
+	result->input.size = 0;
+	result->input.pos = 0;
+
+	result->finishedInput = 0;
+	result->finishedOutput = 0;
+
+	goto finally;
+
+except:
+	if (result->cstream) {
+		ZSTD_freeCStream(result->cstream);
+		result->cstream = NULL;
+	}
+
+	/* NULL the fields so the dealloc triggered below does not decref
+	   them a second time. */
+	Py_DecRef((PyObject*)result->compressor);
+	result->compressor = NULL;
+	Py_DecRef(result->reader);
+	result->reader = NULL;
+
+	Py_DECREF(result);
+	result = NULL;
+
+finally:
+	return result;
+}
+
+PyDoc_STRVAR(ZstdCompressor_write_to___doc__,
+"write_to(writer[, size=0, write_size=default])\n"
+"Create a context manager to write compressed data to an object.\n"
+"\n"
+"The passed object must have a ``write()`` method.\n"
+"\n"
+"The caller feeds input data to the object by calling ``compress(data)``.\n"
+"Compressed data is written to the argument given to this function.\n"
+"\n"
+"The function takes an optional ``size`` argument indicating the total size\n"
+"of the eventual input. If specified, the size will influence compression\n"
+"parameter tuning and could result in the size being written into the\n"
+"header of the compressed data.\n"
+"\n"
+"An optional ``write_size`` argument is also accepted. It defines the maximum\n"
+"byte size of chunks fed to ``write()``. By default, it uses the zstd default\n"
+"for a compressor output stream.\n"
+);
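+
+/* Example (Python): the context manager described above, sketched with
+ * a placeholder output file:
+ *
+ *     import zstd
+ *     cctx = zstd.ZstdCompressor()
+ *     with open('out.zst', 'wb') as fh:
+ *         with cctx.write_to(fh) as compressor:
+ *             compressor.compress(b'chunk 0')
+ *             compressor.compress(b'chunk 1')
+ */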
+
+static ZstdCompressionWriter* ZstdCompressor_write_to(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"writer",
+		"size",
+		"write_size",
+		NULL
+	};
+
+	PyObject* writer;
+	ZstdCompressionWriter* result;
+	Py_ssize_t sourceSize = 0;
+	size_t outSize = ZSTD_CStreamOutSize();
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nk", kwlist, &writer, &sourceSize,
+		&outSize)) {
+		return NULL;
+	}
+
+	if (!PyObject_HasAttrString(writer, "write")) {
+		PyErr_SetString(PyExc_ValueError, "must pass an object with a write() method");
+		return NULL;
+	}
+
+	result = PyObject_New(ZstdCompressionWriter, &ZstdCompressionWriterType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->compressor = self;
+	Py_INCREF(result->compressor);
+
+	result->writer = writer;
+	Py_INCREF(result->writer);
+
+	result->sourceSize = sourceSize;
+
+	result->outSize = outSize;
+
+	result->entered = 0;
+	result->cstream = NULL;
+
+	return result;
+}
+
+static PyMethodDef ZstdCompressor_methods[] = {
+	{ "compress", (PyCFunction)ZstdCompressor_compress,
+	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_compress__doc__ },
+	{ "compressobj", (PyCFunction)ZstdCompressor_compressobj,
+	METH_VARARGS | METH_KEYWORDS, ZstdCompressionObj__doc__ },
+	{ "copy_stream", (PyCFunction)ZstdCompressor_copy_stream,
+	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_copy_stream__doc__ },
+	{ "read_from", (PyCFunction)ZstdCompressor_read_from,
+	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_read_from__doc__ },
+	{ "write_to", (PyCFunction)ZstdCompressor_write_to,
+	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_write_to___doc__ },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdCompressorType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressor",         /* tp_name */
+	sizeof(ZstdCompressor),        /* tp_basicsize */
+	0,                              /* tp_itemsize */
+	(destructor)ZstdCompressor_dealloc, /* tp_dealloc */
+	0,                              /* tp_print */
+	0,                              /* tp_getattr */
+	0,                              /* tp_setattr */
+	0,                              /* tp_compare */
+	0,                              /* tp_repr */
+	0,                              /* tp_as_number */
+	0,                              /* tp_as_sequence */
+	0,                              /* tp_as_mapping */
+	0,                              /* tp_hash */
+	0,                              /* tp_call */
+	0,                              /* tp_str */
+	0,                              /* tp_getattro */
+	0,                              /* tp_setattro */
+	0,                              /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdCompressor__doc__,          /* tp_doc */
+	0,                              /* tp_traverse */
+	0,                              /* tp_clear */
+	0,                              /* tp_richcompare */
+	0,                              /* tp_weaklistoffset */
+	0,                              /* tp_iter */
+	0,                              /* tp_iternext */
+	ZstdCompressor_methods,         /* tp_methods */
+	0,                              /* tp_members */
+	0,                              /* tp_getset */
+	0,                              /* tp_base */
+	0,                              /* tp_dict */
+	0,                              /* tp_descr_get */
+	0,                              /* tp_descr_set */
+	0,                              /* tp_dictoffset */
+	(initproc)ZstdCompressor_init,  /* tp_init */
+	0,                              /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void compressor_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdCompressorType) = &PyType_Type;
+	if (PyType_Ready(&ZstdCompressorType) < 0) {
+		return;
+	}
+
+	Py_INCREF((PyObject*)&ZstdCompressorType);
+	PyModule_AddObject(mod, "ZstdCompressor",
+		(PyObject*)&ZstdCompressorType);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/compressoriterator.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,234 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(ZstdCompressorIterator__doc__,
+"Represents an iterator of compressed data.\n"
+);
+
+static void ZstdCompressorIterator_dealloc(ZstdCompressorIterator* self) {
+	Py_XDECREF(self->readResult);
+	Py_XDECREF(self->compressor);
+	Py_XDECREF(self->reader);
+
+	if (self->buffer) {
+		PyBuffer_Release(self->buffer);
+		PyMem_FREE(self->buffer);
+		self->buffer = NULL;
+	}
+
+	if (self->cstream) {
+		ZSTD_freeCStream(self->cstream);
+		self->cstream = NULL;
+	}
+
+	if (self->output.dst) {
+		PyMem_Free(self->output.dst);
+		self->output.dst = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdCompressorIterator_iter(PyObject* self) {
+	Py_INCREF(self);
+	return self;
+}
+
+static PyObject* ZstdCompressorIterator_iternext(ZstdCompressorIterator* self) {
+	size_t zresult;
+	PyObject* readResult = NULL;
+	PyObject* chunk;
+	char* readBuffer;
+	Py_ssize_t readSize = 0;
+	Py_ssize_t bufferRemaining;
+
+	if (self->finishedOutput) {
+		PyErr_SetString(PyExc_StopIteration, "output flushed");
+		return NULL;
+	}
+
+feedcompressor:
+
+	/* If we have data left in the input, consume it. */
+	if (self->input.pos < self->input.size) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_compressStream(self->cstream, &self->output, &self->input);
+		Py_END_ALLOW_THREADS
+
+		/* Release the Python object holding the input buffer. */
+		if (self->input.pos == self->input.size) {
+			self->input.src = NULL;
+			self->input.pos = 0;
+			self->input.size = 0;
+			Py_DECREF(self->readResult);
+			self->readResult = NULL;
+		}
+
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		/* If it produced output data, emit it. */
+		if (self->output.pos) {
+			chunk = PyBytes_FromStringAndSize(self->output.dst, self->output.pos);
+			self->output.pos = 0;
+			return chunk;
+		}
+	}
+
+	/* We should never have output data sitting around after a previous call. */
+	assert(self->output.pos == 0);
+
+	/* The code above should have either emitted a chunk and returned or consumed
+	the entire input buffer. So the state of the input buffer is not
+	relevant. */
+	if (!self->finishedInput) {
+		if (self->reader) {
+			readResult = PyObject_CallMethod(self->reader, "read", "I", self->inSize);
+			if (!readResult) {
+				PyErr_SetString(ZstdError, "could not read() from source");
+				return NULL;
+			}
+
+			PyBytes_AsStringAndSize(readResult, &readBuffer, &readSize);
+		}
+		else {
+			assert(self->buffer && self->buffer->buf);
+
+			/* Only support contiguous C arrays. */
+			assert(self->buffer->strides == NULL && self->buffer->suboffsets == NULL);
+			assert(self->buffer->itemsize == 1);
+
+			readBuffer = (char*)self->buffer->buf + self->bufferOffset;
+			bufferRemaining = self->buffer->len - self->bufferOffset;
+			readSize = min(bufferRemaining, (Py_ssize_t)self->inSize);
+			self->bufferOffset += readSize;
+		}
+
+		if (0 == readSize) {
+			Py_XDECREF(readResult);
+			self->finishedInput = 1;
+		}
+		else {
+			self->readResult = readResult;
+		}
+	}
+
+	/* EOF */
+	if (0 == readSize) {
+		zresult = ZSTD_endStream(self->cstream, &self->output);
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "error ending compression stream: %s",
+				ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		assert(self->output.pos);
+
+		if (0 == zresult) {
+			self->finishedOutput = 1;
+		}
+
+		chunk = PyBytes_FromStringAndSize(self->output.dst, self->output.pos);
+		self->output.pos = 0;
+		return chunk;
+	}
+
+	/* New data from reader. Feed into compressor. */
+	self->input.src = readBuffer;
+	self->input.size = readSize;
+	self->input.pos = 0;
+
+	Py_BEGIN_ALLOW_THREADS
+	zresult = ZSTD_compressStream(self->cstream, &self->output, &self->input);
+	Py_END_ALLOW_THREADS
+
+	/* The input buffer currently points to memory managed by Python
+	(readBuffer). This object was allocated by this function. If it wasn't
+	fully consumed, we need to release it in a subsequent function call.
+	If it is fully consumed, do that now.
+	*/
+	if (self->input.pos == self->input.size) {
+		self->input.src = NULL;
+		self->input.pos = 0;
+		self->input.size = 0;
+		Py_XDECREF(self->readResult);
+		self->readResult = NULL;
+	}
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	assert(self->input.pos <= self->input.size);
+
+	/* If we didn't write anything, start the process over. */
+	if (0 == self->output.pos) {
+		goto feedcompressor;
+	}
+
+	chunk = PyBytes_FromStringAndSize(self->output.dst, self->output.pos);
+	self->output.pos = 0;
+	return chunk;
+}
+
+PyTypeObject ZstdCompressorIteratorType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressorIterator",   /* tp_name */
+	sizeof(ZstdCompressorIterator),  /* tp_basicsize */
+	0,                               /* tp_itemsize */
+	(destructor)ZstdCompressorIterator_dealloc, /* tp_dealloc */
+	0,                               /* tp_print */
+	0,                               /* tp_getattr */
+	0,                               /* tp_setattr */
+	0,                               /* tp_compare */
+	0,                               /* tp_repr */
+	0,                               /* tp_as_number */
+	0,                               /* tp_as_sequence */
+	0,                               /* tp_as_mapping */
+	0,                               /* tp_hash */
+	0,                               /* tp_call */
+	0,                               /* tp_str */
+	0,                               /* tp_getattro */
+	0,                               /* tp_setattro */
+	0,                               /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdCompressorIterator__doc__,   /* tp_doc */
+	0,                               /* tp_traverse */
+	0,                               /* tp_clear */
+	0,                               /* tp_richcompare */
+	0,                               /* tp_weaklistoffset */
+	ZstdCompressorIterator_iter,     /* tp_iter */
+	(iternextfunc)ZstdCompressorIterator_iternext, /* tp_iternext */
+	0,                               /* tp_methods */
+	0,                               /* tp_members */
+	0,                               /* tp_getset */
+	0,                               /* tp_base */
+	0,                               /* tp_dict */
+	0,                               /* tp_descr_get */
+	0,                               /* tp_descr_set */
+	0,                               /* tp_dictoffset */
+	0,                               /* tp_init */
+	0,                               /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void compressoriterator_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdCompressorIteratorType) = &PyType_Type;
+	if (PyType_Ready(&ZstdCompressorIteratorType) < 0) {
+		return;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/constants.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,87 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+static char frame_header[] = {
+	'\x28',
+	'\xb5',
+	'\x2f',
+	'\xfd',
+};
+
+void constants_module_init(PyObject* mod) {
+	PyObject* version;
+	PyObject* zstdVersion;
+	PyObject* frameHeader;
+
+#if PY_MAJOR_VERSION >= 3
+	version = PyUnicode_FromString(PYTHON_ZSTANDARD_VERSION);
+#else
+	version = PyString_FromString(PYTHON_ZSTANDARD_VERSION);
+#endif
+	Py_INCREF(version);
+	PyModule_AddObject(mod, "__version__", version);
+
+	ZstdError = PyErr_NewException("zstd.ZstdError", NULL, NULL);
+	PyModule_AddObject(mod, "ZstdError", ZstdError);
+
+	PyModule_AddIntConstant(mod, "COMPRESSOBJ_FLUSH_FINISH", compressorobj_flush_finish);
+	PyModule_AddIntConstant(mod, "COMPRESSOBJ_FLUSH_BLOCK", compressorobj_flush_block);
+
+	/* For now, the version is a simple tuple instead of a dedicated type. */
+	zstdVersion = PyTuple_New(3);
+	PyTuple_SetItem(zstdVersion, 0, PyLong_FromLong(ZSTD_VERSION_MAJOR));
+	PyTuple_SetItem(zstdVersion, 1, PyLong_FromLong(ZSTD_VERSION_MINOR));
+	PyTuple_SetItem(zstdVersion, 2, PyLong_FromLong(ZSTD_VERSION_RELEASE));
+	Py_IncRef(zstdVersion);
+	PyModule_AddObject(mod, "ZSTD_VERSION", zstdVersion);
+
+	frameHeader = PyBytes_FromStringAndSize(frame_header, sizeof(frame_header));
+	if (frameHeader) {
+		PyModule_AddObject(mod, "FRAME_HEADER", frameHeader);
+	}
+	else {
+		PyErr_Format(PyExc_ValueError, "could not create frame header object");
+	}
+
+	PyModule_AddIntConstant(mod, "MAX_COMPRESSION_LEVEL", ZSTD_maxCLevel());
+	PyModule_AddIntConstant(mod, "COMPRESSION_RECOMMENDED_INPUT_SIZE",
+		(long)ZSTD_CStreamInSize());
+	PyModule_AddIntConstant(mod, "COMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+		(long)ZSTD_CStreamOutSize());
+	PyModule_AddIntConstant(mod, "DECOMPRESSION_RECOMMENDED_INPUT_SIZE",
+		(long)ZSTD_DStreamInSize());
+	PyModule_AddIntConstant(mod, "DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+		(long)ZSTD_DStreamOutSize());
+
+	PyModule_AddIntConstant(mod, "MAGIC_NUMBER", ZSTD_MAGICNUMBER);
+	PyModule_AddIntConstant(mod, "WINDOWLOG_MIN", ZSTD_WINDOWLOG_MIN);
+	PyModule_AddIntConstant(mod, "WINDOWLOG_MAX", ZSTD_WINDOWLOG_MAX);
+	PyModule_AddIntConstant(mod, "CHAINLOG_MIN", ZSTD_CHAINLOG_MIN);
+	PyModule_AddIntConstant(mod, "CHAINLOG_MAX", ZSTD_CHAINLOG_MAX);
+	PyModule_AddIntConstant(mod, "HASHLOG_MIN", ZSTD_HASHLOG_MIN);
+	PyModule_AddIntConstant(mod, "HASHLOG_MAX", ZSTD_HASHLOG_MAX);
+	PyModule_AddIntConstant(mod, "HASHLOG3_MAX", ZSTD_HASHLOG3_MAX);
+	PyModule_AddIntConstant(mod, "SEARCHLOG_MIN", ZSTD_SEARCHLOG_MIN);
+	PyModule_AddIntConstant(mod, "SEARCHLOG_MAX", ZSTD_SEARCHLOG_MAX);
+	PyModule_AddIntConstant(mod, "SEARCHLENGTH_MIN", ZSTD_SEARCHLENGTH_MIN);
+	PyModule_AddIntConstant(mod, "SEARCHLENGTH_MAX", ZSTD_SEARCHLENGTH_MAX);
+	PyModule_AddIntConstant(mod, "TARGETLENGTH_MIN", ZSTD_TARGETLENGTH_MIN);
+	PyModule_AddIntConstant(mod, "TARGETLENGTH_MAX", ZSTD_TARGETLENGTH_MAX);
+
+	PyModule_AddIntConstant(mod, "STRATEGY_FAST", ZSTD_fast);
+	PyModule_AddIntConstant(mod, "STRATEGY_DFAST", ZSTD_dfast);
+	PyModule_AddIntConstant(mod, "STRATEGY_GREEDY", ZSTD_greedy);
+	PyModule_AddIntConstant(mod, "STRATEGY_LAZY", ZSTD_lazy);
+	PyModule_AddIntConstant(mod, "STRATEGY_LAZY2", ZSTD_lazy2);
+	PyModule_AddIntConstant(mod, "STRATEGY_BTLAZY2", ZSTD_btlazy2);
+	PyModule_AddIntConstant(mod, "STRATEGY_BTOPT", ZSTD_btopt);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/decompressionwriter.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,187 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(ZstdDecompressionWriter__doc,
+"A context manager used for writing decompressed output.\n"
+);
+
+static void ZstdDecompressionWriter_dealloc(ZstdDecompressionWriter* self) {
+	Py_XDECREF(self->decompressor);
+	Py_XDECREF(self->writer);
+
+	if (self->dstream) {
+		ZSTD_freeDStream(self->dstream);
+		self->dstream = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdDecompressionWriter_enter(ZstdDecompressionWriter* self) {
+	if (self->entered) {
+		PyErr_SetString(ZstdError, "cannot __enter__ multiple times");
+		return NULL;
+	}
+
+	self->dstream = DStream_from_ZstdDecompressor(self->decompressor);
+	if (!self->dstream) {
+		return NULL;
+	}
+
+	self->entered = 1;
+
+	Py_INCREF(self);
+	return (PyObject*)self;
+}
+
+static PyObject* ZstdDecompressionWriter_exit(ZstdDecompressionWriter* self, PyObject* args) {
+	self->entered = 0;
+
+	if (self->dstream) {
+		ZSTD_freeDStream(self->dstream);
+		self->dstream = NULL;
+	}
+
+	Py_RETURN_FALSE;
+}
+
+static PyObject* ZstdDecompressionWriter_memory_size(ZstdDecompressionWriter* self) {
+	if (!self->dstream) {
+		PyErr_SetString(ZstdError, "cannot determine size of inactive decompressor; "
+			"call when context manager is active");
+		return NULL;
+	}
+
+	return PyLong_FromSize_t(ZSTD_sizeof_DStream(self->dstream));
+}
+
+static PyObject* ZstdDecompressionWriter_write(ZstdDecompressionWriter* self, PyObject* args) {
+	const char* source;
+	Py_ssize_t sourceSize;
+	size_t zresult = 0;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	PyObject* res;
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+#else
+	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+#endif
+		return NULL;
+	}
+
+	if (!self->entered) {
+		PyErr_SetString(ZstdError, "write must be called from an active context manager");
+		return NULL;
+	}
+
+	output.dst = PyMem_Malloc(self->outSize);
+	if (!output.dst) {
+		return PyErr_NoMemory();
+	}
+	output.size = self->outSize;
+	output.pos = 0;
+
+	input.src = source;
+	input.size = sourceSize;
+	input.pos = 0;
+
+	while ((ssize_t)input.pos < sourceSize) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_decompressStream(self->dstream, &output, &input);
+		Py_END_ALLOW_THREADS
+
+		if (ZSTD_isError(zresult)) {
+			PyMem_Free(output.dst);
+			PyErr_Format(ZstdError, "zstd decompress error: %s",
+				ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		if (output.pos) {
+#if PY_MAJOR_VERSION >= 3
+			res = PyObject_CallMethod(self->writer, "write", "y#",
+#else
+			res = PyObject_CallMethod(self->writer, "write", "s#",
+#endif
+				output.dst, output.pos);
+			Py_XDECREF(res);
+			output.pos = 0;
+		}
+	}
+
+	PyMem_Free(output.dst);
+
+	/* TODO return bytes written */
+	Py_RETURN_NONE;
+}
+
+static PyMethodDef ZstdDecompressionWriter_methods[] = {
+	{ "__enter__", (PyCFunction)ZstdDecompressionWriter_enter, METH_NOARGS,
+	PyDoc_STR("Enter a decompression context.") },
+	{ "__exit__", (PyCFunction)ZstdDecompressionWriter_exit, METH_VARARGS,
+	PyDoc_STR("Exit a decompression context.") },
+	{ "memory_size", (PyCFunction)ZstdDecompressionWriter_memory_size, METH_NOARGS,
+	PyDoc_STR("Obtain the memory size in bytes of the underlying decompressor.") },
+	{ "write", (PyCFunction)ZstdDecompressionWriter_write, METH_VARARGS,
+	PyDoc_STR("Decompress data") },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdDecompressionWriterType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdDecompressionWriter", /* tp_name */
+	sizeof(ZstdDecompressionWriter),/* tp_basicsize */
+	0,                              /* tp_itemsize */
+	(destructor)ZstdDecompressionWriter_dealloc, /* tp_dealloc */
+	0,                              /* tp_print */
+	0,                              /* tp_getattr */
+	0,                              /* tp_setattr */
+	0,                              /* tp_compare */
+	0,                              /* tp_repr */
+	0,                              /* tp_as_number */
+	0,                              /* tp_as_sequence */
+	0,                              /* tp_as_mapping */
+	0,                              /* tp_hash */
+	0,                              /* tp_call */
+	0,                              /* tp_str */
+	0,                              /* tp_getattro */
+	0,                              /* tp_setattro */
+	0,                              /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdDecompressionWriter__doc,   /* tp_doc */
+	0,                              /* tp_traverse */
+	0,                              /* tp_clear */
+	0,                              /* tp_richcompare */
+	0,                              /* tp_weaklistoffset */
+	0,                              /* tp_iter */
+	0,                              /* tp_iternext */
+	ZstdDecompressionWriter_methods,/* tp_methods */
+	0,                              /* tp_members */
+	0,                              /* tp_getset */
+	0,                              /* tp_base */
+	0,                              /* tp_dict */
+	0,                              /* tp_descr_get */
+	0,                              /* tp_descr_set */
+	0,                              /* tp_dictoffset */
+	0,                              /* tp_init */
+	0,                              /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void decompressionwriter_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdDecompressionWriterType) = &PyType_Type;
+	if (PyType_Ready(&ZstdDecompressionWriterType) < 0) {
+		return;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/decompressobj.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,170 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(DecompressionObj__doc__,
+"Perform decompression using a standard library compatible API.\n"
+);
+
+static void DecompressionObj_dealloc(ZstdDecompressionObj* self) {
+	if (self->dstream) {
+		ZSTD_freeDStream(self->dstream);
+		self->dstream = NULL;
+	}
+
+	Py_XDECREF(self->decompressor);
+
+	PyObject_Del(self);
+}
+
+static PyObject* DecompressionObj_decompress(ZstdDecompressionObj* self, PyObject* args) {
+	const char* source;
+	Py_ssize_t sourceSize;
+	size_t zresult;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	size_t outSize = ZSTD_DStreamOutSize();
+	PyObject* result = NULL;
+	Py_ssize_t resultSize = 0;
+
+	if (self->finished) {
+		PyErr_SetString(ZstdError, "cannot use a decompressobj multiple times");
+		return NULL;
+	}
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTuple(args, "y#",
+#else
+	if (!PyArg_ParseTuple(args, "s#",
+#endif
+		&source, &sourceSize)) {
+		return NULL;
+	}
+
+	input.src = source;
+	input.size = sourceSize;
+	input.pos = 0;
+
+	output.dst = PyMem_Malloc(outSize);
+	if (!output.dst) {
+		PyErr_NoMemory();
+		return NULL;
+	}
+	output.size = outSize;
+	output.pos = 0;
+
+	/* Read input until exhausted. */
+	while (input.pos < input.size) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_decompressStream(self->dstream, &output, &input);
+		Py_END_ALLOW_THREADS
+
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "zstd decompressor error: %s",
+				ZSTD_getErrorName(zresult));
+			goto except;
+		}
+
+		if (0 == zresult) {
+			self->finished = 1;
+		}
+
+		if (output.pos) {
+			if (result) {
+				resultSize = PyBytes_GET_SIZE(result);
+				if (-1 == _PyBytes_Resize(&result, resultSize + output.pos)) {
+					goto except;
+				}
+
+				memcpy(PyBytes_AS_STRING(result) + resultSize,
+					output.dst, output.pos);
+			}
+			else {
+				result = PyBytes_FromStringAndSize(output.dst, output.pos);
+				if (!result) {
+					goto except;
+				}
+			}
+
+			output.pos = 0;
+		}
+	}
+
+	if (!result) {
+		result = PyBytes_FromString("");
+	}
+
+	goto finally;
+
+except:
+	Py_DecRef(result);
+	result = NULL;
+
+finally:
+	PyMem_Free(output.dst);
+
+	return result;
+}
+
+static PyMethodDef DecompressionObj_methods[] = {
+	{ "decompress", (PyCFunction)DecompressionObj_decompress,
+	  METH_VARARGS, PyDoc_STR("decompress data") },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdDecompressionObjType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdDecompressionObj",    /* tp_name */
+	sizeof(ZstdDecompressionObj),   /* tp_basicsize */
+	0,                              /* tp_itemsize */
+	(destructor)DecompressionObj_dealloc, /* tp_dealloc */
+	0,                              /* tp_print */
+	0,                              /* tp_getattr */
+	0,                              /* tp_setattr */
+	0,                              /* tp_compare */
+	0,                              /* tp_repr */
+	0,                              /* tp_as_number */
+	0,                              /* tp_as_sequence */
+	0,                              /* tp_as_mapping */
+	0,                              /* tp_hash */
+	0,                              /* tp_call */
+	0,                              /* tp_str */
+	0,                              /* tp_getattro */
+	0,                              /* tp_setattro */
+	0,                              /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	DecompressionObj__doc__,        /* tp_doc */
+	0,                              /* tp_traverse */
+	0,                              /* tp_clear */
+	0,                              /* tp_richcompare */
+	0,                              /* tp_weaklistoffset */
+	0,                              /* tp_iter */
+	0,                              /* tp_iternext */
+	DecompressionObj_methods,       /* tp_methods */
+	0,                              /* tp_members */
+	0,                              /* tp_getset */
+	0,                              /* tp_base */
+	0,                              /* tp_dict */
+	0,                              /* tp_descr_get */
+	0,                              /* tp_descr_set */
+	0,                              /* tp_dictoffset */
+	0,                              /* tp_init */
+	0,                              /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void decompressobj_module_init(PyObject* module) {
+	Py_TYPE(&ZstdDecompressionObjType) = &PyType_Type;
+	if (PyType_Ready(&ZstdDecompressionObjType) < 0) {
+		return;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/decompressor.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,672 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+ZSTD_DStream* DStream_from_ZstdDecompressor(ZstdDecompressor* decompressor) {
+	ZSTD_DStream* dstream;
+	void* dictData = NULL;
+	size_t dictSize = 0;
+	size_t zresult;
+
+	dstream = ZSTD_createDStream();
+	if (!dstream) {
+		PyErr_SetString(ZstdError, "could not create DStream");
+		return NULL;
+	}
+
+	if (decompressor->dict) {
+		dictData = decompressor->dict->dictData;
+		dictSize = decompressor->dict->dictSize;
+	}
+
+	if (dictData) {
+		zresult = ZSTD_initDStream_usingDict(dstream, dictData, dictSize);
+	}
+	else {
+		zresult = ZSTD_initDStream(dstream);
+	}
+
+	if (ZSTD_isError(zresult)) {
+		/* Free the stream so it is not leaked on initialization failure. */
+		ZSTD_freeDStream(dstream);
+		PyErr_Format(ZstdError, "could not initialize DStream: %s",
+			ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	return dstream;
+}
+
+PyDoc_STRVAR(Decompressor__doc__,
+"ZstdDecompressor(dict_data=None)\n"
+"\n"
+"Create an object used to perform Zstandard decompression.\n"
+"\n"
+"An instance can perform multiple decompression operations."
+);
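+
+/* Example (Python): construction sketch; ``dict_data``, when used, is a
+ * ``ZstdCompressionDict`` instance:
+ *
+ *     import zstd
+ *     dctx = zstd.ZstdDecompressor()
+ */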
+
+static int Decompressor_init(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"dict_data",
+		NULL
+	};
+
+	ZstdCompressionDict* dict = NULL;
+
+	self->refdctx = NULL;
+	self->dict = NULL;
+	self->ddict = NULL;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!", kwlist,
+		&ZstdCompressionDictType, &dict)) {
+		return -1;
+	}
+
+	/* Instead of creating a ZSTD_DCtx for every decompression operation,
+	   we create an instance at object creation time and recycle it via
+	   ZSTD_copyDCtx() on each use. This means each use is a malloc+memcpy
+	   instead of a malloc+init. */
+	/* TODO lazily initialize the reference ZSTD_DCtx on first use since
+	   not all instances of ZstdDecompressor will use a ZSTD_DCtx. */
+	self->refdctx = ZSTD_createDCtx();
+	if (!self->refdctx) {
+		PyErr_NoMemory();
+		goto except;
+	}
+
+	if (dict) {
+		self->dict = dict;
+		Py_INCREF(dict);
+	}
+
+	return 0;
+
+except:
+	if (self->refdctx) {
+		ZSTD_freeDCtx(self->refdctx);
+		self->refdctx = NULL;
+	}
+
+	return -1;
+}
+
+static void Decompressor_dealloc(ZstdDecompressor* self) {
+	if (self->refdctx) {
+		ZSTD_freeDCtx(self->refdctx);
+	}
+
+	Py_XDECREF(self->dict);
+
+	if (self->ddict) {
+		ZSTD_freeDDict(self->ddict);
+		self->ddict = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+PyDoc_STRVAR(Decompressor_copy_stream__doc__,
+	"copy_stream(ifh, ofh[, read_size=default, write_size=default]) -- decompress data between streams\n"
+	"\n"
+	"Compressed data will be read from ``ifh``, decompressed, and written to\n"
+	"``ofh``. ``ifh`` must have a ``read(size)`` method. ``ofh`` must have a\n"
+	"``write(data)`` method.\n"
+	"\n"
+	"The optional ``read_size`` and ``write_size`` arguments control the chunk\n"
+	"size of data that is ``read()`` and ``write()`` between streams. They default\n"
+	"to the default input and output sizes of zstd decompressor streams.\n"
+);
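+
+/* Example (Python): stream decompression mirroring the compressor's
+ * copy_stream; the file names are placeholders:
+ *
+ *     import zstd
+ *     dctx = zstd.ZstdDecompressor()
+ *     with open('in.zst', 'rb') as ifh, open('out.bin', 'wb') as ofh:
+ *         read_count, written_count = dctx.copy_stream(ifh, ofh)
+ */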
+
+static PyObject* Decompressor_copy_stream(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"ifh",
+		"ofh",
+		"read_size",
+		"write_size",
+		NULL
+	};
+
+	PyObject* source;
+	PyObject* dest;
+	size_t inSize = ZSTD_DStreamInSize();
+	size_t outSize = ZSTD_DStreamOutSize();
+	ZSTD_DStream* dstream;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	Py_ssize_t totalRead = 0;
+	Py_ssize_t totalWrite = 0;
+	char* readBuffer;
+	Py_ssize_t readSize;
+	PyObject* readResult;
+	PyObject* res = NULL;
+	size_t zresult = 0;
+	PyObject* writeResult;
+	PyObject* totalReadPy;
+	PyObject* totalWritePy;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk", kwlist, &source,
+		&dest, &inSize, &outSize)) {
+		return NULL;
+	}
+
+	if (!PyObject_HasAttrString(source, "read")) {
+		PyErr_SetString(PyExc_ValueError, "first argument must have a read() method");
+		return NULL;
+	}
+
+	if (!PyObject_HasAttrString(dest, "write")) {
+		PyErr_SetString(PyExc_ValueError, "second argument must have a write() method");
+		return NULL;
+	}
+
+	/* Prevent free on uninitialized memory in finally. */
+	output.dst = NULL;
+
+	dstream = DStream_from_ZstdDecompressor(self);
+	if (!dstream) {
+		res = NULL;
+		goto finally;
+	}
+
+	output.dst = PyMem_Malloc(outSize);
+	if (!output.dst) {
+		PyErr_NoMemory();
+		res = NULL;
+		goto finally;
+	}
+	output.size = outSize;
+	output.pos = 0;
+
+	/* Read source stream until EOF */
+	while (1) {
+		readResult = PyObject_CallMethod(source, "read", "n", inSize);
+		if (!readResult) {
+			PyErr_SetString(ZstdError, "could not read() from source");
+			goto finally;
+		}
+
+		PyBytes_AsStringAndSize(readResult, &readBuffer, &readSize);
+
+		/* If no data was read, we're at EOF. */
+		if (0 == readSize) {
+			break;
+		}
+
+		totalRead += readSize;
+
+		/* Send data to decompressor */
+		input.src = readBuffer;
+		input.size = readSize;
+		input.pos = 0;
+
+		while (input.pos < input.size) {
+			Py_BEGIN_ALLOW_THREADS
+			zresult = ZSTD_decompressStream(dstream, &output, &input);
+			Py_END_ALLOW_THREADS
+
+			if (ZSTD_isError(zresult)) {
+				PyErr_Format(ZstdError, "zstd decompressor error: %s",
+					ZSTD_getErrorName(zresult));
+				res = NULL;
+				goto finally;
+			}
+
+			if (output.pos) {
+#if PY_MAJOR_VERSION >= 3
+				writeResult = PyObject_CallMethod(dest, "write", "y#",
+#else
+				writeResult = PyObject_CallMethod(dest, "write", "s#",
+#endif
+					output.dst, output.pos);
+
+				Py_XDECREF(writeResult);
+				totalWrite += output.pos;
+				output.pos = 0;
+			}
+		}
+	}
+
+	/* Source stream is exhausted. Finish up. */
+
+	ZSTD_freeDStream(dstream);
+	dstream = NULL;
+
+	totalReadPy = PyLong_FromSsize_t(totalRead);
+	totalWritePy = PyLong_FromSsize_t(totalWrite);
+	res = PyTuple_Pack(2, totalReadPy, totalWritePy);
+	Py_DecRef(totalReadPy);
+	Py_DecRef(totalWritePy);
+
+finally:
+	if (output.dst) {
+		PyMem_Free(output.dst);
+	}
+
+	if (dstream) {
+		ZSTD_freeDStream(dstream);
+	}
+
+	return res;
+}
+
+PyDoc_STRVAR(Decompressor_decompress__doc__,
+"decompress(data[, max_output_size=None]) -- Decompress data in its entirety\n"
+"\n"
+"This method will decompress the entirety of the argument and return the\n"
+"result.\n"
+"\n"
+"The input bytes are expected to contain a full Zstandard frame (something\n"
+"compressed with ``ZstdCompressor.compress()`` or similar). If the input does\n"
+"not contain a full frame, an exception will be raised.\n"
+"\n"
+"If the frame header of the compressed data does not contain the content size,\n"
+"``max_output_size`` must be specified or ``ZstdError`` will be raised. An\n"
+"allocation of size ``max_output_size`` will be performed and an attempt will\n"
+"be made to perform decompression into that buffer. If the buffer is too\n"
+"small or cannot be allocated, ``ZstdError`` will be raised. The buffer will\n"
+"be resized if it is too large.\n"
+"\n"
+"Uncompressed data could be much larger than compressed data. As a result,\n"
+"calling this function could result in a very large memory allocation being\n"
+"performed to hold the uncompressed data. Therefore it is **highly**\n"
+"recommended to use a streaming decompression method instead of this one.\n"
+);
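+
+/* Example (Python): one-shot decompression as described above; ``frame``
+ * is assumed to hold a complete zstd frame:
+ *
+ *     import zstd
+ *     dctx = zstd.ZstdDecompressor()
+ *     data = dctx.decompress(frame)
+ *     # If the frame header lacks the content size:
+ *     data = dctx.decompress(frame, max_output_size=1048576)
+ */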
+
+PyObject* Decompressor_decompress(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"data",
+		"max_output_size",
+		NULL
+	};
+
+	const char* source;
+	Py_ssize_t sourceSize;
+	Py_ssize_t maxOutputSize = 0;
+	unsigned long long decompressedSize;
+	size_t destCapacity;
+	PyObject* result = NULL;
+	ZSTD_DCtx* dctx = NULL;
+	void* dictData = NULL;
+	size_t dictSize = 0;
+	size_t zresult;
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n", kwlist,
+#else
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n", kwlist,
+#endif
+		&source, &sourceSize, &maxOutputSize)) {
+		return NULL;
+	}
+
+	dctx = PyMem_Malloc(ZSTD_sizeof_DCtx(self->refdctx));
+	if (!dctx) {
+		PyErr_NoMemory();
+		return NULL;
+	}
+
+	ZSTD_copyDCtx(dctx, self->refdctx);
+
+	if (self->dict) {
+		dictData = self->dict->dictData;
+		dictSize = self->dict->dictSize;
+	}
+
+	if (dictData && !self->ddict) {
+		Py_BEGIN_ALLOW_THREADS
+		self->ddict = ZSTD_createDDict(dictData, dictSize);
+		Py_END_ALLOW_THREADS
+
+		if (!self->ddict) {
+			PyErr_SetString(ZstdError, "could not create decompression dict");
+			goto except;
+		}
+	}
+
+	decompressedSize = ZSTD_getDecompressedSize(source, sourceSize);
+	/* 0 returned if content size not in the zstd frame header */
+	if (0 == decompressedSize) {
+		if (0 == maxOutputSize) {
+			PyErr_SetString(ZstdError, "input data invalid or missing content size "
+				"in frame header");
+			goto except;
+		}
+		else {
+			result = PyBytes_FromStringAndSize(NULL, maxOutputSize);
+			destCapacity = maxOutputSize;
+		}
+	}
+	else {
+		result = PyBytes_FromStringAndSize(NULL, decompressedSize);
+		destCapacity = decompressedSize;
+	}
+
+	if (!result) {
+		goto except;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	if (self->ddict) {
+		zresult = ZSTD_decompress_usingDDict(dctx, PyBytes_AsString(result), destCapacity,
+			source, sourceSize, self->ddict);
+	}
+	else {
+		zresult = ZSTD_decompressDCtx(dctx, PyBytes_AsString(result), destCapacity, source, sourceSize);
+	}
+	Py_END_ALLOW_THREADS
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "decompression error: %s", ZSTD_getErrorName(zresult));
+		goto except;
+	}
+	else if (decompressedSize && zresult != decompressedSize) {
+		PyErr_Format(ZstdError, "decompression error: decompressed %zu bytes; expected %llu",
+			zresult, decompressedSize);
+		goto except;
+	}
+	else if (zresult < destCapacity) {
+		if (_PyBytes_Resize(&result, zresult)) {
+			goto except;
+		}
+	}
+
+	goto finally;
+
+except:
+	Py_DecRef(result);
+	result = NULL;
+
+finally:
+	if (dctx) {
+		PyMem_FREE(dctx);
+	}
+
+	return result;
+}
+
+PyDoc_STRVAR(Decompressor_decompressobj__doc__,
+"decompressobj()\n"
+"\n"
+"Incrementally feed data into a decompressor.\n"
+"\n"
+"The returned object exposes a ``decompress(data)`` method. This makes it\n"
+"compatible with ``zlib.decompressobj`` and ``bz2.BZ2Decompressor`` so that\n"
+"callers can swap in the zstd decompressor while using the same API.\n"
+);
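+
+/* Usage sketch (Python), mirroring the zlib.decompressobj API; ``chunk1``
+   and ``chunk2`` are assumed pieces of one compressed stream:
+
+       dctx = zstd.ZstdDecompressor()
+       dobj = dctx.decompressobj()
+       data = dobj.decompress(chunk1)
+       data += dobj.decompress(chunk2)
+*/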
+
+static ZstdDecompressionObj* Decompressor_decompressobj(ZstdDecompressor* self) {
+	ZstdDecompressionObj* result = PyObject_New(ZstdDecompressionObj, &ZstdDecompressionObjType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->dstream = DStream_from_ZstdDecompressor(self);
+	if (!result->dstream) {
+		Py_DecRef((PyObject*)result);
+		return NULL;
+	}
+
+	result->decompressor = self;
+	Py_INCREF(result->decompressor);
+
+	result->finished = 0;
+
+	return result;
+}
+
+PyDoc_STRVAR(Decompressor_read_from__doc__,
+"read_from(reader[, read_size=default, write_size=default, skip_bytes=0])\n"
+"Read compressed data and return an iterator\n"
+"\n"
+"Returns an iterator of decompressed data chunks produced from reading from\n"
+"the ``reader``.\n"
+"\n"
+"Compressed data will be obtained from ``reader`` by calling the\n"
+"``read(size)`` method of it. The source data will be streamed into a\n"
+"decompressor. As decompressed data is available, it will be exposed to the\n"
+"returned iterator.\n"
+"\n"
+"Data is ``read()`` in chunks of size ``read_size`` and exposed to the\n"
+"iterator in chunks of size ``write_size``. The default values are the input\n"
+"and output sizes for a zstd streaming decompressor.\n"
+"\n"
+"There is also support for skipping the first ``skip_bytes`` of data from\n"
+"the source.\n"
+);
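+
+/* Usage sketch (Python); ``data.zst`` is an assumed input file and
+   ``handle_chunk`` a hypothetical callback:
+
+       dctx = zstd.ZstdDecompressor()
+       with open('data.zst', 'rb') as fh:
+           for chunk in dctx.read_from(fh):
+               handle_chunk(chunk)
+*/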
+
+static ZstdDecompressorIterator* Decompressor_read_from(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"reader",
+		"read_size",
+		"write_size",
+		"skip_bytes",
+		NULL
+	};
+
+	PyObject* reader;
+	size_t inSize = ZSTD_DStreamInSize();
+	size_t outSize = ZSTD_DStreamOutSize();
+	ZstdDecompressorIterator* result;
+	size_t skipBytes = 0;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk", kwlist, &reader,
+		&inSize, &outSize, &skipBytes)) {
+		return NULL;
+	}
+
+	if (skipBytes >= inSize) {
+		PyErr_SetString(PyExc_ValueError,
+			"skip_bytes must be smaller than read_size");
+		return NULL;
+	}
+
+	result = PyObject_New(ZstdDecompressorIterator, &ZstdDecompressorIteratorType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->decompressor = NULL;
+	result->reader = NULL;
+	result->buffer = NULL;
+	result->dstream = NULL;
+	result->input.src = NULL;
+	result->output.dst = NULL;
+
+	if (PyObject_HasAttrString(reader, "read")) {
+		result->reader = reader;
+		Py_INCREF(result->reader);
+	}
+	else if (1 == PyObject_CheckBuffer(reader)) {
+		/* Object claims it is a buffer. Try to get a handle to it. */
+		result->buffer = PyMem_Malloc(sizeof(Py_buffer));
+		if (!result->buffer) {
+			goto except;
+		}
+
+		memset(result->buffer, 0, sizeof(Py_buffer));
+
+		if (0 != PyObject_GetBuffer(reader, result->buffer, PyBUF_CONTIG_RO)) {
+			goto except;
+		}
+
+		result->bufferOffset = 0;
+	}
+	else {
+		PyErr_SetString(PyExc_ValueError,
+			"must pass an object with a read() method or that conforms to the buffer protocol");
+		goto except;
+	}
+
+	result->decompressor = self;
+	Py_INCREF(result->decompressor);
+
+	result->inSize = inSize;
+	result->outSize = outSize;
+	result->skipBytes = skipBytes;
+
+	result->dstream = DStream_from_ZstdDecompressor(self);
+	if (!result->dstream) {
+		goto except;
+	}
+
+	result->input.src = PyMem_Malloc(inSize);
+	if (!result->input.src) {
+		PyErr_NoMemory();
+		goto except;
+	}
+	result->input.size = 0;
+	result->input.pos = 0;
+
+	result->output.dst = NULL;
+	result->output.size = 0;
+	result->output.pos = 0;
+
+	result->readCount = 0;
+	result->finishedInput = 0;
+	result->finishedOutput = 0;
+
+	goto finally;
+
+except:
+	if (result->reader) {
+		Py_DECREF(result->reader);
+		result->reader = NULL;
+	}
+
+	if (result->buffer) {
+		PyBuffer_Release(result->buffer);
+		Py_DECREF(result->buffer);
+		result->buffer = NULL;
+	}
+
+	Py_DECREF(result);
+	result = NULL;
+
+finally:
+
+	return result;
+}
+
+PyDoc_STRVAR(Decompressor_write_to__doc__,
+"Create a context manager to write decompressed data to an object.\n"
+"\n"
+"The passed object must have a ``write()`` method.\n"
+"\n"
+"The caller feeds intput data to the object by calling ``write(data)``.\n"
+"Decompressed data is written to the argument given as it is decompressed.\n"
+"\n"
+"An optional ``write_size`` argument defines the size of chunks to\n"
+"``write()`` to the writer. It defaults to the default output size for a zstd\n"
+"streaming decompressor.\n"
+);
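+
+/* Usage sketch (Python); ``compressed_data`` is an assumed bytes object
+   and decompressed output accumulates in an io.BytesIO:
+
+       import io
+
+       dctx = zstd.ZstdDecompressor()
+       out = io.BytesIO()
+       with dctx.write_to(out) as decompressor:
+           decompressor.write(compressed_data)
+       data = out.getvalue()
+*/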
+
+static ZstdDecompressionWriter* Decompressor_write_to(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"writer",
+		"write_size",
+		NULL
+	};
+
+	PyObject* writer;
+	size_t outSize = ZSTD_DStreamOutSize();
+	ZstdDecompressionWriter* result;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k", kwlist, &writer, &outSize)) {
+		return NULL;
+	}
+
+	if (!PyObject_HasAttrString(writer, "write")) {
+		PyErr_SetString(PyExc_ValueError, "must pass an object with a write() method");
+		return NULL;
+	}
+
+	result = PyObject_New(ZstdDecompressionWriter, &ZstdDecompressionWriterType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->decompressor = self;
+	Py_INCREF(result->decompressor);
+
+	result->writer = writer;
+	Py_INCREF(result->writer);
+
+	result->outSize = outSize;
+
+	result->entered = 0;
+	result->dstream = NULL;
+
+	return result;
+}
+
+static PyMethodDef Decompressor_methods[] = {
+	{ "copy_stream", (PyCFunction)Decompressor_copy_stream, METH_VARARGS | METH_KEYWORDS,
+	Decompressor_copy_stream__doc__ },
+	{ "decompress", (PyCFunction)Decompressor_decompress, METH_VARARGS | METH_KEYWORDS,
+	Decompressor_decompress__doc__ },
+	{ "decompressobj", (PyCFunction)Decompressor_decompressobj, METH_NOARGS,
+	Decompressor_decompressobj__doc__ },
+	{ "read_from", (PyCFunction)Decompressor_read_from, METH_VARARGS | METH_KEYWORDS,
+	Decompressor_read_from__doc__ },
+	{ "write_to", (PyCFunction)Decompressor_write_to, METH_VARARGS | METH_KEYWORDS,
+	Decompressor_write_to__doc__ },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdDecompressorType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdDecompressor",        /* tp_name */
+	sizeof(ZstdDecompressor),       /* tp_basicsize */
+	0,                              /* tp_itemsize */
+	(destructor)Decompressor_dealloc, /* tp_dealloc */
+	0,                              /* tp_print */
+	0,                              /* tp_getattr */
+	0,                              /* tp_setattr */
+	0,                              /* tp_compare */
+	0,                              /* tp_repr */
+	0,                              /* tp_as_number */
+	0,                              /* tp_as_sequence */
+	0,                              /* tp_as_mapping */
+	0,                              /* tp_hash */
+	0,                              /* tp_call */
+	0,                              /* tp_str */
+	0,                              /* tp_getattro */
+	0,                              /* tp_setattro */
+	0,                              /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	Decompressor__doc__,            /* tp_doc */
+	0,                              /* tp_traverse */
+	0,                              /* tp_clear */
+	0,                              /* tp_richcompare */
+	0,                              /* tp_weaklistoffset */
+	0,                              /* tp_iter */
+	0,                              /* tp_iternext */
+	Decompressor_methods,           /* tp_methods */
+	0,                              /* tp_members */
+	0,                              /* tp_getset */
+	0,                              /* tp_base */
+	0,                              /* tp_dict */
+	0,                              /* tp_descr_get */
+	0,                              /* tp_descr_set */
+	0,                              /* tp_dictoffset */
+	(initproc)Decompressor_init,    /* tp_init */
+	0,                              /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+void decompressor_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdDecompressorType) = &PyType_Type;
+	if (PyType_Ready(&ZstdDecompressorType) < 0) {
+		return;
+	}
+
+	Py_INCREF((PyObject*)&ZstdDecompressorType);
+	PyModule_AddObject(mod, "ZstdDecompressor",
+		(PyObject*)&ZstdDecompressorType);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/decompressoriterator.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,254 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(ZstdDecompressorIterator__doc__,
+"Represents an iterator of decompressed data.\n"
+);
+
+static void ZstdDecompressorIterator_dealloc(ZstdDecompressorIterator* self) {
+	Py_XDECREF(self->decompressor);
+	Py_XDECREF(self->reader);
+
+	if (self->buffer) {
+		PyBuffer_Release(self->buffer);
+		PyMem_FREE(self->buffer);
+		self->buffer = NULL;
+	}
+
+	if (self->dstream) {
+		ZSTD_freeDStream(self->dstream);
+		self->dstream = NULL;
+	}
+
+	if (self->input.src) {
+		PyMem_Free((void*)self->input.src);
+		self->input.src = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdDecompressorIterator_iter(PyObject* self) {
+	Py_INCREF(self);
+	return self;
+}
+
+static DecompressorIteratorResult read_decompressor_iterator(ZstdDecompressorIterator* self) {
+	size_t zresult;
+	PyObject* chunk;
+	DecompressorIteratorResult result;
+	size_t oldInputPos = self->input.pos;
+
+	result.chunk = NULL;
+
+	chunk = PyBytes_FromStringAndSize(NULL, self->outSize);
+	if (!chunk) {
+		result.errored = 1;
+		return result;
+	}
+
+	self->output.dst = PyBytes_AsString(chunk);
+	self->output.size = self->outSize;
+	self->output.pos = 0;
+
+	Py_BEGIN_ALLOW_THREADS
+	zresult = ZSTD_decompressStream(self->dstream, &self->output, &self->input);
+	Py_END_ALLOW_THREADS
+
+	/* We're done with the pointer. Nullify to prevent anyone from getting a
+	handle on a Python object. */
+	self->output.dst = NULL;
+
+	if (ZSTD_isError(zresult)) {
+		Py_DECREF(chunk);
+		PyErr_Format(ZstdError, "zstd decompress error: %s",
+			ZSTD_getErrorName(zresult));
+		result.errored = 1;
+		return result;
+	}
+
+	self->readCount += self->input.pos - oldInputPos;
+
+	/* Frame is fully decoded. Input exhausted and output sitting in buffer. */
+	if (0 == zresult) {
+		self->finishedInput = 1;
+		self->finishedOutput = 1;
+	}
+
+	/* If it produced output data, return it. */
+	if (self->output.pos) {
+		if (self->output.pos < self->outSize) {
+			if (_PyBytes_Resize(&chunk, self->output.pos)) {
+				result.errored = 1;
+				return result;
+			}
+		}
+	}
+	else {
+		Py_DECREF(chunk);
+		chunk = NULL;
+	}
+
+	result.errored = 0;
+	result.chunk = chunk;
+
+	return result;
+}
+
+static PyObject* ZstdDecompressorIterator_iternext(ZstdDecompressorIterator* self) {
+	PyObject* readResult = NULL;
+	char* readBuffer;
+	Py_ssize_t readSize;
+	Py_ssize_t bufferRemaining;
+	DecompressorIteratorResult result;
+
+	if (self->finishedOutput) {
+		PyErr_SetString(PyExc_StopIteration, "output flushed");
+		return NULL;
+	}
+
+	/* If we have data left in the input, consume it. */
+	if (self->input.pos < self->input.size) {
+		result = read_decompressor_iterator(self);
+		if (result.chunk || result.errored) {
+			return result.chunk;
+		}
+
+		/* Else fall through to get more data from input. */
+	}
+
+read_from_source:
+
+	if (!self->finishedInput) {
+		if (self->reader) {
+			readResult = PyObject_CallMethod(self->reader, "read", "I", self->inSize);
+			if (!readResult) {
+				return NULL;
+			}
+
+			PyBytes_AsStringAndSize(readResult, &readBuffer, &readSize);
+		}
+		else {
+			assert(self->buffer && self->buffer->buf);
+
+			/* Only support contiguous C arrays for now */
+			assert(self->buffer->strides == NULL && self->buffer->suboffsets == NULL);
+			assert(self->buffer->itemsize == 1);
+
+			/* TODO avoid memcpy() below */
+			readBuffer = (char *)self->buffer->buf + self->bufferOffset;
+			bufferRemaining = self->buffer->len - self->bufferOffset;
+			readSize = min(bufferRemaining, (Py_ssize_t)self->inSize);
+			self->bufferOffset += readSize;
+		}
+
+		if (readSize) {
+			if (!self->readCount && self->skipBytes) {
+				assert(self->skipBytes < self->inSize);
+				if ((Py_ssize_t)self->skipBytes >= readSize) {
+					PyErr_SetString(PyExc_ValueError,
+						"skip_bytes larger than first input chunk; "
+						"this scenario is currently unsupported");
+					Py_DecRef(readResult);
+					return NULL;
+				}
+
+				readBuffer = readBuffer + self->skipBytes;
+				readSize -= self->skipBytes;
+			}
+
+			/* Copy input into previously allocated buffer because it can live longer
+			than a single function call and we don't want to keep a ref to a Python
+			object around. This could be changed... */
+			memcpy((void*)self->input.src, readBuffer, readSize);
+			self->input.size = readSize;
+			self->input.pos = 0;
+		}
+		/* No bytes on first read must mean an empty input stream. */
+		else if (!self->readCount) {
+			self->finishedInput = 1;
+			self->finishedOutput = 1;
+			Py_DecRef(readResult);
+			PyErr_SetString(PyExc_StopIteration, "empty input");
+			return NULL;
+		}
+		else {
+			self->finishedInput = 1;
+		}
+
+	/* We've copied the data into memory we manage. Discard the Python object. */
+		Py_DecRef(readResult);
+	}
+
+	result = read_decompressor_iterator(self);
+	if (result.errored || result.chunk) {
+		return result.chunk;
+	}
+
+	/* No new output data. Try again unless we know there is no more data. */
+	if (!self->finishedInput) {
+		goto read_from_source;
+	}
+
+	PyErr_SetString(PyExc_StopIteration, "input exhausted");
+	return NULL;
+}
+
+PyTypeObject ZstdDecompressorIteratorType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdDecompressorIterator",   /* tp_name */
+	sizeof(ZstdDecompressorIterator),  /* tp_basicsize */
+	0,                                 /* tp_itemsize */
+	(destructor)ZstdDecompressorIterator_dealloc, /* tp_dealloc */
+	0,                                 /* tp_print */
+	0,                                 /* tp_getattr */
+	0,                                 /* tp_setattr */
+	0,                                 /* tp_compare */
+	0,                                 /* tp_repr */
+	0,                                 /* tp_as_number */
+	0,                                 /* tp_as_sequence */
+	0,                                 /* tp_as_mapping */
+	0,                                 /* tp_hash */
+	0,                                 /* tp_call */
+	0,                                 /* tp_str */
+	0,                                 /* tp_getattro */
+	0,                                 /* tp_setattro */
+	0,                                 /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdDecompressorIterator__doc__,   /* tp_doc */
+	0,                                 /* tp_traverse */
+	0,                                 /* tp_clear */
+	0,                                 /* tp_richcompare */
+	0,                                 /* tp_weaklistoffset */
+	ZstdDecompressorIterator_iter,     /* tp_iter */
+	(iternextfunc)ZstdDecompressorIterator_iternext, /* tp_iternext */
+	0,                                 /* tp_methods */
+	0,                                 /* tp_members */
+	0,                                 /* tp_getset */
+	0,                                 /* tp_base */
+	0,                                 /* tp_dict */
+	0,                                 /* tp_descr_get */
+	0,                                 /* tp_descr_set */
+	0,                                 /* tp_dictoffset */
+	0,                                 /* tp_init */
+	0,                                 /* tp_alloc */
+	PyType_GenericNew,                 /* tp_new */
+};
+
+void decompressoriterator_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdDecompressorIteratorType) = &PyType_Type;
+	if (PyType_Ready(&ZstdDecompressorIteratorType) < 0) {
+		return;
+	}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/dictparams.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,125 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+PyDoc_STRVAR(DictParameters__doc__,
+"DictParameters: low-level control over dictionary generation");
+
+static PyObject* DictParameters_new(PyTypeObject* subtype, PyObject* args, PyObject* kwargs) {
+	DictParametersObject* self;
+	unsigned selectivityLevel;
+	int compressionLevel;
+	unsigned notificationLevel;
+	unsigned dictID;
+
+	if (!PyArg_ParseTuple(args, "IiII", &selectivityLevel, &compressionLevel,
+		&notificationLevel, &dictID)) {
+		return NULL;
+	}
+
+	self = (DictParametersObject*)subtype->tp_alloc(subtype, 1);
+	if (!self) {
+		return NULL;
+	}
+
+	self->selectivityLevel = selectivityLevel;
+	self->compressionLevel = compressionLevel;
+	self->notificationLevel = notificationLevel;
+	self->dictID = dictID;
+
+	return (PyObject*)self;
+}
+
+static void DictParameters_dealloc(PyObject* self) {
+	PyObject_Del(self);
+}
+
+static Py_ssize_t DictParameters_length(PyObject* self) {
+	return 4;
+}
+
+static PyObject* DictParameters_item(PyObject* o, Py_ssize_t i) {
+	DictParametersObject* self = (DictParametersObject*)o;
+
+	switch (i) {
+	case 0:
+		return PyLong_FromLong(self->selectivityLevel);
+	case 1:
+		return PyLong_FromLong(self->compressionLevel);
+	case 2:
+		return PyLong_FromLong(self->notificationLevel);
+	case 3:
+		return PyLong_FromLong(self->dictID);
+	default:
+		PyErr_SetString(PyExc_IndexError, "index out of range");
+		return NULL;
+	}
+}
+
+static PySequenceMethods DictParameters_sq = {
+	DictParameters_length, /* sq_length */
+	0,	                   /* sq_concat */
+	0,                     /* sq_repeat */
+	DictParameters_item,   /* sq_item */
+	0,                     /* sq_ass_item */
+	0,                     /* sq_contains */
+	0,                     /* sq_inplace_concat */
+	0                      /* sq_inplace_repeat */
+};
+
+PyTypeObject DictParametersType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"DictParameters", /* tp_name */
+	sizeof(DictParametersObject), /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)DictParameters_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	&DictParameters_sq,        /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	DictParameters__doc__,     /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	0,                         /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	0,                         /* tp_init */
+	0,                         /* tp_alloc */
+	DictParameters_new,        /* tp_new */
+};
+
+void dictparams_module_init(PyObject* mod) {
+	Py_TYPE(&DictParametersType) = &PyType_Type;
+	if (PyType_Ready(&DictParametersType) < 0) {
+		return;
+	}
+
+	Py_IncRef((PyObject*)&DictParametersType);
+	PyModule_AddObject(mod, "DictParameters", (PyObject*)&DictParametersType);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,178 @@
+/**
+* Copyright (c) 2016-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#define ZSTD_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#include "mem.h"
+#include "zstd.h"
+#include "zdict.h"
+
+#define PYTHON_ZSTANDARD_VERSION "0.6.0"
+
+typedef enum {
+	compressorobj_flush_finish,
+	compressorobj_flush_block,
+} CompressorObj_Flush;
+
+typedef struct {
+	PyObject_HEAD
+	unsigned windowLog;
+	unsigned chainLog;
+	unsigned hashLog;
+	unsigned searchLog;
+	unsigned searchLength;
+	unsigned targetLength;
+	ZSTD_strategy strategy;
+} CompressionParametersObject;
+
+extern PyTypeObject CompressionParametersType;
+
+typedef struct {
+	PyObject_HEAD
+	unsigned selectivityLevel;
+	int compressionLevel;
+	unsigned notificationLevel;
+	unsigned dictID;
+} DictParametersObject;
+
+extern PyTypeObject DictParametersType;
+
+typedef struct {
+	PyObject_HEAD
+
+	void* dictData;
+	size_t dictSize;
+} ZstdCompressionDict;
+
+extern PyTypeObject ZstdCompressionDictType;
+
+typedef struct {
+	PyObject_HEAD
+
+	int compressionLevel;
+	ZstdCompressionDict* dict;
+	ZSTD_CCtx* cctx;
+	ZSTD_CDict* cdict;
+	CompressionParametersObject* cparams;
+	ZSTD_frameParameters fparams;
+} ZstdCompressor;
+
+extern PyTypeObject ZstdCompressorType;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZstdCompressor* compressor;
+	ZSTD_CStream* cstream;
+	ZSTD_outBuffer output;
+	int finished;
+} ZstdCompressionObj;
+
+extern PyTypeObject ZstdCompressionObjType;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZstdCompressor* compressor;
+	PyObject* writer;
+	Py_ssize_t sourceSize;
+	size_t outSize;
+	ZSTD_CStream* cstream;
+	int entered;
+} ZstdCompressionWriter;
+
+extern PyTypeObject ZstdCompressionWriterType;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZstdCompressor* compressor;
+	PyObject* reader;
+	Py_buffer* buffer;
+	Py_ssize_t bufferOffset;
+	Py_ssize_t sourceSize;
+	size_t inSize;
+	size_t outSize;
+
+	ZSTD_CStream* cstream;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	int finishedOutput;
+	int finishedInput;
+	PyObject* readResult;
+} ZstdCompressorIterator;
+
+extern PyTypeObject ZstdCompressorIteratorType;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZSTD_DCtx* refdctx;
+
+	ZstdCompressionDict* dict;
+	ZSTD_DDict* ddict;
+} ZstdDecompressor;
+
+extern PyTypeObject ZstdDecompressorType;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZstdDecompressor* decompressor;
+	ZSTD_DStream* dstream;
+	int finished;
+} ZstdDecompressionObj;
+
+extern PyTypeObject ZstdDecompressionObjType;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZstdDecompressor* decompressor;
+	PyObject* writer;
+	size_t outSize;
+	ZSTD_DStream* dstream;
+	int entered;
+} ZstdDecompressionWriter;
+
+extern PyTypeObject ZstdDecompressionWriterType;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZstdDecompressor* decompressor;
+	PyObject* reader;
+	Py_buffer* buffer;
+	Py_ssize_t bufferOffset;
+	size_t inSize;
+	size_t outSize;
+	size_t skipBytes;
+	ZSTD_DStream* dstream;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	Py_ssize_t readCount;
+	int finishedInput;
+	int finishedOutput;
+} ZstdDecompressorIterator;
+
+extern PyTypeObject ZstdDecompressorIteratorType;
+
+typedef struct {
+	int errored;
+	PyObject* chunk;
+} DecompressorIteratorResult;
+
+void ztopy_compression_parameters(CompressionParametersObject* params, ZSTD_compressionParameters* zparams);
+CompressionParametersObject* get_compression_parameters(PyObject* self, PyObject* args);
+PyObject* estimate_compression_context_size(PyObject* self, PyObject* args);
+ZSTD_CStream* CStream_from_ZstdCompressor(ZstdCompressor* compressor, Py_ssize_t sourceSize);
+ZSTD_DStream* DStream_from_ZstdDecompressor(ZstdDecompressor* decompressor);
+ZstdCompressionDict* train_dictionary(PyObject* self, PyObject* args, PyObject* kwargs);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/make_cffi.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,108 @@
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+from __future__ import absolute_import
+
+import cffi
+import distutils.ccompiler
+import os
+import subprocess
+import tempfile
+
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+SOURCES = ['zstd/%s' % p for p in (
+    'common/entropy_common.c',
+    'common/error_private.c',
+    'common/fse_decompress.c',
+    'common/xxhash.c',
+    'common/zstd_common.c',
+    'compress/fse_compress.c',
+    'compress/huf_compress.c',
+    'compress/zstd_compress.c',
+    'decompress/huf_decompress.c',
+    'decompress/zstd_decompress.c',
+    'dictBuilder/divsufsort.c',
+    'dictBuilder/zdict.c',
+)]
+
+INCLUDE_DIRS = [os.path.join(HERE, d) for d in (
+    'zstd',
+    'zstd/common',
+    'zstd/compress',
+    'zstd/decompress',
+    'zstd/dictBuilder',
+)]
+
+# cffi can't parse some of the primitives in zstd.h. So we invoke the
+# preprocessor and feed its output into cffi.
+compiler = distutils.ccompiler.new_compiler()
+
+# Needed for MSVC.
+if hasattr(compiler, 'initialize'):
+    compiler.initialize()
+
+# Distutils doesn't set compiler.preprocessor, so invoke the preprocessor
+# manually.
+if compiler.compiler_type == 'unix':
+    args = list(compiler.executables['compiler'])
+    args.extend([
+        '-E',
+        '-DZSTD_STATIC_LINKING_ONLY',
+    ])
+elif compiler.compiler_type == 'msvc':
+    args = [compiler.cc]
+    args.extend([
+        '/EP',
+        '/DZSTD_STATIC_LINKING_ONLY',
+    ])
+else:
+    raise Exception('unsupported compiler type: %s' % compiler.compiler_type)
+
+# zstd.h includes <stddef.h>, which is also included by cffi's boilerplate.
+# This can lead to duplicate declarations. So we strip this include from the
+# preprocessor invocation.
+
+with open(os.path.join(HERE, 'zstd', 'zstd.h'), 'rb') as fh:
+    lines = [l for l in fh if not l.startswith(b'#include <stddef.h>')]
+
+fd, input_file = tempfile.mkstemp(suffix='.h')
+os.write(fd, b''.join(lines))
+os.close(fd)
+
+args.append(input_file)
+
+try:
+    process = subprocess.Popen(args, stdout=subprocess.PIPE)
+    output = process.communicate()[0]
+    ret = process.poll()
+    if ret:
+        raise Exception('preprocessor exited with error')
+finally:
+    os.unlink(input_file)
+
+def normalize_output():
+    lines = []
+    for line in output.splitlines():
+        # CFFI's parser doesn't like __attribute__ on UNIX compilers.
+        if line.startswith(b'__attribute__ ((visibility ("default"))) '):
+            line = line[len(b'__attribute__ ((visibility ("default"))) '):]
+
+        lines.append(line)
+
+    return b'\n'.join(lines)
+
+ffi = cffi.FFI()
+ffi.set_source('_zstd_cffi', '''
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+''', sources=SOURCES, include_dirs=INCLUDE_DIRS)
+
+ffi.cdef(normalize_output().decode('latin1'))
+
+if __name__ == '__main__':
+    ffi.compile()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/setup.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import sys
+from setuptools import setup
+
+try:
+    import cffi
+except ImportError:
+    cffi = None
+
+import setup_zstd
+
+SUPPORT_LEGACY = False
+
+if "--legacy" in sys.argv:
+    SUPPORT_LEGACY = True
+    sys.argv.remove("--legacy")
+
+# Code for obtaining the Extension instance is in its own module to
+# facilitate reuse in other projects.
+extensions = [setup_zstd.get_c_extension(SUPPORT_LEGACY, 'zstd')]
+
+if cffi:
+    import make_cffi
+    extensions.append(make_cffi.ffi.distutils_extension())
+
+version = None
+
+with open('c-ext/python-zstandard.h', 'r') as fh:
+    for line in fh:
+        if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'):
+            continue
+
+        version = line.split()[2][1:-1]
+        break
+
+if not version:
+    raise Exception('could not resolve package version; '
+                    'this should never happen')
+
+setup(
+    name='zstandard',
+    version=version,
+    description='Zstandard bindings for Python',
+    long_description=open('README.rst', 'r').read(),
+    url='https://github.com/indygreg/python-zstandard',
+    author='Gregory Szorc',
+    author_email='gregory.szorc@gmail.com',
+    license='BSD',
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: BSD License',
+        'Programming Language :: C',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+    ],
+    keywords='zstandard zstd compression',
+    ext_modules=extensions,
+    test_suite='tests',
+)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/setup_zstd.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,91 @@
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+import os
+from distutils.extension import Extension
+
+
+zstd_sources = ['zstd/%s' % p for p in (
+    'common/entropy_common.c',
+    'common/error_private.c',
+    'common/fse_decompress.c',
+    'common/xxhash.c',
+    'common/zstd_common.c',
+    'compress/fse_compress.c',
+    'compress/huf_compress.c',
+    'compress/zstd_compress.c',
+    'decompress/huf_decompress.c',
+    'decompress/zstd_decompress.c',
+    'dictBuilder/divsufsort.c',
+    'dictBuilder/zdict.c',
+)]
+
+zstd_sources_legacy = ['zstd/%s' % p for p in (
+    'deprecated/zbuff_compress.c',
+    'deprecated/zbuff_decompress.c',
+    'legacy/zstd_v01.c',
+    'legacy/zstd_v02.c',
+    'legacy/zstd_v03.c',
+    'legacy/zstd_v04.c',
+    'legacy/zstd_v05.c',
+    'legacy/zstd_v06.c',
+    'legacy/zstd_v07.c'
+)]
+
+zstd_includes = [
+    'c-ext',
+    'zstd',
+    'zstd/common',
+    'zstd/compress',
+    'zstd/decompress',
+    'zstd/dictBuilder',
+]
+
+zstd_includes_legacy = [
+    'zstd/deprecated',
+    'zstd/legacy',
+]
+
+ext_sources = [
+    'zstd.c',
+    'c-ext/compressiondict.c',
+    'c-ext/compressobj.c',
+    'c-ext/compressor.c',
+    'c-ext/compressoriterator.c',
+    'c-ext/compressionparams.c',
+    'c-ext/compressionwriter.c',
+    'c-ext/constants.c',
+    'c-ext/decompressobj.c',
+    'c-ext/decompressor.c',
+    'c-ext/decompressoriterator.c',
+    'c-ext/decompressionwriter.c',
+    'c-ext/dictparams.c',
+]
+
+zstd_depends = [
+    'c-ext/python-zstandard.h',
+]
+
+
+def get_c_extension(support_legacy=False, name='zstd'):
+    """Obtain a distutils.extension.Extension for the C extension."""
+    root = os.path.abspath(os.path.dirname(__file__))
+
+    sources = [os.path.join(root, p) for p in zstd_sources + ext_sources]
+    if support_legacy:
+        sources.extend([os.path.join(root, p) for p in zstd_sources_legacy])
+
+    include_dirs = [os.path.join(root, d) for d in zstd_includes]
+    if support_legacy:
+        include_dirs.extend([os.path.join(root, d) for d in zstd_includes_legacy])
+
+    depends = [os.path.join(root, p) for p in zstd_depends]
+
+    # TODO compile with optimizations.
+    return Extension(name, sources,
+                     include_dirs=include_dirs,
+                     depends=depends,
+                     extra_compile_args=["-DZSTD_LEGACY_SUPPORT=1"] if support_legacy else [])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/common.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,15 @@
+import io
+
+class OpCountingBytesIO(io.BytesIO):
+    def __init__(self, *args, **kwargs):
+        self._read_count = 0
+        self._write_count = 0
+        super(OpCountingBytesIO, self).__init__(*args, **kwargs)
+
+    def read(self, *args):
+        self._read_count += 1
+        return super(OpCountingBytesIO, self).read(*args)
+
+    def write(self, data):
+        self._write_count += 1
+        return super(OpCountingBytesIO, self).write(data)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_cffi.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,35 @@
+import io
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+import zstd
+
+try:
+    import zstd_cffi
+except ImportError:
+    raise unittest.SkipTest('cffi version of zstd not available')
+
+
+class TestCFFIWriteToToCDecompressor(unittest.TestCase):
+    def test_simple(self):
+        orig = io.BytesIO()
+        orig.write(b'foo')
+        orig.write(b'bar')
+        orig.write(b'foobar' * 16384)
+
+        dest = io.BytesIO()
+        cctx = zstd_cffi.ZstdCompressor()
+        with cctx.write_to(dest) as compressor:
+            compressor.write(orig.getvalue())
+
+        uncompressed = io.BytesIO()
+        dctx = zstd.ZstdDecompressor()
+        with dctx.write_to(uncompressed) as decompressor:
+            decompressor.write(dest.getvalue())
+
+        self.assertEqual(uncompressed.getvalue(), orig.getvalue())
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_compressor.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,536 @@
+import hashlib
+import io
+import struct
+import sys
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+import zstd
+
+from .common import OpCountingBytesIO
+
+
+if sys.version_info[0] >= 3:
+    next = lambda it: it.__next__()
+else:
+    next = lambda it: it.next()
+
+
+class TestCompressor(unittest.TestCase):
+    def test_level_bounds(self):
+        with self.assertRaises(ValueError):
+            zstd.ZstdCompressor(level=0)
+
+        with self.assertRaises(ValueError):
+            zstd.ZstdCompressor(level=23)
+
+
+class TestCompressor_compress(unittest.TestCase):
+    def test_compress_empty_levels(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        cctx.compress(b'')
+
+        cctx = zstd.ZstdCompressor(level=22)
+        cctx.compress(b'')
+
+    def test_compress_empty(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        self.assertEqual(cctx.compress(b''),
+                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+
+        # TODO this check is temporary until
+        # https://github.com/facebook/zstd/issues/506 is fixed.
+        cctx = zstd.ZstdCompressor(write_content_size=True)
+        with self.assertRaises(ValueError):
+            cctx.compress(b'')
+
+        cctx.compress(b'', allow_empty=True)
+
+    def test_compress_large(self):
+        chunks = []
+        for i in range(255):
+            chunks.append(struct.Struct('>B').pack(i) * 16384)
+
+        cctx = zstd.ZstdCompressor(level=3)
+        result = cctx.compress(b''.join(chunks))
+        self.assertEqual(len(result), 999)
+        self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
+
+    def test_write_checksum(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        no_checksum = cctx.compress(b'foobar')
+        cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
+        with_checksum = cctx.compress(b'foobar')
+
+        self.assertEqual(len(with_checksum), len(no_checksum) + 4)
+
+    def test_write_content_size(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        no_size = cctx.compress(b'foobar' * 256)
+        cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
+        with_size = cctx.compress(b'foobar' * 256)
+
+        self.assertEqual(len(with_size), len(no_size) + 1)
+
+    def test_no_dict_id(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(1024, samples)
+
+        cctx = zstd.ZstdCompressor(level=1, dict_data=d)
+        with_dict_id = cctx.compress(b'foobarfoobar')
+
+        cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
+        no_dict_id = cctx.compress(b'foobarfoobar')
+
+        self.assertEqual(len(with_dict_id), len(no_dict_id) + 4)
+
+    def test_compress_dict_multiple(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(8192, samples)
+
+        cctx = zstd.ZstdCompressor(level=1, dict_data=d)
+
+        for i in range(32):
+            cctx.compress(b'foo bar foobar foo bar foobar')
+
+
+class TestCompressor_compressobj(unittest.TestCase):
+    def test_compressobj_empty(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        cobj = cctx.compressobj()
+        self.assertEqual(cobj.compress(b''), b'')
+        self.assertEqual(cobj.flush(),
+                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+
+    def test_compressobj_large(self):
+        chunks = []
+        for i in range(255):
+            chunks.append(struct.Struct('>B').pack(i) * 16384)
+
+        cctx = zstd.ZstdCompressor(level=3)
+        cobj = cctx.compressobj()
+
+        result = cobj.compress(b''.join(chunks)) + cobj.flush()
+        self.assertEqual(len(result), 999)
+        self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
+
+    def test_write_checksum(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        cobj = cctx.compressobj()
+        no_checksum = cobj.compress(b'foobar') + cobj.flush()
+        cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
+        cobj = cctx.compressobj()
+        with_checksum = cobj.compress(b'foobar') + cobj.flush()
+
+        self.assertEqual(len(with_checksum), len(no_checksum) + 4)
+
+    def test_write_content_size(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        cobj = cctx.compressobj(size=len(b'foobar' * 256))
+        no_size = cobj.compress(b'foobar' * 256) + cobj.flush()
+        cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
+        cobj = cctx.compressobj(size=len(b'foobar' * 256))
+        with_size = cobj.compress(b'foobar' * 256) + cobj.flush()
+
+        self.assertEqual(len(with_size), len(no_size) + 1)
+
+    def test_compress_after_finished(self):
+        cctx = zstd.ZstdCompressor()
+        cobj = cctx.compressobj()
+
+        cobj.compress(b'foo')
+        cobj.flush()
+
+        with self.assertRaisesRegexp(zstd.ZstdError, r'cannot call compress\(\) after compressor'):
+            cobj.compress(b'foo')
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'compressor object already finished'):
+            cobj.flush()
+
+    def test_flush_block_repeated(self):
+        cctx = zstd.ZstdCompressor(level=1)
+        cobj = cctx.compressobj()
+
+        self.assertEqual(cobj.compress(b'foo'), b'')
+        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK),
+                         b'\x28\xb5\x2f\xfd\x00\x48\x18\x00\x00foo')
+        self.assertEqual(cobj.compress(b'bar'), b'')
+        # 3 byte header plus content.
+        self.assertEqual(cobj.flush(), b'\x19\x00\x00bar')
+
+    def test_flush_empty_block(self):
+        cctx = zstd.ZstdCompressor(write_checksum=True)
+        cobj = cctx.compressobj()
+
+        cobj.compress(b'foobar')
+        cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
+        # No-op if no block is active (this is internal to zstd).
+        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b'')
+
+        trailing = cobj.flush()
+        # 3 bytes block header + 4 bytes frame checksum
+        self.assertEqual(len(trailing), 7)
+        header = trailing[0:3]
+        self.assertEqual(header, b'\x01\x00\x00')
+
+
+class TestCompressor_copy_stream(unittest.TestCase):
+    def test_no_read(self):
+        source = object()
+        dest = io.BytesIO()
+
+        cctx = zstd.ZstdCompressor()
+        with self.assertRaises(ValueError):
+            cctx.copy_stream(source, dest)
+
+    def test_no_write(self):
+        source = io.BytesIO()
+        dest = object()
+
+        cctx = zstd.ZstdCompressor()
+        with self.assertRaises(ValueError):
+            cctx.copy_stream(source, dest)
+
+    def test_empty(self):
+        source = io.BytesIO()
+        dest = io.BytesIO()
+
+        cctx = zstd.ZstdCompressor(level=1)
+        r, w = cctx.copy_stream(source, dest)
+        self.assertEqual(int(r), 0)
+        self.assertEqual(w, 9)
+
+        self.assertEqual(dest.getvalue(),
+                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+
+    def test_large_data(self):
+        source = io.BytesIO()
+        for i in range(255):
+            source.write(struct.Struct('>B').pack(i) * 16384)
+        source.seek(0)
+
+        dest = io.BytesIO()
+        cctx = zstd.ZstdCompressor()
+        r, w = cctx.copy_stream(source, dest)
+
+        self.assertEqual(r, 255 * 16384)
+        self.assertEqual(w, 999)
+
+    def test_write_checksum(self):
+        source = io.BytesIO(b'foobar')
+        no_checksum = io.BytesIO()
+
+        cctx = zstd.ZstdCompressor(level=1)
+        cctx.copy_stream(source, no_checksum)
+
+        source.seek(0)
+        with_checksum = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
+        cctx.copy_stream(source, with_checksum)
+
+        self.assertEqual(len(with_checksum.getvalue()),
+                         len(no_checksum.getvalue()) + 4)
+
+    def test_write_content_size(self):
+        source = io.BytesIO(b'foobar' * 256)
+        no_size = io.BytesIO()
+
+        cctx = zstd.ZstdCompressor(level=1)
+        cctx.copy_stream(source, no_size)
+
+        source.seek(0)
+        with_size = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
+        cctx.copy_stream(source, with_size)
+
+        # Source content size is unknown, so no content size written.
+        self.assertEqual(len(with_size.getvalue()),
+                         len(no_size.getvalue()))
+
+        source.seek(0)
+        with_size = io.BytesIO()
+        cctx.copy_stream(source, with_size, size=len(source.getvalue()))
+
+        # We specified source size, so content size header is present.
+        self.assertEqual(len(with_size.getvalue()),
+                         len(no_size.getvalue()) + 1)
+
+    def test_read_write_size(self):
+        source = OpCountingBytesIO(b'foobarfoobar')
+        dest = OpCountingBytesIO()
+        cctx = zstd.ZstdCompressor()
+        r, w = cctx.copy_stream(source, dest, read_size=1, write_size=1)
+
+        self.assertEqual(r, len(source.getvalue()))
+        self.assertEqual(w, 21)
+        self.assertEqual(source._read_count, len(source.getvalue()) + 1)
+        self.assertEqual(dest._write_count, len(dest.getvalue()))
+
+
+def compress(data, level):
+    buffer = io.BytesIO()
+    cctx = zstd.ZstdCompressor(level=level)
+    with cctx.write_to(buffer) as compressor:
+        compressor.write(data)
+    return buffer.getvalue()
+
+
+class TestCompressor_write_to(unittest.TestCase):
+    def test_empty(self):
+        self.assertEqual(compress(b'', 1),
+                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+
+    def test_multiple_compress(self):
+        buffer = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=5)
+        with cctx.write_to(buffer) as compressor:
+            compressor.write(b'foo')
+            compressor.write(b'bar')
+            compressor.write(b'x' * 8192)
+
+        result = buffer.getvalue()
+        self.assertEqual(result,
+                         b'\x28\xb5\x2f\xfd\x00\x50\x75\x00\x00\x38\x66\x6f'
+                         b'\x6f\x62\x61\x72\x78\x01\x00\xfc\xdf\x03\x23')
+
+    def test_dictionary(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(8192, samples)
+
+        buffer = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=9, dict_data=d)
+        with cctx.write_to(buffer) as compressor:
+            compressor.write(b'foo')
+            compressor.write(b'bar')
+            compressor.write(b'foo' * 16384)
+
+        compressed = buffer.getvalue()
+        h = hashlib.sha1(compressed).hexdigest()
+        self.assertEqual(h, '1c5bcd25181bcd8c1a73ea8773323e0056129f92')
+
+    def test_compression_params(self):
+        params = zstd.CompressionParameters(20, 6, 12, 5, 4, 10, zstd.STRATEGY_FAST)
+
+        buffer = io.BytesIO()
+        cctx = zstd.ZstdCompressor(compression_params=params)
+        with cctx.write_to(buffer) as compressor:
+            compressor.write(b'foo')
+            compressor.write(b'bar')
+            compressor.write(b'foobar' * 16384)
+
+        compressed = buffer.getvalue()
+        h = hashlib.sha1(compressed).hexdigest()
+        self.assertEqual(h, '1ae31f270ed7de14235221a604b31ecd517ebd99')
+
+    def test_write_checksum(self):
+        no_checksum = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=1)
+        with cctx.write_to(no_checksum) as compressor:
+            compressor.write(b'foobar')
+
+        with_checksum = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
+        with cctx.write_to(with_checksum) as compressor:
+            compressor.write(b'foobar')
+
+        self.assertEqual(len(with_checksum.getvalue()),
+                         len(no_checksum.getvalue()) + 4)
+
+    def test_write_content_size(self):
+        no_size = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=1)
+        with cctx.write_to(no_size) as compressor:
+            compressor.write(b'foobar' * 256)
+
+        with_size = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
+        with cctx.write_to(with_size) as compressor:
+            compressor.write(b'foobar' * 256)
+
+        # Source size is not known in streaming mode, so header not
+        # written.
+        self.assertEqual(len(with_size.getvalue()),
+                         len(no_size.getvalue()))
+
+        # Declaring size will write the header.
+        with_size = io.BytesIO()
+        with cctx.write_to(with_size, size=len(b'foobar' * 256)) as compressor:
+            compressor.write(b'foobar' * 256)
+
+        self.assertEqual(len(with_size.getvalue()),
+                         len(no_size.getvalue()) + 1)
+
+    def test_no_dict_id(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(1024, samples)
+
+        with_dict_id = io.BytesIO()
+        cctx = zstd.ZstdCompressor(level=1, dict_data=d)
+        with cctx.write_to(with_dict_id) as compressor:
+            compressor.write(b'foobarfoobar')
+
+        cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
+        no_dict_id = io.BytesIO()
+        with cctx.write_to(no_dict_id) as compressor:
+            compressor.write(b'foobarfoobar')
+
+        self.assertEqual(len(with_dict_id.getvalue()),
+                         len(no_dict_id.getvalue()) + 4)
+
+    def test_memory_size(self):
+        cctx = zstd.ZstdCompressor(level=3)
+        buffer = io.BytesIO()
+        with cctx.write_to(buffer) as compressor:
+            size = compressor.memory_size()
+
+        self.assertGreater(size, 100000)
+
+    def test_write_size(self):
+        cctx = zstd.ZstdCompressor(level=3)
+        dest = OpCountingBytesIO()
+        with cctx.write_to(dest, write_size=1) as compressor:
+            compressor.write(b'foo')
+            compressor.write(b'bar')
+            compressor.write(b'foobar')
+
+        self.assertEqual(len(dest.getvalue()), dest._write_count)
+
+    def test_flush_repeated(self):
+        cctx = zstd.ZstdCompressor(level=3)
+        dest = OpCountingBytesIO()
+        with cctx.write_to(dest) as compressor:
+            compressor.write(b'foo')
+            self.assertEqual(dest._write_count, 0)
+            compressor.flush()
+            self.assertEqual(dest._write_count, 1)
+            compressor.write(b'bar')
+            self.assertEqual(dest._write_count, 1)
+            compressor.flush()
+            self.assertEqual(dest._write_count, 2)
+            compressor.write(b'baz')
+
+        self.assertEqual(dest._write_count, 3)
+
+    def test_flush_empty_block(self):
+        cctx = zstd.ZstdCompressor(level=3, write_checksum=True)
+        dest = OpCountingBytesIO()
+        with cctx.write_to(dest) as compressor:
+            compressor.write(b'foobar' * 8192)
+            count = dest._write_count
+            offset = dest.tell()
+            compressor.flush()
+            self.assertGreater(dest._write_count, count)
+            self.assertGreater(dest.tell(), offset)
+            offset = dest.tell()
+            # Ending the write here should cause an empty block to be written
+            # to denote end of frame.
+
+        trailing = dest.getvalue()[offset:]
+        # 3 bytes block header + 4 bytes frame checksum
+        self.assertEqual(len(trailing), 7)
+
+        header = trailing[0:3]
+        self.assertEqual(header, b'\x01\x00\x00')
+
+
+class TestCompressor_read_from(unittest.TestCase):
+    def test_type_validation(self):
+        cctx = zstd.ZstdCompressor()
+
+        # Object with read() works.
+        cctx.read_from(io.BytesIO())
+
+        # Buffer protocol works.
+        cctx.read_from(b'foobar')
+
+        with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
+            cctx.read_from(True)
+
+    def test_read_empty(self):
+        cctx = zstd.ZstdCompressor(level=1)
+
+        source = io.BytesIO()
+        it = cctx.read_from(source)
+        chunks = list(it)
+        self.assertEqual(len(chunks), 1)
+        compressed = b''.join(chunks)
+        self.assertEqual(compressed, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+
+        # And again with the buffer protocol.
+        it = cctx.read_from(b'')
+        chunks = list(it)
+        self.assertEqual(len(chunks), 1)
+        compressed2 = b''.join(chunks)
+        self.assertEqual(compressed2, compressed)
+
+    def test_read_large(self):
+        cctx = zstd.ZstdCompressor(level=1)
+
+        source = io.BytesIO()
+        source.write(b'f' * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE)
+        source.write(b'o')
+        source.seek(0)
+
+        # Creating an iterator should not perform any compression until
+        # first read.
+        it = cctx.read_from(source, size=len(source.getvalue()))
+        self.assertEqual(source.tell(), 0)
+
+        # We should have exactly 2 output chunks.
+        chunks = []
+        chunk = next(it)
+        self.assertIsNotNone(chunk)
+        self.assertEqual(source.tell(), zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE)
+        chunks.append(chunk)
+        chunk = next(it)
+        self.assertIsNotNone(chunk)
+        chunks.append(chunk)
+
+        self.assertEqual(source.tell(), len(source.getvalue()))
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        # And again for good measure.
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        # We should get the same output as the one-shot compression mechanism.
+        self.assertEqual(b''.join(chunks), cctx.compress(source.getvalue()))
+
+        # Now check the buffer protocol.
+        it = cctx.read_from(source.getvalue())
+        chunks = list(it)
+        self.assertEqual(len(chunks), 2)
+        self.assertEqual(b''.join(chunks), cctx.compress(source.getvalue()))
+
+    def test_read_write_size(self):
+        source = OpCountingBytesIO(b'foobarfoobar')
+        cctx = zstd.ZstdCompressor(level=3)
+        for chunk in cctx.read_from(source, read_size=1, write_size=1):
+            self.assertEqual(len(chunk), 1)
+
+        self.assertEqual(source._read_count, len(source.getvalue()) + 1)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,107 @@
+import io
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+try:
+    import hypothesis
+    import hypothesis.strategies as strategies
+except ImportError:
+    hypothesis = None
+
+import zstd
+
+class TestCompressionParameters(unittest.TestCase):
+    def test_init_bad_arg_type(self):
+        with self.assertRaises(TypeError):
+            zstd.CompressionParameters()
+
+        with self.assertRaises(TypeError):
+            zstd.CompressionParameters(0, 1)
+
+    def test_bounds(self):
+        zstd.CompressionParameters(zstd.WINDOWLOG_MIN,
+                                   zstd.CHAINLOG_MIN,
+                                   zstd.HASHLOG_MIN,
+                                   zstd.SEARCHLOG_MIN,
+                                   zstd.SEARCHLENGTH_MIN,
+                                   zstd.TARGETLENGTH_MIN,
+                                   zstd.STRATEGY_FAST)
+
+        zstd.CompressionParameters(zstd.WINDOWLOG_MAX,
+                                   zstd.CHAINLOG_MAX,
+                                   zstd.HASHLOG_MAX,
+                                   zstd.SEARCHLOG_MAX,
+                                   zstd.SEARCHLENGTH_MAX,
+                                   zstd.TARGETLENGTH_MAX,
+                                   zstd.STRATEGY_BTOPT)
+
+    def test_get_compression_parameters(self):
+        p = zstd.get_compression_parameters(1)
+        self.assertIsInstance(p, zstd.CompressionParameters)
+
+        self.assertEqual(p[0], 19)
+
+if hypothesis:
+    s_windowlog = strategies.integers(min_value=zstd.WINDOWLOG_MIN,
+                                      max_value=zstd.WINDOWLOG_MAX)
+    s_chainlog = strategies.integers(min_value=zstd.CHAINLOG_MIN,
+                                     max_value=zstd.CHAINLOG_MAX)
+    s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN,
+                                    max_value=zstd.HASHLOG_MAX)
+    s_searchlog = strategies.integers(min_value=zstd.SEARCHLOG_MIN,
+                                      max_value=zstd.SEARCHLOG_MAX)
+    s_searchlength = strategies.integers(min_value=zstd.SEARCHLENGTH_MIN,
+                                         max_value=zstd.SEARCHLENGTH_MAX)
+    s_targetlength = strategies.integers(min_value=zstd.TARGETLENGTH_MIN,
+                                         max_value=zstd.TARGETLENGTH_MAX)
+    s_strategy = strategies.sampled_from((zstd.STRATEGY_FAST,
+                                          zstd.STRATEGY_DFAST,
+                                          zstd.STRATEGY_GREEDY,
+                                          zstd.STRATEGY_LAZY,
+                                          zstd.STRATEGY_LAZY2,
+                                          zstd.STRATEGY_BTLAZY2,
+                                          zstd.STRATEGY_BTOPT))
+
+    class TestCompressionParametersHypothesis(unittest.TestCase):
+        @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
+                          s_searchlength, s_targetlength, s_strategy)
+        def test_valid_init(self, windowlog, chainlog, hashlog, searchlog,
+                            searchlength, targetlength, strategy):
+            p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
+                                           searchlog, searchlength,
+                                           targetlength, strategy)
+            self.assertEqual(tuple(p),
+                             (windowlog, chainlog, hashlog, searchlog,
+                              searchlength, targetlength, strategy))
+
+            # Verify we can instantiate a compressor with the supplied values.
+            # ZSTD_checkCParams moves the goal posts on us from what's advertised
+            # in the constants. So move along with them.
+            if searchlength == zstd.SEARCHLENGTH_MIN and strategy in (zstd.STRATEGY_FAST, zstd.STRATEGY_GREEDY):
+                searchlength += 1
+                p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
+                                               searchlog, searchlength,
+                                               targetlength, strategy)
+            elif searchlength == zstd.SEARCHLENGTH_MAX and strategy != zstd.STRATEGY_FAST:
+                searchlength -= 1
+                p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
+                                               searchlog, searchlength,
+                                               targetlength, strategy)
+
+            cctx = zstd.ZstdCompressor(compression_params=p)
+            with cctx.write_to(io.BytesIO()):
+                pass
+
+        @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
+                          s_searchlength, s_targetlength, s_strategy)
+        def test_estimate_compression_context_size(self, windowlog, chainlog,
+                                                   hashlog, searchlog,
+                                                   searchlength, targetlength,
+                                                   strategy):
+            p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
+                                           searchlog, searchlength,
+                                           targetlength, strategy)
+            size = zstd.estimate_compression_context_size(p)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,478 @@
+import io
+import random
+import struct
+import sys
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+import zstd
+
+from .common import OpCountingBytesIO
+
+
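+# Normalize next() across Python 2 (it.next()) and Python 3 (it.__next__()).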
+if sys.version_info[0] >= 3:
+    next = lambda it: it.__next__()
+else:
+    next = lambda it: it.next()
+
+
+class TestDecompressor_decompress(unittest.TestCase):
+    def test_empty_input(self):
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'input data invalid'):
+            dctx.decompress(b'')
+
+    def test_invalid_input(self):
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'input data invalid'):
+            dctx.decompress(b'foobar')
+
+    def test_no_content_size_in_frame(self):
+        cctx = zstd.ZstdCompressor(write_content_size=False)
+        compressed = cctx.compress(b'foobar')
+
+        dctx = zstd.ZstdDecompressor()
+        with self.assertRaisesRegexp(zstd.ZstdError, 'input data invalid'):
+            dctx.decompress(compressed)
+
+    def test_content_size_present(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True)
+        compressed = cctx.compress(b'foobar')
+
+        dctx = zstd.ZstdDecompressor()
+        decompressed = dctx.decompress(compressed)
+        self.assertEqual(decompressed, b'foobar')
+
+    def test_max_output_size(self):
+        cctx = zstd.ZstdCompressor(write_content_size=False)
+        source = b'foobar' * 256
+        compressed = cctx.compress(source)
+
+        dctx = zstd.ZstdDecompressor()
+        # Will fit into buffer exactly the size of input.
+        decompressed = dctx.decompress(compressed, max_output_size=len(source))
+        self.assertEqual(decompressed, source)
+
+        # Input size - 1 fails
+        with self.assertRaisesRegexp(zstd.ZstdError, 'Destination buffer is too small'):
+            dctx.decompress(compressed, max_output_size=len(source) - 1)
+
+        # Input size + 1 works
+        decompressed = dctx.decompress(compressed, max_output_size=len(source) + 1)
+        self.assertEqual(decompressed, source)
+
+        # A much larger buffer works.
+        decompressed = dctx.decompress(compressed, max_output_size=len(source) * 64)
+        self.assertEqual(decompressed, source)
+
+    def test_stupidly_large_output_buffer(self):
+        cctx = zstd.ZstdCompressor(write_content_size=False)
+        compressed = cctx.compress(b'foobar' * 256)
+        dctx = zstd.ZstdDecompressor()
+
+        # Will get OverflowError on some Python distributions that can't
+        # handle really large integers.
+        with self.assertRaises((MemoryError, OverflowError)):
+            dctx.decompress(compressed, max_output_size=2**62)
+
+    def test_dictionary(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(8192, samples)
+
+        orig = b'foobar' * 16384
+        cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_content_size=True)
+        compressed = cctx.compress(orig)
+
+        dctx = zstd.ZstdDecompressor(dict_data=d)
+        decompressed = dctx.decompress(compressed)
+
+        self.assertEqual(decompressed, orig)
+
+    def test_dictionary_multiple(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(8192, samples)
+
+        sources = (b'foobar' * 8192, b'foo' * 8192, b'bar' * 8192)
+        compressed = []
+        cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_content_size=True)
+        for source in sources:
+            compressed.append(cctx.compress(source))
+
+        dctx = zstd.ZstdDecompressor(dict_data=d)
+        for i in range(len(sources)):
+            decompressed = dctx.decompress(compressed[i])
+            self.assertEqual(decompressed, sources[i])
+
+
+class TestDecompressor_copy_stream(unittest.TestCase):
+    def test_no_read(self):
+        source = object()
+        dest = io.BytesIO()
+
+        dctx = zstd.ZstdDecompressor()
+        with self.assertRaises(ValueError):
+            dctx.copy_stream(source, dest)
+
+    def test_no_write(self):
+        source = io.BytesIO()
+        dest = object()
+
+        dctx = zstd.ZstdDecompressor()
+        with self.assertRaises(ValueError):
+            dctx.copy_stream(source, dest)
+
+    def test_empty(self):
+        source = io.BytesIO()
+        dest = io.BytesIO()
+
+        dctx = zstd.ZstdDecompressor()
+        # TODO should this raise an error?
+        r, w = dctx.copy_stream(source, dest)
+
+        self.assertEqual(r, 0)
+        self.assertEqual(w, 0)
+        self.assertEqual(dest.getvalue(), b'')
+
+    def test_large_data(self):
+        source = io.BytesIO()
+        for i in range(255):
+            source.write(struct.Struct('>B').pack(i) * 16384)
+        source.seek(0)
+
+        compressed = io.BytesIO()
+        cctx = zstd.ZstdCompressor()
+        cctx.copy_stream(source, compressed)
+
+        compressed.seek(0)
+        dest = io.BytesIO()
+        dctx = zstd.ZstdDecompressor()
+        r, w = dctx.copy_stream(compressed, dest)
+
+        self.assertEqual(r, len(compressed.getvalue()))
+        self.assertEqual(w, len(source.getvalue()))
+
+    def test_read_write_size(self):
+        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(
+            b'foobarfoobar'))
+
+        dest = OpCountingBytesIO()
+        dctx = zstd.ZstdDecompressor()
+        r, w = dctx.copy_stream(source, dest, read_size=1, write_size=1)
+
+        self.assertEqual(r, len(source.getvalue()))
+        self.assertEqual(w, len(b'foobarfoobar'))
+        self.assertEqual(source._read_count, len(source.getvalue()) + 1)
+        self.assertEqual(dest._write_count, len(dest.getvalue()))
+
+
+class TestDecompressor_decompressobj(unittest.TestCase):
+    def test_simple(self):
+        data = zstd.ZstdCompressor(level=1).compress(b'foobar')
+
+        dctx = zstd.ZstdDecompressor()
+        dobj = dctx.decompressobj()
+        self.assertEqual(dobj.decompress(data), b'foobar')
+
+    def test_reuse(self):
+        data = zstd.ZstdCompressor(level=1).compress(b'foobar')
+
+        dctx = zstd.ZstdDecompressor()
+        dobj = dctx.decompressobj()
+        dobj.decompress(data)
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'cannot use a decompressobj'):
+            dobj.decompress(data)
+
+
+def decompress_via_writer(data):
+    buffer = io.BytesIO()
+    dctx = zstd.ZstdDecompressor()
+    with dctx.write_to(buffer) as decompressor:
+        decompressor.write(data)
+    return buffer.getvalue()
+
+
+class TestDecompressor_write_to(unittest.TestCase):
+    def test_empty_roundtrip(self):
+        cctx = zstd.ZstdCompressor()
+        empty = cctx.compress(b'')
+        self.assertEqual(decompress_via_writer(empty), b'')
+
+    def test_large_roundtrip(self):
+        chunks = []
+        for i in range(255):
+            chunks.append(struct.Struct('>B').pack(i) * 16384)
+        orig = b''.join(chunks)
+        cctx = zstd.ZstdCompressor()
+        compressed = cctx.compress(orig)
+
+        self.assertEqual(decompress_via_writer(compressed), orig)
+
+    def test_multiple_calls(self):
+        chunks = []
+        for i in range(255):
+            for j in range(255):
+                chunks.append(struct.Struct('>B').pack(j) * i)
+
+        orig = b''.join(chunks)
+        cctx = zstd.ZstdCompressor()
+        compressed = cctx.compress(orig)
+
+        buffer = io.BytesIO()
+        dctx = zstd.ZstdDecompressor()
+        with dctx.write_to(buffer) as decompressor:
+            pos = 0
+            while pos < len(compressed):
+                pos2 = pos + 8192
+                decompressor.write(compressed[pos:pos2])
+                pos += 8192
+        self.assertEqual(buffer.getvalue(), orig)
+
+    def test_dictionary(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(8192, samples)
+
+        orig = b'foobar' * 16384
+        buffer = io.BytesIO()
+        cctx = zstd.ZstdCompressor(dict_data=d)
+        with cctx.write_to(buffer) as compressor:
+            compressor.write(orig)
+
+        compressed = buffer.getvalue()
+        buffer = io.BytesIO()
+
+        dctx = zstd.ZstdDecompressor(dict_data=d)
+        with dctx.write_to(buffer) as decompressor:
+            decompressor.write(compressed)
+
+        self.assertEqual(buffer.getvalue(), orig)
+
+    def test_memory_size(self):
+        dctx = zstd.ZstdDecompressor()
+        buffer = io.BytesIO()
+        with dctx.write_to(buffer) as decompressor:
+            size = decompressor.memory_size()
+
+        self.assertGreater(size, 100000)
+
+    def test_write_size(self):
+        source = zstd.ZstdCompressor().compress(b'foobarfoobar')
+        dest = OpCountingBytesIO()
+        dctx = zstd.ZstdDecompressor()
+        with dctx.write_to(dest, write_size=1) as decompressor:
+            s = struct.Struct('>B')
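+            # On Python 3, iterating bytes yields ints; repack each into a 1-byte value.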
+            for c in source:
+                if not isinstance(c, str):
+                    c = s.pack(c)
+                decompressor.write(c)
+
+        self.assertEqual(dest.getvalue(), b'foobarfoobar')
+        self.assertEqual(dest._write_count, len(dest.getvalue()))
+
+
+class TestDecompressor_read_from(unittest.TestCase):
+    def test_type_validation(self):
+        dctx = zstd.ZstdDecompressor()
+
+        # Object with read() works.
+        dctx.read_from(io.BytesIO())
+
+        # Buffer protocol works.
+        dctx.read_from(b'foobar')
+
+        with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
+            dctx.read_from(True)
+
+    def test_empty_input(self):
+        dctx = zstd.ZstdDecompressor()
+
+        source = io.BytesIO()
+        it = dctx.read_from(source)
+        # TODO this is arguably wrong. Should get an error about a missing frame.
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        it = dctx.read_from(b'')
+        with self.assertRaises(StopIteration):
+            next(it)
+
+    def test_invalid_input(self):
+        dctx = zstd.ZstdDecompressor()
+
+        source = io.BytesIO(b'foobar')
+        it = dctx.read_from(source)
+        with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'):
+            next(it)
+
+        it = dctx.read_from(b'foobar')
+        with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'):
+            next(it)
+
+    def test_empty_roundtrip(self):
+        cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
+        empty = cctx.compress(b'')
+
+        source = io.BytesIO(empty)
+        source.seek(0)
+
+        dctx = zstd.ZstdDecompressor()
+        it = dctx.read_from(source)
+
+        # No chunks should be emitted since there is no data.
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        # Again for good measure.
+        with self.assertRaises(StopIteration):
+            next(it)
+
+    def test_skip_bytes_too_large(self):
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaisesRegexp(ValueError, 'skip_bytes must be smaller than read_size'):
+            dctx.read_from(b'', skip_bytes=1, read_size=1)
+
+        with self.assertRaisesRegexp(ValueError, 'skip_bytes larger than first input chunk'):
+            b''.join(dctx.read_from(b'foobar', skip_bytes=10))
+
+    def test_skip_bytes(self):
+        cctx = zstd.ZstdCompressor(write_content_size=False)
+        compressed = cctx.compress(b'foobar')
+
+        dctx = zstd.ZstdDecompressor()
+        output = b''.join(dctx.read_from(b'hdr' + compressed, skip_bytes=3))
+        self.assertEqual(output, b'foobar')
+
+    def test_large_output(self):
+        source = io.BytesIO()
+        source.write(b'f' * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        source.write(b'o')
+        source.seek(0)
+
+        cctx = zstd.ZstdCompressor(level=1)
+        compressed = io.BytesIO(cctx.compress(source.getvalue()))
+        compressed.seek(0)
+
+        dctx = zstd.ZstdDecompressor()
+        it = dctx.read_from(compressed)
+
+        chunks = []
+        chunks.append(next(it))
+        chunks.append(next(it))
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        decompressed = b''.join(chunks)
+        self.assertEqual(decompressed, source.getvalue())
+
+        # And again with buffer protocol.
+        it = dctx.read_from(compressed.getvalue())
+        chunks = []
+        chunks.append(next(it))
+        chunks.append(next(it))
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        decompressed = b''.join(chunks)
+        self.assertEqual(decompressed, source.getvalue())
+
+    def test_large_input(self):
+        bytes = list(struct.Struct('>B').pack(i) for i in range(256))
+        compressed = io.BytesIO()
+        input_size = 0
+        cctx = zstd.ZstdCompressor(level=1)
+        with cctx.write_to(compressed) as compressor:
+            while True:
+                compressor.write(random.choice(bytes))
+                input_size += 1
+
+                have_compressed = len(compressed.getvalue()) > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+                have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+                if have_compressed and have_raw:
+                    break
+
+        compressed.seek(0)
+        self.assertGreater(len(compressed.getvalue()),
+                           zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE)
+
+        dctx = zstd.ZstdDecompressor()
+        it = dctx.read_from(compressed)
+
+        chunks = []
+        chunks.append(next(it))
+        chunks.append(next(it))
+        chunks.append(next(it))
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        decompressed = b''.join(chunks)
+        self.assertEqual(len(decompressed), input_size)
+
+        # And again with buffer protocol.
+        it = dctx.read_from(compressed.getvalue())
+
+        chunks = []
+        chunks.append(next(it))
+        chunks.append(next(it))
+        chunks.append(next(it))
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        decompressed = b''.join(chunks)
+        self.assertEqual(len(decompressed), input_size)
+
+    def test_interesting(self):
+        # Found this edge case via fuzzing.
+        cctx = zstd.ZstdCompressor(level=1)
+
+        source = io.BytesIO()
+
+        compressed = io.BytesIO()
+        with cctx.write_to(compressed) as compressor:
+            for i in range(256):
+                chunk = b'\0' * 1024
+                compressor.write(chunk)
+                source.write(chunk)
+
+        dctx = zstd.ZstdDecompressor()
+
+        simple = dctx.decompress(compressed.getvalue(),
+                                 max_output_size=len(source.getvalue()))
+        self.assertEqual(simple, source.getvalue())
+
+        compressed.seek(0)
+        streamed = b''.join(dctx.read_from(compressed))
+        self.assertEqual(streamed, source.getvalue())
+
+    def test_read_write_size(self):
+        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b'foobarfoobar'))
+        dctx = zstd.ZstdDecompressor()
+        for chunk in dctx.read_from(source, read_size=1, write_size=1):
+            self.assertEqual(len(chunk), 1)
+
+        self.assertEqual(source._read_count, len(source.getvalue()))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_estimate_sizes.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,17 @@
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+import zstd
+
+
+class TestSizes(unittest.TestCase):
+    def test_decompression_size(self):
+        size = zstd.estimate_decompression_context_size()
+        self.assertGreater(size, 100000)
+
+    def test_compression_size(self):
+        params = zstd.get_compression_parameters(3)
+        size = zstd.estimate_compression_context_size(params)
+        self.assertGreater(size, 100000)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_module_attributes.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+import zstd
+
+class TestModuleAttributes(unittest.TestCase):
+    def test_version(self):
+        self.assertEqual(zstd.ZSTD_VERSION, (1, 1, 2))
+
+    def test_constants(self):
+        self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22)
+        self.assertEqual(zstd.FRAME_HEADER, b'\x28\xb5\x2f\xfd')
+
+    def test_hasattr(self):
+        attrs = (
+            'COMPRESSION_RECOMMENDED_INPUT_SIZE',
+            'COMPRESSION_RECOMMENDED_OUTPUT_SIZE',
+            'DECOMPRESSION_RECOMMENDED_INPUT_SIZE',
+            'DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE',
+            'MAGIC_NUMBER',
+            'WINDOWLOG_MIN',
+            'WINDOWLOG_MAX',
+            'CHAINLOG_MIN',
+            'CHAINLOG_MAX',
+            'HASHLOG_MIN',
+            'HASHLOG_MAX',
+            'HASHLOG3_MAX',
+            'SEARCHLOG_MIN',
+            'SEARCHLOG_MAX',
+            'SEARCHLENGTH_MIN',
+            'SEARCHLENGTH_MAX',
+            'TARGETLENGTH_MIN',
+            'TARGETLENGTH_MAX',
+            'STRATEGY_FAST',
+            'STRATEGY_DFAST',
+            'STRATEGY_GREEDY',
+            'STRATEGY_LAZY',
+            'STRATEGY_LAZY2',
+            'STRATEGY_BTLAZY2',
+            'STRATEGY_BTOPT',
+        )
+
+        for a in attrs:
+            self.assertTrue(hasattr(zstd, a))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_roundtrip.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,64 @@
+import io
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+try:
+    import hypothesis
+    import hypothesis.strategies as strategies
+except ImportError:
+    raise unittest.SkipTest('hypothesis not available')
+
+import zstd
+
+
+compression_levels = strategies.integers(min_value=1, max_value=22)
+
+
+class TestRoundTrip(unittest.TestCase):
+    @hypothesis.given(strategies.binary(), compression_levels)
+    def test_compress_write_to(self, data, level):
+        """Random data from compress() roundtrips via write_to."""
+        cctx = zstd.ZstdCompressor(level=level)
+        compressed = cctx.compress(data)
+
+        buffer = io.BytesIO()
+        dctx = zstd.ZstdDecompressor()
+        with dctx.write_to(buffer) as decompressor:
+            decompressor.write(compressed)
+
+        self.assertEqual(buffer.getvalue(), data)
+
+    @hypothesis.given(strategies.binary(), compression_levels)
+    def test_compressor_write_to_decompressor_write_to(self, data, level):
+        """Random data from compressor write_to roundtrips via write_to."""
+        compress_buffer = io.BytesIO()
+        decompressed_buffer = io.BytesIO()
+
+        cctx = zstd.ZstdCompressor(level=level)
+        with cctx.write_to(compress_buffer) as compressor:
+            compressor.write(data)
+
+        dctx = zstd.ZstdDecompressor()
+        with dctx.write_to(decompressed_buffer) as decompressor:
+            decompressor.write(compress_buffer.getvalue())
+
+        self.assertEqual(decompressed_buffer.getvalue(), data)
+
+    @hypothesis.given(strategies.binary(average_size=1048576))
+    @hypothesis.settings(perform_health_check=False)
+    def test_compressor_write_to_decompressor_write_to_larger(self, data):
+        compress_buffer = io.BytesIO()
+        decompressed_buffer = io.BytesIO()
+
+        cctx = zstd.ZstdCompressor(level=5)
+        with cctx.write_to(compress_buffer) as compressor:
+            compressor.write(data)
+
+        dctx = zstd.ZstdDecompressor()
+        with dctx.write_to(decompressed_buffer) as decompressor:
+            decompressor.write(compress_buffer.getvalue())
+
+        self.assertEqual(decompressed_buffer.getvalue(), data)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,46 @@
+import sys
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+import zstd
+
+
+if sys.version_info[0] >= 3:
+    int_type = int
+else:
+    int_type = long
+
+
+class TestTrainDictionary(unittest.TestCase):
+    def test_no_args(self):
+        with self.assertRaises(TypeError):
+            zstd.train_dictionary()
+
+    def test_bad_args(self):
+        with self.assertRaises(TypeError):
+            zstd.train_dictionary(8192, u'foo')
+
+        with self.assertRaises(ValueError):
+            zstd.train_dictionary(8192, [u'foo'])
+
+    def test_basic(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+            samples.append(b'foobar' * 64)
+            samples.append(b'baz' * 64)
+            samples.append(b'foobaz' * 64)
+            samples.append(b'bazfoo' * 64)
+
+        d = zstd.train_dictionary(8192, samples)
+        self.assertLessEqual(len(d), 8192)
+
+        dict_id = d.dict_id()
+        self.assertIsInstance(dict_id, int_type)
+
+        data = d.as_bytes()
+        self.assertEqual(data[0:4], b'\x37\xa4\x30\xec')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,136 @@
+/**
+ * Copyright (c) 2016-present, Gregory Szorc
+ * All rights reserved.
+ *
+ * This software may be modified and distributed under the terms
+ * of the BSD license. See the LICENSE file for details.
+ */
+
+/* A Python C extension for Zstandard. */
+
+#include "python-zstandard.h"
+
+PyObject *ZstdError;
+
+PyDoc_STRVAR(estimate_compression_context_size__doc__,
+"estimate_compression_context_size(compression_parameters)\n"
+"\n"
+"Give the amount of memory allocated for a compression context given a\n"
+"CompressionParameters instance");
+
+PyDoc_STRVAR(estimate_decompression_context_size__doc__,
+"estimate_decompression_context_size()\n"
+"\n"
+"Estimate the amount of memory allocated to a decompression context.\n"
+);
+
+static PyObject* estimate_decompression_context_size(PyObject* self) {
+	return PyLong_FromSize_t(ZSTD_estimateDCtxSize());
+}
+
+PyDoc_STRVAR(get_compression_parameters__doc__,
+"get_compression_parameters(compression_level[, source_size[, dict_size]])\n"
+"\n"
+"Obtains a ``CompressionParameters`` instance from a compression level and\n"
+"optional input size and dictionary size");
+
+PyDoc_STRVAR(train_dictionary__doc__,
+"train_dictionary(dict_size, samples)\n"
+"\n"
+"Train a dictionary from sample data.\n"
+"\n"
+"A compression dictionary of size ``dict_size`` will be created from the\n"
+"iterable of samples provided by ``samples``.\n"
+"\n"
+"The raw dictionary content will be returned\n");
+
+static char zstd_doc[] = "Interface to zstandard";
+
+static PyMethodDef zstd_methods[] = {
+	{ "estimate_compression_context_size", (PyCFunction)estimate_compression_context_size,
+	METH_VARARGS, estimate_compression_context_size__doc__ },
+	{ "estimate_decompression_context_size", (PyCFunction)estimate_decompression_context_size,
+	METH_NOARGS, estimate_decompression_context_size__doc__ },
+	{ "get_compression_parameters", (PyCFunction)get_compression_parameters,
+	METH_VARARGS, get_compression_parameters__doc__ },
+	{ "train_dictionary", (PyCFunction)train_dictionary,
+	METH_VARARGS | METH_KEYWORDS, train_dictionary__doc__ },
+	{ NULL, NULL }
+};
+
+void compressobj_module_init(PyObject* mod);
+void compressor_module_init(PyObject* mod);
+void compressionparams_module_init(PyObject* mod);
+void constants_module_init(PyObject* mod);
+void dictparams_module_init(PyObject* mod);
+void compressiondict_module_init(PyObject* mod);
+void compressionwriter_module_init(PyObject* mod);
+void compressoriterator_module_init(PyObject* mod);
+void decompressor_module_init(PyObject* mod);
+void decompressobj_module_init(PyObject* mod);
+void decompressionwriter_module_init(PyObject* mod);
+void decompressoriterator_module_init(PyObject* mod);
+
+void zstd_module_init(PyObject* m) {
+	/* python-zstandard relies on unstable zstd C API features. This means
+	   that changes in zstd may break expectations in python-zstandard.
+
+	   python-zstandard is distributed with a copy of the zstd sources.
+	   python-zstandard is only guaranteed to work with the bundled version
+	   of zstd.
+
+	   However, downstream redistributors or packagers may unbundle zstd
+	   from python-zstandard. This can result in a mismatch between zstd
+	   versions and API semantics. This essentially "voids the warranty"
+	   of python-zstandard and may cause undefined behavior.
+
+	   We detect this mismatch here and refuse to load the module if this
+	   scenario is detected.
+	*/
+	if (ZSTD_VERSION_NUMBER != 10102 || ZSTD_versionNumber() != 10102) {
+		PyErr_SetString(PyExc_ImportError, "zstd C API mismatch; Python bindings not compiled against expected zstd version");
+		return;
+	}
+
+	compressionparams_module_init(m);
+	dictparams_module_init(m);
+	compressiondict_module_init(m);
+	compressobj_module_init(m);
+	compressor_module_init(m);
+	compressionwriter_module_init(m);
+	compressoriterator_module_init(m);
+	constants_module_init(m);
+	decompressor_module_init(m);
+	decompressobj_module_init(m);
+	decompressionwriter_module_init(m);
+	decompressoriterator_module_init(m);
+}
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef zstd_module = {
+	PyModuleDef_HEAD_INIT,
+	"zstd",
+	zstd_doc,
+	-1,
+	zstd_methods
+};
+
+PyMODINIT_FUNC PyInit_zstd(void) {
+	PyObject *m = PyModule_Create(&zstd_module);
+	if (m) {
+		zstd_module_init(m);
+		if (PyErr_Occurred()) {
+			Py_DECREF(m);
+			m = NULL;
+		}
+	}
+	return m;
+}
+#else
+PyMODINIT_FUNC initzstd(void) {
+	PyObject *m = Py_InitModule3("zstd", zstd_methods, zstd_doc);
+	if (m) {
+		zstd_module_init(m);
+	}
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/LICENSE	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,30 @@
+BSD License
+
+For Zstandard software
+
+Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/PATENTS	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,33 @@
+Additional Grant of Patent Rights Version 2
+
+"Software" means the Zstandard software distributed by Facebook, Inc.
+
+Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
+("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
+(subject to the termination provision below) license under any Necessary
+Claims, to make, have made, use, sell, offer to sell, import, and otherwise
+transfer the Software. For avoidance of doubt, no license is granted under
+Facebook’s rights in any patent claims that are infringed by (i) modifications
+to the Software made by you or any third party or (ii) the Software in
+combination with any software or other technology.
+
+The license granted hereunder will terminate, automatically and without notice,
+if you (or any of your subsidiaries, corporate affiliates or agents) initiate
+directly or indirectly, or take a direct financial interest in, any Patent
+Assertion: (i) against Facebook or any of its subsidiaries or corporate
+affiliates, (ii) against any party if such Patent Assertion arises in whole or
+in part from any software, technology, product or service of Facebook or any of
+its subsidiaries or corporate affiliates, or (iii) against any party relating
+to the Software. Notwithstanding the foregoing, if Facebook or any of its
+subsidiaries or corporate affiliates files a lawsuit alleging patent
+infringement against you in the first instance, and you respond by filing a
+patent infringement counterclaim in that lawsuit against that party that is
+unrelated to the Software, the license granted hereunder will not terminate
+under section (i) of this paragraph due to such counterclaim.
+
+A "Necessary Claim" is a claim of a patent owned by Facebook that is
+necessarily infringed by the Software standing alone.
+
+A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
+or contributory infringement or inducement to infringe any patent, including a
+cross-claim or counterclaim.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/bitstream.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,414 @@
+/* ******************************************************************
+   bitstream
+   Part of FSE library
+   header file (to include)
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+*  This API consists of small unitary functions, which must be inlined for best performance.
+*  Since link-time-optimization is not available for all compilers,
+*  these functions are defined in a .h, so they can be included and inlined.
+*/
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include "mem.h"            /* unaligned access routines */
+#include "error_private.h"  /* error codes and messages */
+
+
+/*=========================================
+*  Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+#  include <immintrin.h>   /* support for bextr (experimental) */
+#endif
+
+
+/*-******************************************
+*  bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+*  A critical property of these streams is that they encode and decode in **reverse** direction.
+*  So the first bit sequence you add will be the last to be read, like a LIFO stack.
+*/
+typedef struct
+{
+    size_t bitContainer;
+    int    bitPos;
+    char*  startPtr;
+    char*  ptr;
+    char*  endPtr;
+} BIT_CStream_t;
+
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
+MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+
+/* Start with initCStream, providing the size of buffer to write into.
+*  bitStream will never write outside of this buffer.
+*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
+*
+*  bits are first added to a local register.
+*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+*  Writing data into memory is an explicit operation, performed by the flushBits function.
+*  Hence keep track of how many bits are potentially stored in the local register to avoid register overflow.
+*  After a flushBits, a maximum of 7 bits might still be stored in the local register.
+*
+*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+*  Last operation is to close the bitStream.
+*  The function returns the final size of CStream in bytes.
+*  If data couldn't fit into `dstBuffer`, it will return 0 (== not storable).
+*/
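+
+/* A minimal, illustrative sketch of the write API above (not part of the
+*  library; the values and bit widths are hypothetical):
+*
+*      BIT_CStream_t bitC;
+*      size_t streamSize;
+*      if (ERR_isError(BIT_initCStream(&bitC, dstBuffer, dstCapacity))) return 0;
+*      BIT_addBits(&bitC, value1, 5);    // queue 5 bits in the local register
+*      BIT_addBits(&bitC, value2, 11);   // queue 11 more, still within the register
+*      BIT_flushBits(&bitC);             // commit queued bits to dstBuffer
+*      streamSize = BIT_closeCStream(&bitC);  // 0 means it did not fit
+*/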
+
+
+/*-********************************************
+*  bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+               BIT_DStream_endOfBuffer = 1,
+               BIT_DStream_completed = 2,
+               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/* Start by invoking BIT_initDStream().
+*  A chunk of the bitStream is then stored into a local register.
+*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+*  You can then retrieve bitFields stored into the local register, **in reverse order**.
+*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+*  A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+*  Otherwise, it can be less than that, so proceed accordingly.
+*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
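+
+/* A minimal, illustrative sketch of the read API above (not part of the
+*  library); note that fields come back in reverse order of writing:
+*
+*      BIT_DStream_t bitD;
+*      if (ERR_isError(BIT_initDStream(&bitD, srcBuffer, srcSize))) return 0;
+*      {   size_t const value2 = BIT_readBits(&bitD, 11);   // written last, read first
+*          size_t const value1 = BIT_readBits(&bitD, 5);
+*          BIT_reloadDStream(&bitD);                        // refill the local register
+*          if (!BIT_endOfDStream(&bitD)) { /* stream not fully consumed */ }
+*      }
+*/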
+
+
+/*-****************************************
+*  unsafe API
+******************************************/
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+/* unsafe version; does not check buffer overflow */
+
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+*  Internal functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (register U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+#   endif
+}
+
+/*=====    Local Constants   =====*/
+static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,  0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF };   /* up to 26 bits */
+
+
+/*-**************************************************************
+*  bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ *  `dstCapacity` must be > sizeof(void*)
+ *  @return : 0 if success,
+              otherwise an error code (can be tested using ERR_isError() ) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity)
+{
+    bitC->bitContainer = 0;
+    bitC->bitPos = 0;
+    bitC->startPtr = (char*)startPtr;
+    bitC->ptr = bitC->startPtr;
+    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
+    if (dstCapacity <= sizeof(bitC->ptr)) return ERROR(dstSize_tooSmall);
+    return 0;
+}
+
+/*! BIT_addBits() :
+    can add up to 26 bits into `bitC`.
+    Does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
+{
+    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+    bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
+{
+    bitC->bitContainer |= value << bitC->bitPos;
+    bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ *  unsafe version; does not check buffer overflow */
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
+{
+    size_t const nbBytes = bitC->bitPos >> 3;
+    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+    bitC->ptr += nbBytes;
+    bitC->bitPos &= 7;
+    bitC->bitContainer >>= nbBytes*8;   /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
+}
+
+/*! BIT_flushBits() :
+ *  safe version; checks for buffer overflow, and prevents it.
+ *  note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
+{
+    size_t const nbBytes = bitC->bitPos >> 3;
+    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+    bitC->ptr += nbBytes;
+    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+    bitC->bitPos &= 7;
+    bitC->bitContainer >>= nbBytes*8;   /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
+}
+
+/*! BIT_closeCStream() :
+ *  @return : size of CStream, in bytes,
+              or 0 if it could not fit into dstBuffer */
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
+{
+    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
+    BIT_flushBits(bitC);
+
+    if (bitC->ptr >= bitC->endPtr) return 0; /* doesn't fit within authorized budget : cancel */
+
+    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+*   Initialize a BIT_DStream_t.
+*   `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+*   `srcSize` must be the *exact* size of the bitStream, in bytes.
+*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
+          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+    } else {
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
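+            /* intentional fall-through on every case to accumulate the remaining bytes */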
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
+            default:;
+        }
+        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+    }
+
+    return srcSize;
+}
+
+MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+{
+    return bitContainer >> start;
+}
+
+MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+{
+#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008  /* experimental */
+#  if defined(__x86_64__)
+    if (sizeof(bitContainer)==8)
+        return _bextr_u64(bitContainer, start, nbBits);
+    else
+#  endif
+        return _bextr_u32(bitContainer, start, nbBits);
+#else
+    return (bitContainer >> start) & BIT_mask[nbBits];
+#endif
+}
+
+MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+    return bitContainer & BIT_mask[nbBits];
+}
+
+/*! BIT_lookBits() :
+ *  Provides next n bits from local register.
+ *  local register is not modified.
+ *  On 32-bits, maxNbBits==24.
+ *  On 64-bits, maxNbBits==56.
+ *  @return : value extracted
+ */
+MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+{
+#if defined(__BMI__) && defined(__GNUC__)   /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
+    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
+#else
+    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+#endif
+}
+
+/*! BIT_lookBitsFast() :
+*   unsafe version; works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+{
+    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+/*! BIT_readBits() :
+ *  Read (consume) next n bits from local register and update.
+ *  Pay attention not to read more bits than are contained in the local register.
+ *  @return : extracted value.
+ */
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BIT_lookBits(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*! BIT_readBitsFast() :
+*   unsafe version; works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BIT_lookBitsFast(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*! BIT_reloadDStream() :
+*   Refill `bitD` from the buffer previously set in BIT_initDStream().
+*   This function is safe: it guarantees it will not read beyond the src buffer.
+*   @return : status of `BIT_DStream_t` internal register.
+              if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should not happen => corruption detected */
+        return BIT_DStream_overflow;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BIT_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start) {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+        return BIT_DStream_completed;
+    }
+    {   U32 nbBytes = bitD->bitsConsumed >> 3;
+        BIT_DStream_status result = BIT_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start) {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BIT_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
+
+/*! BIT_endOfDStream() :
+*   @return Tells if DStream has exactly reached its end (all bits consumed).
+*/
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/entropy_common.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,227 @@
+/*
+   Common functions of New Generation Entropy library
+   Copyright (C) 2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+/* *************************************
+*  Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h"       /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*-****************************************
+*  FSE Error Management
+******************************************/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* **************************************************************
+*  HUF Error Management
+****************************************************************/
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+static short FSE_abs(short a) { return (short)(a<0 ? -a : a); }
+
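+/*! FSE_readNCount() :
+    Reads the compact serialization of a normalized-counter table (as written
+    by FSE_writeNCount()) from `headerBuffer`. Fills `normalizedCounter`,
+    stores the table log in `tableLogPtr` and the last symbol in `maxSVPtr`.
+    @return : number of bytes read from `headerBuffer`, or an error code,
+              which can be tested using FSE_isError() */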
+size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) & (charnum<=*maxSVPtr)) {
+        if (previous0) {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF) {
+                n0 += 24;
+                if (ip < iend-5) {
+                    ip += 2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                } else {
+                    bitStream >>= 16;
+                    bitCount   += 16;
+            }   }
+            while ((bitStream & 3) == 3) {
+                n0 += 3;
+                bitStream >>= 2;
+                bitCount += 2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            } else {
+                bitStream >>= 2;
+        }   }
+        {   short const max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max) {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            } else {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSE_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold) {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+    }   }   /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
+    if (remaining != 1) return ERROR(corruption_detected);
+    if (bitCount > 32) return ERROR(corruption_detected);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    return ip-istart;
+}
+
+
+/*! HUF_readStats() :
+    Read compact Huffman tree, saved by HUF_writeCTable().
+    `huffWeight` is destination buffer.
+    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+    @return : size read from `src`, or an error code.
+    Note : Needed by HUF_readCTable() and HUF_readDTableX?().
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    /* memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzers complain ... */
+
+    if (iSize >= 128) {  /* special header */
+        oSize = iSize - 127;
+        iSize = ((oSize+1)/2);
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        if (oSize >= hwSize) return ERROR(corruption_detected);
+        ip += 1;
+        {   U32 n;
+            for (n=0; n<oSize; n+=2) {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+    }   }   }
+    else  {   /* header compressed with FSE (normal case) */
+        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+    weightTotal = 0;
+    {   U32 n; for (n=0; n<oSize; n++) {
+            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+            rankStats[huffWeight[n]]++;
+            weightTotal += (1 << huffWeight[n]) >> 1;
+    }   }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+        *tableLogPtr = tableLog;
+        /* determine last weight */
+        {   U32 const total = 1 << tableLog;
+            U32 const rest = total - weightTotal;
+            U32 const verif = 1 << BIT_highbit32(rest);
+            U32 const lastWeight = BIT_highbit32(rest) + 1;
+            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+            huffWeight[oSize] = (BYTE)lastWeight;
+            rankStats[lastWeight]++;
+    }   }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    return iSize+1;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/error_private.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,43 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+/* The purpose of this file is to have a single list of error strings embedded in binary */
+
+#include "error_private.h"
+
+const char* ERR_getErrorString(ERR_enum code)
+{
+    static const char* const notErrorCode = "Unspecified error code";
+    switch( code )
+    {
+    case PREFIX(no_error): return "No error detected";
+    case PREFIX(GENERIC):  return "Error (generic)";
+    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
+    case PREFIX(version_unsupported): return "Version not supported";
+    case PREFIX(parameter_unknown): return "Unknown parameter type";
+    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
+    case PREFIX(frameParameter_unsupportedBy32bits): return "Frame parameter unsupported in 32-bits mode";
+    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
+    case PREFIX(compressionParameter_unsupported): return "Compression parameter is out of bound";
+    case PREFIX(init_missing): return "Context should be initialized first";
+    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
+    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
+    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+    case PREFIX(srcSize_wrong): return "Src size incorrect";
+    case PREFIX(corruption_detected): return "Corrupted block detected";
+    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
+    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
+    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
+    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
+    case PREFIX(maxCode):
+    default: return notErrorCode;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/error_private.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,76 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>        /* size_t */
+#include "zstd_errors.h"  /* enum list */
+
+
+/* ****************************************
+*  Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+#  define ERR_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+#  define ERR_STATIC static __inline
+#else
+#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-****************************************
+*  Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+*  Error codes handling
+******************************************/
+#ifdef ERROR
+#  undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
+#endif
+#define ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+
+/*-****************************************
+*  Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+    return ERR_getErrorString(ERR_getErrorCode(code));
+}
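+
+/* Example, for illustration only : how callers typically combine the helpers
+   above. `check` is an invented name, and printf requires <stdio.h>.
+
+    size_t check(size_t retCode)
+    {
+        if (ERR_isError(retCode))
+            printf("operation failed : %s\n", ERR_getErrorName(retCode));
+        return retCode;
+    }
+*/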
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/fse.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,668 @@
+/* ******************************************************************
+   FSE : Finite State Entropy codec
+   Public Prototypes declaration
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef FSE_H
+#define FSE_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*-*****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+
+
+/*-****************************************
+*  FSE simple functions
+******************************************/
+/*! FSE_compress() :
+    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
+    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
+    @return : size of compressed data (<= dstCapacity).
+    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
+                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
+*/
+size_t FSE_compress(void* dst, size_t dstCapacity,
+              const void* src, size_t srcSize);
+
+/*! FSE_decompress():
+    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'dstCapacity'.
+    @return : size of regenerated data (<= dstCapacity),
+              or an error code, which can be tested using FSE_isError() .
+
+    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
+    Why ? : making this distinction requires a header.
+    Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+size_t FSE_decompress(void* dst,  size_t dstCapacity,
+                const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+*  Tool functions
+******************************************/
+size_t FSE_compressBound(size_t size);       /* maximum compressed size */
+
+/* Error Management */
+unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
+const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
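+
+/* Usage sketch, for illustration only : round-trip one block through the
+   simple functions above. `fse_roundTrip` is an invented name; it assumes
+   <stdlib.h> and the FSE implementation are linked.
+
+    size_t fse_roundTrip(const void* src, size_t srcSize)
+    {
+        size_t const cBound = FSE_compressBound(srcSize);
+        void* const cBuff = malloc(cBound);
+        void* const rBuff = malloc(srcSize);
+        size_t rSize = 0;
+        if (cBuff && rBuff) {
+            size_t const cSize = FSE_compress(cBuff, cBound, src, srcSize);
+            if (!FSE_isError(cSize) && cSize > 1)   // 0 => not compressible, 1 => RLE : store raw / RLE instead
+                rSize = FSE_decompress(rBuff, srcSize, cBuff, cSize);
+        }
+        free(cBuff); free(rBuff);
+        return rSize;   // == srcSize on success, 0 otherwise
+    }
+*/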
+
+
+/*-*****************************************
+*  FSE advanced functions
+******************************************/
+/*! FSE_compress2() :
+    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
+    Both parameters can be defined as '0' to mean : use default value
+    @return : size of compressed data
+    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
+                     if FSE_isError(return), it's an error code.
+*/
+size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*-*****************************************
+*  FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrence from source[] into table count[]
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide the normalized distribution using an external method.
+*/
+
+/* *** COMPRESSION *** */
+
+/*! FSE_count():
+    Provides the precise count of each byte within a table 'count'.
+    'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+    *maxSymbolValuePtr will be updated if detected smaller than initial value.
+    @return : the count of the most frequent symbol (which is not identified).
+              if return == srcSize, there is only one symbol.
+              Can also return an error code, which can be tested with FSE_isError(). */
+size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+/*! FSE_optimalTableLog():
+    dynamically downsize 'tableLog' when conditions are met.
+    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+    @return : recommended tableLog (necessarily <= 'maxTableLog') */
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+    @return : tableLog,
+              or an errorCode, which can be tested using FSE_isError() */
+size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_NCountWriteBound():
+    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+    Typically useful for allocation purpose. */
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+    Compactly save 'normalizedCounter' into 'buffer'.
+    @return : size of the compressed table,
+              or an errorCode, which can be tested using FSE_isError(). */
+size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*! Constructor and Destructor of FSE_CTable.
+    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
+FSE_CTable* FSE_createCTable (unsigned tableLog, unsigned maxSymbolValue);
+void        FSE_freeCTable (FSE_CTable* ct);
+
+/*! FSE_buildCTable():
+    Builds `ct`, which must be already allocated, using FSE_createCTable().
+    @return : 0, or an errorCode, which can be tested using FSE_isError() */
+size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_compress_usingCTable():
+    Compress `src` using `ct` into `dst` which must be already allocated.
+    @return : size of compressed data (<= `dstCapacity`),
+              or 0 if compressed data could not fit into `dst`,
+              or an errorCode, which can be tested using FSE_isError() */
+size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. FSE_count() does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0].
+maxSymbolValuePtr[0] will be updated with its real value (necessarily <= original value).
+FSE_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that the sum of frequencies is == 2^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within the distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_NCountWriteBound(maxSymbolValue, tableLog).
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'.
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
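+
+/* The tutorial above, condensed into a single function. Illustrative sketch
+   only : `fse_compressDetailed` is an invented name, the not-compressible and
+   single-symbol special cases are ignored, and all failures collapse to 0.
+
+    size_t fse_compressDetailed(void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize)
+    {
+        unsigned count[256];
+        short norm[256];
+        unsigned maxSymbolValue = 255;
+        unsigned tableLog;
+        unsigned char* const ostart = (unsigned char*)dst;
+        size_t hSize, cSize;
+        FSE_CTable* ct;
+
+        if (FSE_isError(FSE_count(count, &maxSymbolValue, src, srcSize))) return 0;
+        tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
+        if (FSE_isError(FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue))) return 0;
+        hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);   // save header
+        if (FSE_isError(hSize)) return 0;
+        ct = FSE_createCTable(tableLog, maxSymbolValue);
+        if (ct == NULL) return 0;
+        cSize = FSE_isError(FSE_buildCTable(ct, norm, maxSymbolValue, tableLog)) ? 0
+              : FSE_compress_usingCTable(ostart+hSize, dstCapacity-hSize, src, srcSize, ct);
+        FSE_freeCTable(ct);
+        return (FSE_isError(cSize) || cSize==0) ? 0 : hSize+cSize;
+    }
+*/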
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+    Read compactly saved 'normalizedCounter' from 'rBuffer'.
+    @return : size read from 'rBuffer',
+              or an errorCode, which can be tested using FSE_isError().
+              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSE_DTable.
+    Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+FSE_DTable* FSE_createDTable(unsigned tableLog);
+void        FSE_freeDTable(FSE_DTable* dt);
+
+/*! FSE_buildDTable():
+    Builds 'dt', which must be already allocated, using FSE_createDTable().
+    return : 0, or an errorCode, which can be tested using FSE_isError() */
+size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_decompress_usingDTable():
+    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+    into `dst` which must be already allocated.
+    @return : size of regenerated data (necessarily <= `dstCapacity`),
+              or an errorCode, which can be tested using FSE_isError() */
+size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If the block is uncompressed, use memcpy() instead.
+ If the block is a single repeated byte, use memset() instead.)
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
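+
+/* The decompression tutorial above, condensed into a single function that
+   mirrors the compression sketch earlier. Illustrative only :
+   `fse_decompressDetailed` is an invented name, and the NULL check on
+   allocation stands in for fuller error handling.
+
+    size_t fse_decompressDetailed(void* dst, size_t dstCapacity,
+                                  const void* cSrc, size_t cSrcSize)
+    {
+        short norm[256];
+        unsigned maxSymbolValue = 255;
+        unsigned tableLog;
+        const unsigned char* const ip = (const unsigned char*)cSrc;
+        FSE_DTable* dt;
+        size_t rSize;
+        size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
+        if (FSE_isError(hSize)) return hSize;
+        dt = FSE_createDTable(tableLog);
+        if (dt == NULL) return 0;
+        rSize = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
+        if (!FSE_isError(rSize))
+            rSize = FSE_decompress_usingDTable(dst, dstCapacity, ip+hSize, cSrcSize-hSize, dt);
+        FSE_freeDTable(dt);
+        return rSize;   // regenerated size, or an error code
+    }
+*/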
+
+
+#ifdef FSE_STATIC_LINKING_ONLY
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+
+/* *****************************************
+*  Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
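+
+/* Example, illustrative only : static allocation for tableLog 12 and
+   maxSymbolValue 255, usable in place of FSE_createCTable()/FSE_createDTable() :
+
+    FSE_CTable ct[ FSE_CTABLE_SIZE_U32(12, 255) ];
+    FSE_DTable dt[ FSE_DTABLE_SIZE_U32(12) ];
+*/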
+
+
+/* *****************************************
+*  FSE advanced API
+*******************************************/
+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned
+ */
+size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* source, size_t sourceSize, unsigned* workSpace);
+
+/** FSE_countFast() :
+ *  same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr
+ */
+size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of minimum `1024` unsigned
+ */
+size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);
+
+/*! FSE_count_simple
+ * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
+*/
+size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + (1<<((maxTableLog>2)?(maxTableLog-2):0)) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
+/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `(1<<tableLog)`.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
+/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
+
+
+/* *****************************************
+*  FSE symbol compression API
+*******************************************/
+/*!
+   This API consists of small unitary functions, which highly benefit from being inlined.
+   Hence their bodies are included in the next section.
+*/
+typedef struct {
+    ptrdiff_t   value;
+    const void* stateTable;
+    const void* symbolTT;
+    unsigned    stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
+
+/**<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable    ct;         // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream;  // bitStream tracking structure
+FSE_CState_t  state;      // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+    FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested using FSE_isError().
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+    FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
+    BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory; they just store it into a local register, for speed.
+The local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+    BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+    FSE_flushCState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+    size_t size = BIT_closeCStream(&bitStream);
+*/
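+
+/* The snippets above, assembled into one loop. Illustrative sketch only :
+   `fse_encodeAll` is an invented name; note the reverse iteration, so the
+   buffer decodes front-to-back.
+
+    size_t fse_encodeAll(void* dst, size_t dstCapacity,
+                         const unsigned char* src, size_t srcSize,
+                         const FSE_CTable* ct)
+    {
+        BIT_CStream_t bitStream;
+        FSE_CState_t state;
+        size_t n;
+        if (FSE_isError(BIT_initCStream(&bitStream, dst, dstCapacity))) return 0;
+        FSE_initCState(&state, ct);
+        for (n = srcSize; n > 0; n--) {           // last symbol first (LIFO)
+            FSE_encodeSymbol(&bitStream, &state, src[n-1]);
+            BIT_flushBits(&bitStream);            // commit the local register to memory
+        }
+        FSE_flushCState(&bitStream, &state);
+        return BIT_closeCStream(&bitStream);      // 0 => dst too small
+    }
+*/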
+
+
+/* *****************************************
+*  FSE symbol decompression API
+*******************************************/
+typedef struct {
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+/**<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream;    // Stream context
+FSE_DState_t  DState;     // State context. Multiple ones are possible
+FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several ones) :
+    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you previously stored into the bitStream (in reverse order).
+Note : maximum allowed nbBits is 25, for 32-bits compatibility
+    size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All the above operations only read from the local register (whose size depends on size_t).
+Refilling the register from memory is performed manually by the reload method.
+    endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream()'s result tells if there is still more data to read from DStream.
+BIT_DStream_unfinished : there is still some data left in the DStream.
+BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : DStream reached its exact end, corresponding in general to decompression completed.
+BIT_DStream_overflow : DStream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+    BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+    BIT_endOfDStream(&DStream);
+Also check the states. There might be some symbols left there, if some high-probability ones (>50%) are possible.
+    FSE_endOfDState(&DState);
+*/
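+
+/* Matching decode loop for the encoding sketch earlier, assuming the
+   regenerated size (`dstSize`) is known. Illustrative only; `fse_decodeAll`
+   is an invented name.
+
+    size_t fse_decodeAll(unsigned char* dst, size_t dstSize,
+                         const void* cSrc, size_t cSrcSize,
+                         const FSE_DTable* dt)
+    {
+        BIT_DStream_t DStream;
+        FSE_DState_t DState;
+        size_t n;
+        if (FSE_isError(BIT_initDStream(&DStream, cSrc, cSrcSize))) return 0;
+        FSE_initDState(&DState, &DStream, dt);
+        for (n = 0; n < dstSize; n++) {
+            dst[n] = FSE_decodeSymbol(&DState, &DStream);   // reads <= tableLog bits
+            BIT_reloadDStream(&DStream);                    // refill the local register
+        }
+        // final check suggested by the tutorial above
+        if (!BIT_endOfDStream(&DStream) || !FSE_endOfDState(&DState)) return 0;
+        return n;
+    }
+*/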
+
+
+/* *****************************************
+*  FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+*  Implementation of inlined functions
+*******************************************/
+typedef struct {
+    int deltaFindState;
+    U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
+{
+    const void* ptr = ct;
+    const U16* u16ptr = (const U16*) ptr;
+    const U32 tableLog = MEM_read16(ptr);
+    statePtr->value = (ptrdiff_t)1<<tableLog;
+    statePtr->stateTable = u16ptr+2;
+    statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
+    statePtr->stateLog = tableLog;
+}
+
+
+/*! FSE_initCState2() :
+*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+*   uses the smallest state value possible, saving the cost of this symbol */
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+    FSE_initCState(statePtr, ct);
+    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+        const U16* stateTable = (const U16*)(statePtr->stateTable);
+        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+    }
+}
+
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
+{
+    const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+    const U16* const stateTable = (const U16*)(statePtr->stateTable);
+    U32 nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+    BIT_addBits(bitC, statePtr->value, nbBitsOut);
+    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+{
+    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+    BIT_flushBits(bitC);
+}
+
+
+/* ======    Decompression    ====== */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSE_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSE_decode_t;   /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+    BIT_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    return DInfo.symbol;
+}
+
+MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    size_t const lowBits = BIT_readBits(bitD, nbBits);
+    DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+    unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+    return DStatePtr->state == 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+*  Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+#  define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+#  define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+#  define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif   /* !FSE_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+*  Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
+
+
+#endif /* FSE_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSE_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/fse_decompress.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,329 @@
+/* ******************************************************************
+   FSE : Finite State Entropy decoder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+#include "bitstream.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+/* check and forward error code */
+#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
+
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included
+  for type-specific functions (template emulation in C)
+  Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+FSE_DTable* FSE_createDTable (unsigned tableLog)
+{
+    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+    return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSE_freeDTable (FSE_DTable* dt)
+{
+    free(dt);
+}
+
+size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
+    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+
+    U32 const maxSV1 = maxSymbolValue + 1;
+    U32 const tableSize = 1 << tableLog;
+    U32 highThreshold = tableSize-1;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    {   FSE_DTableHeader DTableH;
+        DTableH.tableLog = (U16)tableLog;
+        DTableH.fastMode = 1;
+        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
+            U32 s;
+            for (s=0; s<maxSV1; s++) {
+                if (normalizedCounter[s]==-1) {
+                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+                    symbolNext[s] = 1;
+                } else {
+                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+                    symbolNext[s] = normalizedCounter[s];
+        }   }   }
+        memcpy(dt, &DTableH, sizeof(DTableH));
+    }
+
+    /* Spread symbols */
+    {   U32 const tableMask = tableSize-1;
+        U32 const step = FSE_TABLESTEP(tableSize);
+        U32 s, position = 0;
+        for (s=0; s<maxSV1; s++) {
+            int i;
+            for (i=0; i<normalizedCounter[s]; i++) {
+                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }   }
+        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+    }
+
+    /* Build Decoding table */
+    {   U32 u;
+        for (u=0; u<tableSize; u++) {
+            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
+            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+    }   }
+
+    return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-*******************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSV1 = tableMask+1;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<maxSV1; s++) {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSE_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BIT_DStream_t bitD;
+    FSE_DState_t state1;
+    FSE_DState_t state2;
+
+    /* Init */
+    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+    FSE_initDState(&state1, &bitD, dt);
+    FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+    /* 4 symbols per loop */
+    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+        op[0] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[1] = FSE_GETSYMBOL(&state2);
+
+        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[3] = FSE_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
+    while (1) {
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+        *op++ = FSE_GETSYMBOL(&state1);
+        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+            *op++ = FSE_GETSYMBOL(&state2);
+            break;
+        }
+
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+        *op++ = FSE_GETSYMBOL(&state2);
+        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+            *op++ = FSE_GETSYMBOL(&state1);
+            break;
+    }   }
+
+    return op-ostart;
+}
+
+
+size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSE_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+    const U32 fastMode = DTableH->fastMode;
+
+    /* select fast mode (static) */
+    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSE_MAX_SYMBOL_VALUE+1];
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+
+    /* normal FSE decoding mode */
+    size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+    if (FSE_isError(NCountLength)) return NCountLength;
+    //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
+    if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+    ip += NCountLength;
+    cSrcSize -= NCountLength;
+
+    CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
+
+    return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace);   /* always return, even if it is an error code */
+}
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
+{
+    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
+    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
+}
+
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/huf.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,238 @@
+/* ******************************************************************
+   Huffman coder, part of New Generation Entropy library
+   header file
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *** Dependencies *** */
+#include <stddef.h>    /* size_t */
+
+
+/* *** simple functions *** */
+/**
+HUF_compress() :
+    Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+    'dst' buffer must be already allocated.
+    Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+    `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+    @return : size of compressed data (<= `dstCapacity`).
+    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+                     if return == 1, srcData is a single repeated byte symbol (RLE compression).
+                     if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+*/
+size_t HUF_compress(void* dst, size_t dstCapacity,
+              const void* src, size_t srcSize);
+
+/**
+HUF_decompress() :
+    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated buffer 'dst', of minimum size 'originalSize'.
+    `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+    Note : in contrast with FSE, HUF_decompress can regenerate
+           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+           because it knows size to regenerate.
+    @return : size of regenerated data (== originalSize),
+              or an error code, which can be tested using HUF_isError()
+*/
+size_t HUF_decompress(void* dst,  size_t originalSize,
+                const void* cSrc, size_t cSrcSize);
+
+
+/* ***   Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024)       /**< maximum input size for a single block compressed with HUF_compress */
+size_t HUF_compressBound(size_t size);       /**< maximum compressed size (worst case) */
+
+/* Error Management */
+unsigned    HUF_isError(size_t code);        /**< tells if a return value is an error code */
+const char* HUF_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */
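+
+/* Usage sketch, for illustration only : round-trip one block through the two
+   simple functions above. `huf_roundTrip` is an invented name; it assumes
+   <stdlib.h> and the HUF implementation are linked, and srcSize <= HUF_BLOCKSIZE_MAX.
+
+    int huf_roundTrip(const void* src, size_t srcSize)
+    {
+        int ok = 0;
+        size_t const cBound = HUF_compressBound(srcSize);
+        void* const cBuff = malloc(cBound);
+        void* const rBuff = malloc(srcSize);
+        if (cBuff && rBuff) {
+            size_t const cSize = HUF_compress(cBuff, cBound, src, srcSize);
+            if (!HUF_isError(cSize) && cSize > 1)   // 0 => not compressible, 1 => RLE
+                ok = (HUF_decompress(rBuff, srcSize, cBuff, cSize) == srcSize);
+        }
+        free(cBuff); free(rBuff);
+        return ok;
+    }
+*/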
+
+
+/* ***   Advanced function   *** */
+
+/** HUF_compress2() :
+ *   Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog`.
+ *   `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+/** HUF_compress4X_wksp() :
+*   Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
+size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least 1024 unsigned */
+
+
+
+#ifdef HUF_STATIC_LINKING_ONLY
+
+/* *** Dependencies *** */
+#include "mem.h"   /* U32 */
+
+
+/* *** Constants *** */
+#define HUF_TABLELOG_ABSOLUTEMAX  15   /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#define HUF_TABLELOG_MAX  12           /* max configured tableLog (for static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
+#define HUF_TABLELOG_DEFAULT  11       /* tableLog by default, when not specified */
+#define HUF_SYMBOLVALUE_MAX 255
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+#  error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+*  Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+    U32 name##hb[maxSymbolValue+1]; \
+    void* name##hv = &(name##hb); \
+    HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
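+
+/* Example (illustrative) : declaring tables on the stack with the macros above :
+
+       HUF_CREATE_STATIC_CTABLE(cTable, HUF_SYMBOLVALUE_MAX);
+       HUF_CREATE_STATIC_DTABLEX2(dTableX2, HUF_TABLELOG_MAX);
+       HUF_CREATE_STATIC_DTABLEX4(dTableX4, HUF_TABLELOG_MAX);
+*/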
+
+
+/* ****************************************
+*  Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+
+
+/* ****************************************
+*  HUF detailed API
+******************************************/
+/*!
+HUF_compress() does the following:
+1. count symbol occurrence from source[] into table count[] using FSE_count()
+2. (optional) refine tableLog using HUF_optimalTableLog()
+3. build Huffman table from count using HUF_buildCTable()
+4. save Huffman table to memory buffer using HUF_writeCTable()
+5. encode the data stream using HUF_compress4X_usingCTable()
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and regenerate 'CTable' using external methods.
+*/
+/* FSE_count() : find it within "fse.h" */
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);
+size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
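+
+/* Putting the 5 steps together (illustrative sketch; `dst`, `dstCapacity`, `src`
+   and `srcSize` are hypothetical, FSE_count() is assumed available from "fse.h",
+   and error checks are omitted for brevity) :
+
+       unsigned count[HUF_SYMBOLVALUE_MAX+1];
+       unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+       HUF_CREATE_STATIC_CTABLE(cTable, HUF_SYMBOLVALUE_MAX);
+       FSE_count(count, &maxSymbolValue, src, srcSize);                                               (1)
+       {   unsigned const tableLog  = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);   (2)
+           size_t const maxNbBits   = HUF_buildCTable(cTable, count, maxSymbolValue, tableLog);             (3)
+           size_t const hSize       = HUF_writeCTable(dst, dstCapacity, cTable, maxSymbolValue, (unsigned)maxNbBits);   (4)
+           size_t const cSize       = HUF_compress4X_usingCTable((BYTE*)dst+hSize, dstCapacity-hSize, src, srcSize, cTable);   (5)
+       }
+*/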
+
+
+/** HUF_buildCTable_wksp() :
+ *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
+ */
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);
+
+/*! HUF_readStats() :
+    Read compact Huffman tree, saved by HUF_writeCTable().
+    `huffWeight` is destination buffer.
+    @return : size read from `src`, or an error code.
+    Note : needed by HUF_readCTable() and HUF_readDTableXn(). */
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize);
+
+/** HUF_readCTable() :
+*   Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize);
+
+
+/*
+HUF_decompress() does the following:
+1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
+2. build the Huffman table from the saved representation, using HUF_readDTableXn()
+3. decode 1 or 4 segments in parallel, using HUF_decompress1Xn_usingDTable() or HUF_decompress4Xn_usingDTable()
+*/
+
+/** HUF_selectDecoder() :
+*   Tells which decoder is likely to decode faster,
+*   based on a set of pre-determined metrics.
+*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
+
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
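+
+/* Dispatch sketch (illustrative), mirroring what HUF_decompress() does
+   internally (RLE and uncompressed inputs excepted); note the assumption
+   0 < cSrcSize < dstSize <= 128 KB required by HUF_selectDecoder() :
+
+       U32 const algo = HUF_selectDecoder(dstSize, cSrcSize);
+       size_t const rSize = algo ? HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize)
+                                 : HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
+*/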
+
+
+/* single stream variants */
+
+size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least 1024 unsigned */
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
+size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+
+#endif /* HUF_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* HUF_H_298734234 */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/mem.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,372 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>     /* size_t, ptrdiff_t */
+#include <string.h>     /* memcpy */
+
+
+/*-****************************************
+*  Compiler specifics
+******************************************/
+#if defined(_MSC_VER)   /* Visual Studio */
+#   include <stdlib.h>  /* _byteswap_ulong */
+#   include <intrin.h>  /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+#  define MEM_STATIC static __inline __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+/* code only tested on 32 and 64 bits systems */
+#define MEM_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }
+MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+
+
+/*-**************************************************************
+*  Basic Types
+*****************************************************************/
+#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef  int16_t S16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+  typedef  int64_t S64;
+  typedef intptr_t iPtrDiff;
+#else
+  typedef unsigned char      BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+  typedef ptrdiff_t      iPtrDiff;
+#endif
+
+
+/*-**************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on a compiler extension, but violates the C standard.
+ *            It can generate buggy code on targets that require aligned accesses.
+ *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2).
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif defined(__INTEL_COMPILER) /*|| defined(_MSC_VER)*/ || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
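+
+/* e.g., to force the packed-struct method from the build command line
+   (an assumption about your toolchain, not a requirement) :
+       cc -DMEM_FORCE_MEMORY_ACCESS=1 ...
+*/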
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard, by lying about structure alignment.
+Only use if there is no other choice to achieve best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+MEM_STATIC U64 MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
+	__pragma( pack(push, 1) )
+    typedef union { U16 u16; U32 u32; U64 u64; size_t st; } unalign;
+    __pragma( pack(pop) )
+#else
+    typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
+#endif
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+MEM_STATIC U64 MEM_readST(const void* ptr) { return ((const unalign*)ptr)->st; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC size_t MEM_readST(const void* memPtr)
+{
+    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write32(void* memPtr, U32 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write64(void* memPtr, U64 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_ulong(in);
+#elif defined (__GNUC__)
+    return __builtin_bswap32(in);
+#else
+    return  ((in << 24) & 0xff000000 ) |
+            ((in <<  8) & 0x00ff0000 ) |
+            ((in >>  8) & 0x0000ff00 ) |
+            ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_uint64(in);
+#elif defined (__GNUC__)
+    return __builtin_bswap64(in);
+#else
+    return  ((in << 56) & 0xff00000000000000ULL) |
+            ((in << 40) & 0x00ff000000000000ULL) |
+            ((in << 24) & 0x0000ff0000000000ULL) |
+            ((in << 8)  & 0x000000ff00000000ULL) |
+            ((in >> 8)  & 0x00000000ff000000ULL) |
+            ((in >> 24) & 0x0000000000ff0000ULL) |
+            ((in >> 40) & 0x000000000000ff00ULL) |
+            ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+MEM_STATIC size_t MEM_swapST(size_t in)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_swap32((U32)in);
+    else
+        return (size_t)MEM_swap64((U64)in);
+}
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian()) {
+        MEM_write16(memPtr, val);
+    } else {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
+{
+    MEM_writeLE16(memPtr, (U16)val);
+    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else
+        return MEM_swap32(MEM_read32(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
+{
+    if (MEM_isLittleEndian())
+        MEM_write32(memPtr, val32);
+    else
+        MEM_write32(memPtr, MEM_swap32(val32));
+}
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else
+        return MEM_swap64(MEM_read64(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
+{
+    if (MEM_isLittleEndian())
+        MEM_write64(memPtr, val64);
+    else
+        MEM_write64(memPtr, MEM_swap64(val64));
+}
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
+{
+    if (MEM_32bits())
+        MEM_writeLE32(memPtr, (U32)val);
+    else
+        MEM_writeLE64(memPtr, (U64)val);
+}
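+
+/* Round-trip sketch (illustrative) : the LE functions store the same byte
+   order on any platform, so the pair below holds everywhere :
+
+       BYTE buf[4];
+       MEM_writeLE32(buf, 0x12345678U);              (bytes in memory : 78 56 34 12)
+       assert(MEM_readLE32(buf) == 0x12345678U);
+*/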
+
+/*=== Big endian r/w ===*/
+
+MEM_STATIC U32 MEM_readBE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_swap32(MEM_read32(memPtr));
+    else
+        return MEM_read32(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
+{
+    if (MEM_isLittleEndian())
+        MEM_write32(memPtr, MEM_swap32(val32));
+    else
+        MEM_write32(memPtr, val32);
+}
+
+MEM_STATIC U64 MEM_readBE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_swap64(MEM_read64(memPtr));
+    else
+        return MEM_read64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
+{
+    if (MEM_isLittleEndian())
+        MEM_write64(memPtr, MEM_swap64(val64));
+    else
+        MEM_write64(memPtr, val64);
+}
+
+MEM_STATIC size_t MEM_readBEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readBE32(memPtr);
+    else
+        return (size_t)MEM_readBE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
+{
+    if (MEM_32bits())
+        MEM_writeBE32(memPtr, (U32)val);
+    else
+        MEM_writeBE64(memPtr, (U64)val);
+}
+
+
+/* function safe only for comparisons */
+MEM_STATIC U32 MEM_readMINMATCH(const void* memPtr, U32 length)
+{
+    switch (length)
+    {
+    default :
+    case 4 : return MEM_read32(memPtr);
+    case 3 : if (MEM_isLittleEndian())
+                return MEM_read32(memPtr)<<8;
+             else
+                return MEM_read32(memPtr)>>8;
+    }
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/xxhash.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,867 @@
+/*
+*  xxHash - Fast Hash algorithm
+*  Copyright (C) 2012-2016, Yann Collet
+*
+*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+*  Redistribution and use in source and binary forms, with or without
+*  modification, are permitted provided that the following conditions are
+*  met:
+*
+*  * Redistributions of source code must retain the above copyright
+*  notice, this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above
+*  copyright notice, this list of conditions and the following disclaimer
+*  in the documentation and/or other materials provided with the
+*  distribution.
+*
+*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*  You can contact the author at :
+*  - xxHash homepage: http://www.xxhash.com
+*  - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+*  Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on a compiler, but violates the C standard.
+ *            It can generate buggy code on targets which do not support unaligned memory accesses.
+ *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2).
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define XXH_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
+ * When this option is enabled, xxHash output for a null input pointer will be the same as for a zero-length input.
+ * By default, this option is disabled. To enable it, uncomment the define below :
+ */
+/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
+ * Results are therefore identical on little-endian and big-endian CPUs.
+ * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed for big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
+#  define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash; set to 0 when the input data
+ * is guaranteed to be aligned.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+
+/* *************************************
+*  Includes & Memory related functions
+***************************************/
+/* Modify the local functions below should you wish to use some other memory routines */
+/* for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void  XXH_free  (void* p)  { free(p); }
+/* for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/* *************************************
+*  Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
+#  define FORCE_INLINE static __forceinline
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* *************************************
+*  Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# define MEM_MODULE
+# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint8_t  BYTE;
+    typedef uint16_t U16;
+    typedef uint32_t U32;
+    typedef  int32_t S32;
+    typedef uint64_t U64;
+#  else
+    typedef unsigned char      BYTE;
+    typedef unsigned short     U16;
+    typedef unsigned int       U32;
+    typedef   signed int       S32;
+    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+#  endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U32 XXH_read32(const void* memPtr)
+{
+    U32 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+static U64 XXH_read64(const void* memPtr)
+{
+    U64 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+/* ****************************************
+*  Compiler-specific Functions and Macros
+******************************************/
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
+#if defined(_MSC_VER)
+#  define XXH_rotl32(x,r) _rotl(x,r)
+#  define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
+#endif
+
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap32 _byteswap_ulong
+#  define XXH_swap64 _byteswap_uint64
+#elif GCC_VERSION >= 403
+#  define XXH_swap32 __builtin_bswap32
+#  define XXH_swap64 __builtin_bswap64
+#else
+static U32 XXH_swap32 (U32 x)
+{
+    return  ((x << 24) & 0xff000000 ) |
+            ((x <<  8) & 0x00ff0000 ) |
+            ((x >>  8) & 0x0000ff00 ) |
+            ((x >> 24) & 0x000000ff );
+}
+static U64 XXH_swap64 (U64 x)
+{
+    return  ((x << 56) & 0xff00000000000000ULL) |
+            ((x << 40) & 0x00ff000000000000ULL) |
+            ((x << 24) & 0x0000ff0000000000ULL) |
+            ((x << 8)  & 0x000000ff00000000ULL) |
+            ((x >> 8)  & 0x00000000ff000000ULL) |
+            ((x >> 24) & 0x0000000000ff0000ULL) |
+            ((x >> 40) & 0x000000000000ff00ULL) |
+            ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+
+/* *************************************
+*  Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+    static const int g_one = 1;
+#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
+#endif
+
+
+/* ***************************
+*  Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/* *************************************
+*  Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
+
+
+/* *************************************
+*  Constants
+***************************************/
+static const U32 PRIME32_1 = 2654435761U;
+static const U32 PRIME32_2 = 2246822519U;
+static const U32 PRIME32_3 = 3266489917U;
+static const U32 PRIME32_4 =  668265263U;
+static const U32 PRIME32_5 =  374761393U;
+
+static const U64 PRIME64_1 = 11400714785074694791ULL;
+static const U64 PRIME64_2 = 14029467366897019727ULL;
+static const U64 PRIME64_3 =  1609587929392839161ULL;
+static const U64 PRIME64_4 =  9650029242287828579ULL;
+static const U64 PRIME64_5 =  2870177450012600261ULL;
+
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* **************************
+*  Utils
+****************************/
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
+{
+    memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
+{
+    memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+
+/* ***************************
+*  Simple Hash Functions
+*****************************/
+
+static U32 XXH32_round(U32 seed, U32 input)
+{
+    seed += input * PRIME32_2;
+    seed  = XXH_rotl32(seed, 13);
+    seed *= PRIME32_1;
+    return seed;
+}
+
+FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* bEnd = p + len;
+    U32 h32;
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (p==NULL) {
+        len=0;
+        bEnd=p=(const BYTE*)(size_t)16;
+    }
+#endif
+
+    if (len>=16) {
+        const BYTE* const limit = bEnd - 16;
+        U32 v1 = seed + PRIME32_1 + PRIME32_2;
+        U32 v2 = seed + PRIME32_2;
+        U32 v3 = seed + 0;
+        U32 v4 = seed - PRIME32_1;
+
+        do {
+            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+        } while (p<=limit);
+
+        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+    } else {
+        h32  = seed + PRIME32_5;
+    }
+
+    h32 += (U32) len;
+
+    while (p+4<=bEnd) {
+        h32 += XXH_get32bits(p) * PRIME32_3;
+        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h32 += (*p) * PRIME32_5;
+        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+        p++;
+    }
+
+    h32 ^= h32 >> 15;
+    h32 *= PRIME32_2;
+    h32 ^= h32 >> 13;
+    h32 *= PRIME32_3;
+    h32 ^= h32 >> 16;
+
+    return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+{
+#if 0
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH32_CREATESTATE_STATIC(state);
+    XXH32_reset(state, seed);
+    XXH32_update(state, input, len);
+    return XXH32_digest(state);
+#else
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
+            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+            else
+                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }   }
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+    else
+        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+    acc += input * PRIME64_2;
+    acc  = XXH_rotl64(acc, 31);
+    acc *= PRIME64_1;
+    return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+    val  = XXH64_round(0, val);
+    acc ^= val;
+    acc  = acc * PRIME64_1 + PRIME64_4;
+    return acc;
+}
+
+FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+    U64 h64;
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (p==NULL) {
+        len=0;
+        bEnd=p=(const BYTE*)(size_t)32;
+    }
+#endif
+
+    if (len>=32) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = seed + PRIME64_1 + PRIME64_2;
+        U64 v2 = seed + PRIME64_2;
+        U64 v3 = seed + 0;
+        U64 v4 = seed - PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+        } while (p<=limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64  = seed + PRIME64_5;
+    }
+
+    h64 += (U64) len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
+        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_CREATESTATE_STATIC(state);
+    XXH64_reset(state, seed);
+    XXH64_update(state, input, len);
+    return XXH64_digest(state);
+#else
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+            else
+                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }   }
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+    else
+        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+/* **************************************************
+*  Advanced Hash Functions
+****************************************************/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+
+/*** Hash feed ***/
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+{
+    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
+    state.v1 = seed + PRIME32_1 + PRIME32_2;
+    state.v2 = seed + PRIME32_2;
+    state.v3 = seed + 0;
+    state.v4 = seed - PRIME32_1;
+    memcpy(statePtr, &state, sizeof(state));
+    return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
+    state.v1 = seed + PRIME64_1 + PRIME64_2;
+    state.v2 = seed + PRIME64_2;
+    state.v3 = seed + 0;
+    state.v4 = seed - PRIME64_1;
+    memcpy(statePtr, &state, sizeof(state));
+    return XXH_OK;
+}
+
+
+FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (input==NULL) return XXH_ERROR;
+#endif
+
+    state->total_len_32 += (unsigned)len;
+    state->large_len |= (len>=16) | (state->total_len_32>=16);
+
+    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
+        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+        state->memsize += (unsigned)len;
+        return XXH_OK;
+    }
+
+    if (state->memsize) {   /* some data left from previous update */
+        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+        {   const U32* p32 = state->mem32;
+            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
+        }
+        p += 16-state->memsize;
+        state->memsize = 0;
+    }
+
+    if (p <= bEnd-16) {
+        const BYTE* const limit = bEnd - 16;
+        U32 v1 = state->v1;
+        U32 v2 = state->v2;
+        U32 v3 = state->v3;
+        U32 v4 = state->v4;
+
+        do {
+            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+        } while (p<=limit);
+
+        state->v1 = v1;
+        state->v2 = v2;
+        state->v3 = v3;
+        state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+        state->memsize = (unsigned)(bEnd-p);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
+    else
+        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+{
+    const BYTE * p = (const BYTE*)state->mem32;
+    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
+    U32 h32;
+
+    if (state->large_len) {
+        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+    } else {
+        h32 = state->v3 /* == seed */ + PRIME32_5;
+    }
+
+    h32 += state->total_len_32;
+
+    while (p+4<=bEnd) {
+        h32 += XXH_readLE32(p, endian) * PRIME32_3;
+        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h32 += (*p) * PRIME32_5;
+        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
+        p++;
+    }
+
+    h32 ^= h32 >> 15;
+    h32 *= PRIME32_2;
+    h32 ^= h32 >> 13;
+    h32 *= PRIME32_3;
+    h32 ^= h32 >> 16;
+
+    return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_digest_endian(state_in, XXH_littleEndian);
+    else
+        return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+
+/* **** XXH64 **** */
+
+FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (input==NULL) return XXH_ERROR;
+#endif
+
+    state->total_len += len;
+
+    if (state->memsize + len < 32) {  /* fill in tmp buffer */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+        state->memsize += (U32)len;
+        return XXH_OK;
+    }
+
+    if (state->memsize) {   /* tmp buffer is full */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+        p += 32-state->memsize;
+        state->memsize = 0;
+    }
+
+    if (p+32 <= bEnd) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = state->v1;
+        U64 v2 = state->v2;
+        U64 v3 = state->v3;
+        U64 v4 = state->v4;
+
+        do {
+            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+        } while (p<=limit);
+
+        state->v1 = v1;
+        state->v2 = v2;
+        state->v3 = v3;
+        state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+        state->memsize = (unsigned)(bEnd-p);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+    else
+        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+    const BYTE * p = (const BYTE*)state->mem64;
+    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
+    U64 h64;
+
+    if (state->total_len >= 32) {
+        U64 const v1 = state->v1;
+        U64 const v2 = state->v2;
+        U64 const v3 = state->v3;
+        U64 const v4 = state->v4;
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+    } else {
+        h64  = state->v3 + PRIME64_5;
+    }
+
+    h64 += (U64) state->total_len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
+        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_digest_endian(state_in, XXH_littleEndian);
+    else
+        return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/* **************************
+*  Canonical representation
+****************************/
+
+/*! Default XXH result types are basic unsigned 32 and 64 bit integers.
+*   The canonical representation follows the human-readable write convention, aka big-endian (large digits first).
+*   These functions allow transformation of a hash result into and from its canonical format.
+*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+    memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+    memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+    return XXH_readBE32(src);
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+    return XXH_readBE64(src);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/xxhash.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,309 @@
+/*
+   xxHash - Extremely Fast Hash algorithm
+   Header File
+   Copyright (C) 2012-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
+
+Name            Speed       Q.Score   Author
+xxHash          5.4 GB/s     10
+CrapWow         3.2 GB/s      2       Andrew
+MurmurHash 3a   2.7 GB/s     10       Austin Appleby
+SpookyHash      2.0 GB/s     10       Bob Jenkins
+SBox            1.4 GB/s      9       Bret Mulvey
+Lookup3         1.2 GB/s      9       Bob Jenkins
+SuperFastHash   1.2 GB/s      1       Paul Hsieh
+CityHash64      1.05 GB/s    10       Pike & Alakuijala
+FNV             0.55 GB/s     5       Fowler, Noll, Vo
+CRC32           0.43 GB/s     9
+MD5-32          0.33 GB/s    10       Ronald L. Rivest
+SHA1-32         0.28 GB/s    10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name     Speed on 64 bits    Speed on 32 bits
+XXH64       13.8 GB/s            1.9 GB/s
+XXH32        6.8 GB/s            6.0 GB/s
+*/
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef XXH_NAMESPACE
+#  define XXH_NAMESPACE ZSTD_  /* Zstandard specific */
+#endif
+
+
+/* ****************************
+*  Definitions
+******************************/
+#include <stddef.h>   /* size_t */
+typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
+
+
+/* ****************************
+*  API modifier
+******************************/
+/** XXH_PRIVATE_API
+*   This is useful if you want to include xxhash functions in `static` mode
+*   in order to inline them, and remove their symbols from the public list.
+*   Methodology :
+*     #define XXH_PRIVATE_API
+*     #include "xxhash.h"
+*   `xxhash.c` is automatically included.
+*   There is then no need to compile and link it as a separate module.
+*/
+#ifdef XXH_PRIVATE_API
+#  ifndef XXH_STATIC_LINKING_ONLY
+#    define XXH_STATIC_LINKING_ONLY
+#  endif
+#  if defined(__GNUC__)
+#    define XXH_PUBLIC_API static __inline __attribute__((unused))
+#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#    define XXH_PUBLIC_API static inline
+#  elif defined(_MSC_VER)
+#    define XXH_PUBLIC_API static __inline
+#  else
+#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
+#  endif
+#else
+#  define XXH_PUBLIC_API   /* do nothing */
+#endif /* XXH_PRIVATE_API */
+
+/*!XXH_NAMESPACE, aka Namespace Emulation :
+
+If you want to include _and expose_ xxHash functions from within your own library,
+but also want to avoid symbol collisions with another library which also includes xxHash,
+you can use XXH_NAMESPACE to automatically prefix every public symbol from the xxhash
+library with the value of XXH_NAMESPACE (so avoid leaving it empty, and avoid numeric values).
+
+Note that no change is required within the calling program as long as it includes `xxhash.h` :
+regular symbol names will be automatically translated by this header.
+*/
+#ifdef XXH_NAMESPACE
+#  define XXH_CAT(A,B) A##B
+#  define XXH_NAME2(A,B) XXH_CAT(A,B)
+#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
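+
+/* With the ZSTD_ namespace set above, the linker-visible symbol for XXH32()
+   becomes ZSTD_XXH32, while calling code keeps writing XXH32(). */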
+
+
+/* *************************************
+*  Version
+***************************************/
+#define XXH_VERSION_MAJOR    0
+#define XXH_VERSION_MINOR    6
+#define XXH_VERSION_RELEASE  2
+#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+*  Simple Hash Functions
+******************************/
+typedef unsigned int       XXH32_hash_t;
+typedef unsigned long long XXH64_hash_t;
+
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
+
+/*!
+XXH32() :
+    Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
+    The memory between input & input+length must be valid (allocated and read-accessible).
+    "seed" can be used to alter the result predictably.
+    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
+XXH64() :
+    Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
+    "seed" can be used to alter the result predictably.
+    This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
+*/
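+
+/* One-shot usage sketch (illustrative) :
+
+       const char msg[] = "xxhash example";
+       XXH32_hash_t const h32 = XXH32(msg, sizeof(msg)-1, 0);     (seed 0)
+       XXH64_hash_t const h64 = XXH64(msg, sizeof(msg)-1, 0);
+*/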
+
+
+/* ****************************
+*  Streaming Hash Functions
+******************************/
+typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
+typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
+
+/*! State allocation, compatible with dynamic libraries */
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
+
+
+/* hash streaming */
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
+
+/*
+These functions generate the xxHash of an input provided in multiple segments.
+Note that, for small inputs, they are slower than the single-call functions, due to state management.
+For small inputs, prefer `XXH32()` and `XXH64()`.
+
+The XXH state must first be allocated, using XXH*_createState().
+
+Start a new hash by initializing the state with a seed, using XXH*_reset().
+
+Then, feed the hash state by calling XXH*_update() as many times as necessary.
+Obviously, the input must be allocated and read-accessible.
+Each of these functions returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+
+Finally, a hash value can be produced at any time, by using XXH*_digest().
+This function returns the nn-bit hash as an unsigned int or unsigned long long.
+
+It's still possible to continue inserting input into the hash state after a digest,
+and generate new hashes later on, by calling XXH*_digest() again.
+
+When done, free the XXH state space if it was allocated dynamically.
+*/
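+
+/* A minimal streaming sketch ; `read_chunk` is a hypothetical producer, and
+   error codes are checked as documented above (0 meaning OK) :
+
+       XXH64_state_t* const state = XXH64_createState();
+       XXH64_hash_t hash;
+       char buf[4096];
+       size_t filled;
+       if (state == NULL) abort();
+       if (XXH64_reset(state, 0) != 0) abort();
+       while ((filled = read_chunk(buf, sizeof(buf))) != 0)
+           if (XXH64_update(state, buf, filled) != 0) abort();
+       hash = XXH64_digest(state);
+       XXH64_freeState(state);
+*/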
+
+
+/* **************************
+*  Utils
+****************************/
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* ! C99 */
+#  define restrict   /* disable restrict */
+#endif
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);
+
+
+/* **************************
+*  Canonical representation
+****************************/
+typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
+typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
+
+/* The default result type of the XXH functions is a primitive unsigned 32-bit or 64-bit integer.
+*  The canonical representation uses the human-readable write convention, aka big-endian (large digits first).
+*  These functions allow converting a hash value into and from its canonical format.
+*  This way, hash values can be written into a file / memory, and remain comparable across different systems and programs.
+*/
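+
+/* A short storage sketch (assumes <stdio.h>, an open FILE* `f`, and a
+   previously computed XXH64_hash_t `hash`) :
+
+       XXH64_canonical_t canonical;
+       XXH64_canonicalFromHash(&canonical, hash);
+       fwrite(canonical.digest, 1, sizeof(canonical.digest), f);
+
+   and reading it back, on any platform, regardless of endianness :
+
+       XXH64_canonical_t stored;
+       fread(stored.digest, 1, sizeof(stored.digest), f);
+       XXH64_hash_t const h = XXH64_hashFromCanonical(&stored);
+*/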
+
+
+#ifdef XXH_STATIC_LINKING_ONLY
+
+/* ================================================================================================
+   This section contains definitions which are not guaranteed to remain stable.
+   They may change in future versions, becoming incompatible with a different version of the library.
+   They shall only be used with static linking.
+   Never use these definitions in association with dynamic linking !
+=================================================================================================== */
+
+/* These definitions are only meant to allow allocation of XXH state
+   statically, on the stack, or in a struct, for example.
+   Do not use members directly. */
+
+   struct XXH32_state_s {
+       unsigned total_len_32;
+       unsigned large_len;
+       unsigned v1;
+       unsigned v2;
+       unsigned v3;
+       unsigned v4;
+       unsigned mem32[4];   /* buffer defined as U32 for alignment */
+       unsigned memsize;
+       unsigned reserved;   /* never read nor written; will be removed in a future version */
+   };   /* typedef'd to XXH32_state_t */
+
+   struct XXH64_state_s {
+       unsigned long long total_len;
+       unsigned long long v1;
+       unsigned long long v2;
+       unsigned long long v3;
+       unsigned long long v4;
+       unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
+       unsigned memsize;
+       unsigned reserved[2];          /* never read nor written; will be removed in a future version */
+   };   /* typedef'd to XXH64_state_t */
+
+
+#  ifdef XXH_PRIVATE_API
+#    include "xxhash.c"   /* include xxhash functions as `static`, for inlining */
+#  endif
+
+#endif /* XXH_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* XXHASH_H_5627135585666179 */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/zstd_common.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,77 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdlib.h>         /* malloc */
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"           /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
+
+
+/*-****************************************
+*  Version
+******************************************/
+unsigned ZSTD_versionNumber (void) { return ZSTD_VERSION_NUMBER; }
+
+
+/*-****************************************
+*  ZSTD Error Management
+******************************************/
+/*! ZSTD_isError() :
+*   tells if a return value is an error code */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName() :
+*   provides error code string from function result (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+/*! ZSTD_getErrorCode() :
+*   convert a `size_t` function result into a proper ZSTD_ErrorCode enum */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+
+/*! ZSTD_getErrorString() :
+*   provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorName(code); }
+
+/* ---   ZBUFF Error Management  (deprecated)   --- */
+unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
+const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+
+/*=**************************************************************
+*  Custom allocator
+****************************************************************/
+/* default uses stdlib */
+void* ZSTD_defaultAllocFunction(void* opaque, size_t size)
+{
+    void* address = malloc(size);
+    (void)opaque;
+    return address;
+}
+
+void ZSTD_defaultFreeFunction(void* opaque, void* address)
+{
+    (void)opaque;
+    free(address);
+}
+
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
+{
+    return customMem.customAlloc(customMem.opaque, size);
+}
+
+void ZSTD_free(void* ptr, ZSTD_customMem customMem)
+{
+    if (ptr!=NULL)
+        customMem.customFree(customMem.opaque, ptr);
+}
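+
+/* A hedged usage sketch : routing allocations through a user allocator.
+   `loggingAlloc` and `allocCount` are illustrative names; the ZSTD_customMem
+   fields are assumed to be { customAlloc, customFree, opaque }, matching the
+   defaultCustomMem initializer in zstd_internal.h :
+
+       static void* loggingAlloc(void* opaque, size_t size)
+       {
+           *(size_t*)opaque += 1;            count each allocation
+           return malloc(size);
+       }
+
+       static size_t allocCount = 0;
+       ZSTD_customMem const mem = { loggingAlloc, ZSTD_defaultFreeFunction, &allocCount };
+       void* const p = ZSTD_malloc(100, mem);   calls loggingAlloc(&allocCount, 100)
+       ZSTD_free(p, mem);
+*/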
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/zstd_errors.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,60 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*===== dependency =====*/
+#include <stddef.h>   /* size_t */
+
+
+/*-****************************************
+*  error codes list
+******************************************/
+typedef enum {
+  ZSTD_error_no_error,
+  ZSTD_error_GENERIC,
+  ZSTD_error_prefix_unknown,
+  ZSTD_error_version_unsupported,
+  ZSTD_error_parameter_unknown,
+  ZSTD_error_frameParameter_unsupported,
+  ZSTD_error_frameParameter_unsupportedBy32bits,
+  ZSTD_error_frameParameter_windowTooLarge,
+  ZSTD_error_compressionParameter_unsupported,
+  ZSTD_error_init_missing,
+  ZSTD_error_memory_allocation,
+  ZSTD_error_stage_wrong,
+  ZSTD_error_dstSize_tooSmall,
+  ZSTD_error_srcSize_wrong,
+  ZSTD_error_corruption_detected,
+  ZSTD_error_checksum_wrong,
+  ZSTD_error_tableLog_tooLarge,
+  ZSTD_error_maxSymbolValue_tooLarge,
+  ZSTD_error_maxSymbolValue_tooSmall,
+  ZSTD_error_dictionary_corrupted,
+  ZSTD_error_dictionary_wrong,
+  ZSTD_error_maxCode
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+    which can be compared directly against the enum list defined above */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code);
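+
+/* A short usage sketch (assumes <stdio.h>, that `ret` holds the size_t
+   result of a ZSTD_* call, and ZSTD_isError() from zstd.h;
+   `retry_with_larger_dst` is a hypothetical recovery path) :
+
+       if (ZSTD_isError(ret)) {
+           ZSTD_ErrorCode const code = ZSTD_getErrorCode(ret);
+           if (code == ZSTD_error_dstSize_tooSmall)
+               retry_with_larger_dst();
+           else
+               fprintf(stderr, "%s\n", ZSTD_getErrorString(code));
+       }
+*/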
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_ERRORS_H_398273423 */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/zstd_internal.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,270 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/*-*******************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+#ifdef _MSC_VER
+#  define FORCE_NOINLINE static __declspec(noinline)
+#else
+#  ifdef __GNUC__
+#    define FORCE_NOINLINE static __attribute__((__noinline__))
+#  else
+#    define FORCE_NOINLINE static
+#  endif
+#endif
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+
+
+/*-*************************************
+*  shared macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check and Forward error code */
+#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); }  /* check and send Error code */
+
+
+/*-*************************************
+*  Common constants
+***************************************/
+#define ZSTD_OPT_NUM    (1<<12)
+#define ZSTD_DICT_MAGIC  0xEC30A437   /* v0.7+ */
+
+#define ZSTD_REP_NUM      3                 /* number of repcodes */
+#define ZSTD_REP_CHECK    (ZSTD_REP_NUM)    /* number of repcodes to check by the optimal parser */
+#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
+#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
+static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
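+/* note : these are postfix multipliers ; `512 KB` expands to
+   `512 *(1 <<10)` == 524288 */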
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+#define BIT1   2
+#define BIT0   1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTD_BLOCKHEADERSIZE 3   /* the C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
+static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+#define EQUAL_READ32 4
+
+#define Litbits  8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML  52
+#define MaxLL  35
+#define MaxOff 28
+#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog    9
+#define LLFSELog    9
+#define OffFSELog   8
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
+                                     13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+                                            -1,-1,-1,-1 };
+#define LL_DEFAULTNORMLOG 6  /* for static allocation */
+static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
+                                     12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
+                                            -1,-1,-1,-1,-1 };
+#define ML_DEFAULTNORMLOG 6  /* for static allocation */
+static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+                                              1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
+#define OF_DEFAULTNORMLOG 5  /* for static allocation */
+static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+
+
+/*-*******************************************
+*  Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy() :
+*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
+
+MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platforms */
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = (BYTE*)dstEnd;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
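+
+/* A caller-side sketch : destinations must keep WILDCOPY_OVERLENGTH spare
+   bytes, because the loops above copy in whole 8-byte steps
+   (`src` is assumed to be a readable buffer of at least 64 bytes) :
+
+       BYTE out[60 + WILDCOPY_OVERLENGTH];    slack absorbs the overrun
+       ZSTD_wildcopy(out, src, 60);           rounds up to 64 bytes written,
+                                              i.e. 4 bytes past out+60
+*/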
+
+
+/*-*******************************************
+*  Private interfaces
+*********************************************/
+typedef struct ZSTD_stats_s ZSTD_stats_t;
+
+typedef struct {
+    U32 off;
+    U32 len;
+} ZSTD_match_t;
+
+typedef struct {
+    U32 price;
+    U32 off;
+    U32 mlen;
+    U32 litlen;
+    U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
+
+typedef struct seqDef_s {
+    U32 offset;
+    U16 litLength;
+    U16 matchLength;
+} seqDef;
+
+
+typedef struct {
+    seqDef* sequencesStart;
+    seqDef* sequences;
+    BYTE* litStart;
+    BYTE* lit;
+    BYTE* llCode;
+    BYTE* mlCode;
+    BYTE* ofCode;
+    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+    U32   longLengthPos;
+    /* opt */
+    ZSTD_optimal_t* priceTable;
+    ZSTD_match_t* matchTable;
+    U32* matchLengthFreq;
+    U32* litLengthFreq;
+    U32* litFreq;
+    U32* offCodeFreq;
+    U32  matchLengthSum;
+    U32  matchSum;
+    U32  litLengthSum;
+    U32  litSum;
+    U32  offCodeSum;
+    U32  log2matchLengthSum;
+    U32  log2matchSum;
+    U32  log2litLengthSum;
+    U32  log2litSum;
+    U32  log2offCodeSum;
+    U32  factor;
+    U32  staticPrices;
+    U32  cachedPrice;
+    U32  cachedLitLength;
+    const BYTE* cachedLiterals;
+} seqStore_t;
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);
+int ZSTD_isSkipFrame(ZSTD_DCtx* dctx);
+
+/* custom memory allocation functions */
+void* ZSTD_defaultAllocFunction(void* opaque, size_t size);
+void ZSTD_defaultFreeFunction(void* opaque, void* address);
+#ifndef ZSTD_DLL_IMPORT
+static const ZSTD_customMem defaultCustomMem = { ZSTD_defaultAllocFunction, ZSTD_defaultFreeFunction, NULL };
+#endif
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_free(void* ptr, ZSTD_customMem customMem);
+
+
+/*======  common function  ======*/
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse(&r, val);
+    return (unsigned)r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
+    return 31 - __builtin_clz(val);
+#   else   /* Software version */
+    static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    int r;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27];
+    return r;
+#   endif
+}
+
+
+#endif   /* ZSTD_CCOMMON_H_MODULE */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/fse_compress.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,850 @@
+/* ******************************************************************
+   FSE : Finite State Entropy encoder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+#include "bitstream.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included
+  for type-specific functions (template emulation in C)
+  Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` should be sized for the worst-case situation, which is `(1<<max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`.
+ * `workSpace` must also be properly aligned for FSE_FUNCTION_TYPE.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+    U32 const tableSize = 1 << tableLog;
+    U32 const tableMask = tableSize - 1;
+    void* const ptr = ct;
+    U16* const tableU16 = ( (U16*) ptr) + 2;
+    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
+    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+    U32 const step = FSE_TABLESTEP(tableSize);
+    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
+
+    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
+    U32 highThreshold = tableSize-1;
+
+    /* CTable header */
+    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
+    tableU16[-2] = (U16) tableLog;
+    tableU16[-1] = (U16) maxSymbolValue;
+
+    /* For explanations on how to distribute symbol values over the table :
+    *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+    /* symbol start positions */
+    {   U32 u;
+        cumul[0] = 0;
+        for (u=1; u<=maxSymbolValue+1; u++) {
+            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
+                cumul[u] = cumul[u-1] + 1;
+                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
+            } else {
+                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+        }   }
+        cumul[maxSymbolValue+1] = tableSize+1;
+    }
+
+    /* Spread symbols */
+    {   U32 position = 0;
+        U32 symbol;
+        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+            int nbOccurences;
+            for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
+                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* Low proba area */
+        }   }
+
+        if (position!=0) return ERROR(GENERIC);   /* Must have gone through all positions */
+    }
+
+    /* Build table */
+    {   U32 u; for (u=0; u<tableSize; u++) {
+        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
+        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
+    }   }
+
+    /* Build Symbol Transformation Table */
+    {   unsigned total = 0;
+        unsigned s;
+        for (s=0; s<=maxSymbolValue; s++) {
+            switch (normalizedCounter[s])
+            {
+            case  0: break;
+
+            case -1:
+            case  1:
+                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
+                symbolTT[s].deltaFindState = total - 1;
+                total ++;
+                break;
+            default :
+                {
+                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
+                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
+                    total +=  normalizedCounter[s];
+    }   }   }   }
+
+    return 0;
+}
+
+
+size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if static analyzers complain about it */
+    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
+    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
+}
+
+static short FSE_abs(short a) { return (short)(a<0 ? -a : a); }
+
+static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+                                       const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+                                       unsigned writeIsSafe)
+{
+    BYTE* const ostart = (BYTE*) header;
+    BYTE* out = ostart;
+    BYTE* const oend = ostart + headerBufferSize;
+    int nbBits;
+    const int tableSize = 1 << tableLog;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    bitStream = 0;
+    bitCount  = 0;
+    /* Table Size */
+    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
+    bitCount  += 4;
+
+    /* Init */
+    remaining = tableSize+1;   /* +1 for extra accuracy */
+    threshold = tableSize;
+    nbBits = tableLog+1;
+
+    while (remaining>1) {  /* stops at 1 */
+        if (previous0) {
+            unsigned start = charnum;
+            while (!normalizedCounter[charnum]) charnum++;
+            while (charnum >= start+24) {
+                start+=24;
+                bitStream += 0xFFFFU << bitCount;
+                if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                out[0] = (BYTE) bitStream;
+                out[1] = (BYTE)(bitStream>>8);
+                out+=2;
+                bitStream>>=16;
+            }
+            while (charnum >= start+3) {
+                start+=3;
+                bitStream += 3 << bitCount;
+                bitCount += 2;
+            }
+            bitStream += (charnum-start) << bitCount;
+            bitCount += 2;
+            if (bitCount>16) {
+                if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                out[0] = (BYTE)bitStream;
+                out[1] = (BYTE)(bitStream>>8);
+                out += 2;
+                bitStream >>= 16;
+                bitCount -= 16;
+        }   }
+        {   short count = normalizedCounter[charnum++];
+            const short max = (short)((2*threshold-1)-remaining);
+            remaining -= FSE_abs(count);
+            if (remaining<1) return ERROR(GENERIC);
+            count++;   /* +1 for extra accuracy */
+            if (count>=threshold) count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+            bitStream += count << bitCount;
+            bitCount  += nbBits;
+            bitCount  -= (count<max);
+            previous0  = (count==1);
+            while (remaining<threshold) nbBits--, threshold>>=1;
+        }
+        if (bitCount>16) {
+            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+            out[0] = (BYTE)bitStream;
+            out[1] = (BYTE)(bitStream>>8);
+            out += 2;
+            bitStream >>= 16;
+            bitCount -= 16;
+    }   }
+
+    /* flush remaining bitStream */
+    if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+    out[0] = (BYTE)bitStream;
+    out[1] = (BYTE)(bitStream>>8);
+    out+= (bitCount+7) /8;
+
+    if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);
+
+    return (out-ostart);
+}
+
+
+size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
+    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
+
+    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
+    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
+}
+
+
+
+/*-**************************************************************
+*  Counting histogram
+****************************************************************/
+/*! FSE_count_simple
+    This function counts byte values within `src`, and stores the histogram into table `count`.
+    It doesn't use any additional memory.
+    But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
+    For this reason, prefer using a table `count` with 256 elements.
+    @return : count of the most numerous element
+*/
+size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+                        const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* const end = ip + srcSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned max=0;
+
+    memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
+    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+    while (ip<end) count[*ip++]++;
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+
+    { U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }
+
+    return (size_t)max;
+}
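+
+/* A minimal usage sketch, with the 256-element table recommended above
+   (`buf` and `bufSize` are hypothetical) :
+
+       unsigned count[256];
+       unsigned maxSymbolValue = 255;
+       size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, buf, bufSize);
+
+   On return, maxSymbolValue is lowered to the largest byte value present. */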
+
+
+/* FSE_count_parallel_wksp() :
+ * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
+ * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)` */
+static size_t FSE_count_parallel_wksp(
+                                unsigned* count, unsigned* maxSymbolValuePtr,
+                                const void* source, size_t sourceSize,
+                                unsigned checkMax, unsigned* const workSpace)
+{
+    const BYTE* ip = (const BYTE*)source;
+    const BYTE* const iend = ip+sourceSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned max=0;
+    U32* const Counting1 = workSpace;
+    U32* const Counting2 = Counting1 + 256;
+    U32* const Counting3 = Counting2 + 256;
+    U32* const Counting4 = Counting3 + 256;
+
+    memset(Counting1, 0, 4*256*sizeof(unsigned));
+
+    /* safety checks */
+    if (!sourceSize) {
+        memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));   /* zero the full unsigned table, matching FSE_count_simple() */
+        *maxSymbolValuePtr = 0;
+        return 0;
+    }
+    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */
+
+    /* by stripes of 16 bytes */
+    {   U32 cached = MEM_read32(ip); ip += 4;
+        while (ip < iend-15) {
+            U32 c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+        }
+        ip-=4;
+    }
+
+    /* finish last symbols */
+    while (ip<iend) Counting1[*ip++]++;
+
+    if (checkMax) {   /* verify stats will fit into destination table */
+        U32 s; for (s=255; s>maxSymbolValue; s--) {
+            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
+    }   }
+
+    {   U32 s; for (s=0; s<=maxSymbolValue; s++) {
+            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+            if (count[s] > max) max = count[s];
+    }   }
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+    return (size_t)max;
+}
+
+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned */
+size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                     const void* source, size_t sourceSize, unsigned* workSpace)
+{
+    if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+    return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
+}
+
+/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+                     const void* source, size_t sourceSize)
+{
+    unsigned tmpCounters[1024];
+    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
+}
+
+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned */
+size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* source, size_t sourceSize, unsigned* workSpace)
+{
+    if (*maxSymbolValuePtr < 255)
+        return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
+    *maxSymbolValuePtr = 255;
+    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
+}
+
+size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* src, size_t srcSize)
+{
+    unsigned tmpCounters[1024];
+    return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
+}
+
+
+
+/*-**************************************************************
+*  FSE Compression Code
+****************************************************************/
+/*! FSE_sizeof_CTable() :
+    FSE_CTable is a variable size structure which contains :
+    `U16 tableLog;`
+    `U16 maxSymbolValue;`
+    `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
+    `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
+Allocation is manual (C standard does not support variable-size structures).
+*/
+size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+}
+
+FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+    size_t size;
+    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+    return (FSE_CTable*)malloc(size);
+}
+
+void FSE_freeCTable (FSE_CTable* ct) { free(ct); }
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
+    U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
+    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+    return minBits;
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
+    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+    U32 tableLog = maxTableLog;
+    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
+    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
+    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
+    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
+    return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
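+
+/* A worked example of the logic above : with maxTableLog==0, srcSize==4096,
+   maxSymbolValue==255, minus==2 :
+       minBitsSrc     = highbit32(4095) + 1 = 12
+       minBitsSymbols = highbit32(255)  + 2 = 9    ==> minBits = 9
+       maxBitsSrc     = highbit32(4095) - 2 = 9
+   tableLog starts at FSE_DEFAULT_TABLELOG, is reduced to maxBitsSrc (9),
+   already meets minBits, and sits within [FSE_MIN_TABLELOG, FSE_MAX_TABLELOG],
+   so the function returns 9. */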
+
+
+/* Secondary normalization method.
+   To be used when primary method fails. */
+
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
+{
+    U32 s;
+    U32 distributed = 0;
+    U32 ToDistribute;
+
+    /* Init */
+    U32 const lowThreshold = (U32)(total >> tableLog);
+    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+    for (s=0; s<=maxSymbolValue; s++) {
+        if (count[s] == 0) {
+            norm[s]=0;
+            continue;
+        }
+        if (count[s] <= lowThreshold) {
+            norm[s] = -1;
+            distributed++;
+            total -= count[s];
+            continue;
+        }
+        if (count[s] <= lowOne) {
+            norm[s] = 1;
+            distributed++;
+            total -= count[s];
+            continue;
+        }
+        norm[s]=-2;
+    }
+    ToDistribute = (1 << tableLog) - distributed;
+
+    if ((total / ToDistribute) > lowOne) {
+        /* risk of rounding to zero */
+        lowOne = (U32)((total * 3) / (ToDistribute * 2));
+        for (s=0; s<=maxSymbolValue; s++) {
+            if ((norm[s] == -2) && (count[s] <= lowOne)) {
+                norm[s] = 1;
+                distributed++;
+                total -= count[s];
+                continue;
+        }   }
+        ToDistribute = (1 << tableLog) - distributed;
+    }
+
+    if (distributed == maxSymbolValue+1) {
+        /* all values are pretty poor;
+           probably incompressible data (should have already been detected);
+           find max, then give all remaining points to max */
+        U32 maxV = 0, maxC = 0;
+        for (s=0; s<=maxSymbolValue; s++)
+            if (count[s] > maxC) maxV=s, maxC=count[s];
+        norm[maxV] += (short)ToDistribute;
+        return 0;
+    }
+
+    {   U64 const vStepLog = 62 - tableLog;
+        U64 const mid = (1ULL << (vStepLog-1)) - 1;
+        U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total;   /* scale on remaining */
+        U64 tmpTotal = mid;
+        for (s=0; s<=maxSymbolValue; s++) {
+            if (norm[s]==-2) {
+                U64 const end = tmpTotal + (count[s] * rStep);
+                U32 const sStart = (U32)(tmpTotal >> vStepLog);
+                U32 const sEnd = (U32)(end >> vStepLog);
+                U32 const weight = sEnd - sStart;
+                if (weight < 1)
+                    return ERROR(GENERIC);
+                norm[s] = (short)weight;
+                tmpTotal = end;
+    }   }   }
+
+    return 0;
+}
+
+
+size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+                           const unsigned* count, size_t total,
+                           unsigned maxSymbolValue)
+{
+    /* Sanity checks */
+    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
+    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */
+
+    {   U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+        U64 const scale = 62 - tableLog;
+        U64 const step = ((U64)1<<62) / total;   /* <== here, one division ! */
+        U64 const vStep = 1ULL<<(scale-20);
+        int stillToDistribute = 1<<tableLog;
+        unsigned s;
+        unsigned largest=0;
+        short largestP=0;
+        U32 lowThreshold = (U32)(total >> tableLog);
+
+        for (s=0; s<=maxSymbolValue; s++) {
+            if (count[s] == total) return 0;   /* rle special case */
+            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
+            if (count[s] <= lowThreshold) {
+                normalizedCounter[s] = -1;
+                stillToDistribute--;
+            } else {
+                short proba = (short)((count[s]*step) >> scale);
+                if (proba<8) {
+                    U64 restToBeat = vStep * rtbTable[proba];
+                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
+                }
+                if (proba > largestP) largestP=proba, largest=s;
+                normalizedCounter[s] = proba;
+                stillToDistribute -= proba;
+        }   }
+        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+            /* corner case, need another normalization method */
+            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+            if (FSE_isError(errorCode)) return errorCode;
+        }
+        else normalizedCounter[largest] += (short)stillToDistribute;
+    }
+
+#if 0
+    {   /* Print Table (debug) */
+        U32 s;
+        U32 nTotal = 0;
+        for (s=0; s<=maxSymbolValue; s++)
+            printf("%3i: %4i \n", s, normalizedCounter[s]);
+        for (s=0; s<=maxSymbolValue; s++)
+            nTotal += abs(normalizedCounter[s]);
+        if (nTotal != (1U<<tableLog))
+            printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+        getchar();
+    }
+#endif
+
+    return tableLog;
+}
+
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
+{
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSymbolValue = tableMask;
+    void* const ptr = ct;
+    U16* const tableU16 = ( (U16*) ptr) + 2;
+    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
+    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);             /* min size */
+
+    /* header */
+    tableU16[-2] = (U16) nbBits;
+    tableU16[-1] = (U16) maxSymbolValue;
+
+    /* Build table */
+    for (s=0; s<tableSize; s++)
+        tableU16[s] = (U16)(tableSize + s);
+
+    /* Build Symbol Transformation Table */
+    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+        for (s=0; s<=maxSymbolValue; s++) {
+            symbolTT[s].deltaNbBits = deltaNbBits;
+            symbolTT[s].deltaFindState = s-1;
+    }   }
+
+    return 0;
+}
+
+/* fake FSE_CTable, for rle input (always same symbol) */
+size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
+{
+    void* ptr = ct;
+    U16* tableU16 = ( (U16*) ptr) + 2;
+    void* FSCTptr = (U32*)ptr + 2;
+    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
+
+    /* header */
+    tableU16[-2] = (U16) 0;
+    tableU16[-1] = (U16) symbolValue;
+
+    /* Build table */
+    tableU16[0] = 0;
+    tableU16[1] = 0;   /* just in case */
+
+    /* Build Symbol Transformation Table */
+    symbolTT[symbolValue].deltaNbBits = 0;
+    symbolTT[symbolValue].deltaFindState = 0;
+
+    return 0;
+}
+
+
+static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
+                           const void* src, size_t srcSize,
+                           const FSE_CTable* ct, const unsigned fast)
+{
+    const BYTE* const istart = (const BYTE*) src;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* ip=iend;
+
+    BIT_CStream_t bitC;
+    FSE_CState_t CState1, CState2;
+
+    /* init */
+    if (srcSize <= 2) return 0;
+    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
+
+#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+    if (srcSize & 1) {
+        FSE_initCState2(&CState1, ct, *--ip);
+        FSE_initCState2(&CState2, ct, *--ip);
+        FSE_encodeSymbol(&bitC, &CState1, *--ip);
+        FSE_FLUSHBITS(&bitC);
+    } else {
+        FSE_initCState2(&CState2, ct, *--ip);
+        FSE_initCState2(&CState1, ct, *--ip);
+    }
+
+    /* join to mod 4 */
+    srcSize -= 2;
+    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
+        FSE_encodeSymbol(&bitC, &CState2, *--ip);
+        FSE_encodeSymbol(&bitC, &CState1, *--ip);
+        FSE_FLUSHBITS(&bitC);
+    }
+
+    /* 2 or 4 encoding per loop */
+    while ( ip>istart ) {
+
+        FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
+            FSE_FLUSHBITS(&bitC);
+
+        FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
+            FSE_encodeSymbol(&bitC, &CState2, *--ip);
+            FSE_encodeSymbol(&bitC, &CState1, *--ip);
+        }
+
+        FSE_FLUSHBITS(&bitC);
+    }
+
+    FSE_flushCState(&bitC, &CState2);
+    FSE_flushCState(&bitC, &CState1);
+    return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+                           const void* src, size_t srcSize,
+                           const FSE_CTable* ct)
+{
+    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+
+    if (fast)
+        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+    else
+        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
+#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be `(1<<tableLog)`.
+ */
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + dstSize;
+
+    U32   count[FSE_MAX_SYMBOL_VALUE+1];
+    S16   norm[FSE_MAX_SYMBOL_VALUE+1];
+    FSE_CTable* CTable = (FSE_CTable*)workSpace;
+    size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
+    void* scratchBuffer = (void*)(CTable + CTableSize);
+    size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
+
+    /* init conditions */
+    if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
+    if (srcSize <= 1) return 0;  /* Not compressible */
+    if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+    if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
+
+    /* Scan input and build symbol stats */
+    {   CHECK_V_F(maxCount, FSE_count(count, &maxSymbolValue, src, srcSize) );
+        if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
+        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
+        if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
+    }
+
+    tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
+    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
+
+    /* Write table description header */
+    {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+        op += nc_err;
+    }
+
+    /* Compress */
+    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
+    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
+        if (cSize == 0) return 0;   /* not enough space for compressed data */
+        op += cSize;
+    }
+
+    /* check compressibility */
+    if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;
+
+    return op-ostart;
+}
+
+typedef struct {
+    FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+    BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
+} fseWkspMax_t;
+
+size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
+{
+    fseWkspMax_t scratchBuffer;
+    FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
+}
+
+size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
+}
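+
+/* A minimal caller sketch (destination sized with FSE_compressBound;
+   `src` and `srcSize` are assumed) :
+
+       size_t const bound = FSE_compressBound(srcSize);
+       void* const dst = malloc(bound);
+       size_t const cSize = FSE_compress(dst, bound, src, srcSize);
+
+   Per the logic above, 0 means not compressible, 1 means a single repeated
+   symbol (rle), and FSE_isError(cSize) must be checked for other failures. */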
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/huf_compress.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,609 @@
+/* ******************************************************************
+   Huffman encoder, part of New Generation Entropy library
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+#include "bitstream.h"
+#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
+#include "fse.h"        /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
+#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
+
+
+/* **************************************************************
+*  Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+
+/* *******************************************************
+*  HUF : Huffman block compression
+*********************************************************/
+/* HUF_compressWeights() :
+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
+ * The use case needs much less stack memory.
+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
+ */
+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
+size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + dstSize;
+
+    U32 maxSymbolValue = HUF_TABLELOG_MAX;
+    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+
+    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
+    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
+
+    U32 count[HUF_TABLELOG_MAX+1];
+    S16 norm[HUF_TABLELOG_MAX+1];
+
+    /* init conditions */
+    if (wtSize <= 1) return 0;  /* Not compressible */
+
+    /* Scan input and build symbol stats */
+    {   CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
+        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
+        if (maxCount == 1) return 0;         /* each symbol present at most once => not compressible */
+    }
+
+    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
+    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
+
+    /* Write table description header */
+    {   CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+        op += hSize;
+    }
+
+    /* Compress */
+    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
+    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
+        if (cSize == 0) return 0;   /* not enough space for compressed data */
+        op += cSize;
+    }
+
+    return op-ostart;
+}
+
+
+struct HUF_CElt_s {
+  U16  val;
+  BYTE nbBits;
+};   /* typedef'd to HUF_CElt within "huf.h" */
+
+/*! HUF_writeCTable() :
+    `CTable` : huffman tree to save, using huf representation.
+    @return : size of saved CTable */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize,
+                        const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
+{
+    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
+    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+    BYTE* op = (BYTE*)dst;
+    U32 n;
+
+     /* check conditions */
+    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+
+    /* convert to weight */
+    bitsToWeight[0] = 0;
+    for (n=1; n<huffLog+1; n++)
+        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+    for (n=0; n<maxSymbolValue; n++)
+        huffWeight[n] = bitsToWeight[CTable[n].nbBits];
+
+    /* attempt weights compression by FSE */
+    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
+        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
+            op[0] = (BYTE)hSize;
+            return hSize+1;
+    }   }
+
+    /* write raw values as 4-bits (max : 15) */
+    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
+    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
+    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
+    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
+    for (n=0; n<maxSymbolValue; n+=2)
+        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
+    return ((maxSymbolValue+1)/2) + 1;
+}
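+
+/* Illustrative recap of the two header forms produced above, keyed on the
+ * first byte :
+ * - op[0] in [1..127] : that many bytes of FSE-compressed weights follow
+ *   (the HUF_compressWeights() path) ;
+ * - op[0] >= 128 : raw mode ; (op[0]-127) weights follow, packed two per
+ *   byte as 4-bit values. E.g. with maxSymbolValue==6, op[0]==133 and the
+ *   six weights occupy (6+1)/2 == 3 bytes. */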
+
+
+size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzers may complain */
+    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    U32 nbSymbols = 0;
+
+    /* get symbol weights */
+    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+
+    /* check result */
+    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+    if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall);
+
+    /* Prepare base value per rank */
+    {   U32 n, nextRankStart = 0;
+        for (n=1; n<=tableLog; n++) {
+            U32 current = nextRankStart;
+            nextRankStart += (rankVal[n] << (n-1));
+            rankVal[n] = current;
+    }   }
+
+    /* fill nbBits */
+    {   U32 n; for (n=0; n<nbSymbols; n++) {
+            const U32 w = huffWeight[n];
+            CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
+    }   }
+
+    /* fill val */
+    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
+        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
+        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
+        /* determine starting value per rank */
+        valPerRank[tableLog+1] = 0;   /* for w==0 */
+        {   U16 min = 0;
+            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
+                valPerRank[n] = min;     /* get starting value within each rank */
+                min += nbPerRank[n];
+                min >>= 1;
+        }   }
+        /* assign value within rank, symbol order */
+        { U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
+    }
+
+    return readSize;
+}
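+
+/* Worked example (illustrative) : with tableLog==2 and weights {2,1,1} for
+ * symbols {A,B,C}, nbBits = tableLog+1-w gives A:1 bit, B:2 bits, C:2 bits.
+ * The rank scan then yields valPerRank[2]==0 and valPerRank[1]==1, so the
+ * canonical codes come out as A='1', B='00', C='01' : a valid prefix code,
+ * rebuilt from the serialized weights alone. */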
+
+
+typedef struct nodeElt_s {
+    U32 count;
+    U16 parent;
+    BYTE byte;
+    BYTE nbBits;
+} nodeElt;
+
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+    const U32 largestBits = huffNode[lastNonNull].nbBits;
+    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */
+
+    /* there are at least two elements that are too large */
+    {   int totalCost = 0;
+        const U32 baseCost = 1 << (largestBits - maxNbBits);
+        U32 n = lastNonNull;
+
+        while (huffNode[n].nbBits > maxNbBits) {
+            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+            huffNode[n].nbBits = (BYTE)maxNbBits;
+            n --;
+        }  /* n stops at huffNode[n].nbBits <= maxNbBits */
+        while (huffNode[n].nbBits == maxNbBits) n--;   /* n ends at the index of the smallest symbol using < maxNbBits */
+
+        /* renorm totalCost */
+        totalCost >>= (largestBits - maxNbBits);  /* note : totalCost is necessarily a multiple of baseCost */
+
+        /* repay normalized cost */
+        {   U32 const noSymbol = 0xF0F0F0F0;
+            U32 rankLast[HUF_TABLELOG_MAX+2];
+            int pos;
+
+            /* Get pos of last (smallest) symbol per rank */
+            memset(rankLast, 0xF0, sizeof(rankLast));
+            {   U32 currentNbBits = maxNbBits;
+                for (pos=n ; pos >= 0; pos--) {
+                    if (huffNode[pos].nbBits >= currentNbBits) continue;
+                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
+                    rankLast[maxNbBits-currentNbBits] = pos;
+            }   }
+
+            while (totalCost > 0) {
+                U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
+                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
+                    U32 highPos = rankLast[nBitsToDecrease];
+                    U32 lowPos = rankLast[nBitsToDecrease-1];
+                    if (highPos == noSymbol) continue;
+                    if (lowPos == noSymbol) break;
+                    {   U32 const highTotal = huffNode[highPos].count;
+                        U32 const lowTotal = 2 * huffNode[lowPos].count;
+                        if (highTotal <= lowTotal) break;
+                }   }
+                /* only triggered when no rank-1 symbol is left => find the closest one (note : there is necessarily at least one !) */
+                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))  /* HUF_TABLELOG_MAX test just to please gcc 5+; it should not be necessary */
+                    nBitsToDecrease ++;
+                totalCost -= 1 << (nBitsToDecrease-1);
+                if (rankLast[nBitsToDecrease-1] == noSymbol)
+                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];   /* this rank is no longer empty */
+                huffNode[rankLast[nBitsToDecrease]].nbBits ++;
+                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
+                    rankLast[nBitsToDecrease] = noSymbol;
+                else {
+                    rankLast[nBitsToDecrease]--;
+                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
+            }   }   /* while (totalCost > 0) */
+
+            while (totalCost < 0) {  /* Sometimes, cost correction overshoots */
+                if (rankLast[1] == noSymbol) {  /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+                    while (huffNode[n].nbBits == maxNbBits) n--;
+                    huffNode[n+1].nbBits--;
+                    rankLast[1] = n+1;
+                    totalCost++;
+                    continue;
+                }
+                huffNode[ rankLast[1] + 1 ].nbBits--;
+                rankLast[1]++;
+                totalCost ++;
+    }   }   }   /* there are at least two elements that are too large */
+
+    return maxNbBits;
+}
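+
+/* Cost-model note (illustrative) : in Kraft units of 2^-maxNbBits, capping an
+ * over-long symbol at maxNbBits adds slightly under one slot of debt, which
+ * totalCost accumulates (renormalized by >> (largestBits-maxNbBits)). The
+ * repay loop then lengthens shorter symbols by one bit : a symbol sitting at
+ * depth maxNbBits-nBitsToDecrease frees 2^(nBitsToDecrease-1) slots, hence
+ * the `totalCost -= 1 << (nBitsToDecrease-1)` step above. */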
+
+
+typedef struct {
+    U32 base;
+    U32 current;
+} rankPos;
+
+static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
+{
+    rankPos rank[32];
+    U32 n;
+
+    memset(rank, 0, sizeof(rank));
+    for (n=0; n<=maxSymbolValue; n++) {
+        U32 r = BIT_highbit32(count[n] + 1);
+        rank[r].base ++;
+    }
+    for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
+    for (n=0; n<32; n++) rank[n].current = rank[n].base;
+    for (n=0; n<=maxSymbolValue; n++) {
+        U32 const c = count[n];
+        U32 const r = BIT_highbit32(c+1) + 1;
+        U32 pos = rank[r].current++;
+        while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--;
+        huffNode[pos].count = c;
+        huffNode[pos].byte  = (BYTE)n;
+    }
+}
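+
+/* HUF_sort() note : symbols are first bucketed by log2(count+1) into 32 rank
+ * buckets, bucket bases are accumulated back-to-front, and each symbol is
+ * then insertion-sorted within its own bucket only. The result is huffNode[]
+ * ordered by decreasing count, typically much cheaper than a general sort
+ * for the skewed distributions Huffman coding targets. */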
+
+
+/** HUF_buildCTable_wksp() :
+ *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ *  `workSpace` must be aligned on 4-byte boundaries, and be at least as large as a table of 1024 unsigned.
+ */
+#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
+typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1];
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+{
+    nodeElt* const huffNode0 = (nodeElt*)workSpace;
+    nodeElt* const huffNode = huffNode0+1;
+    U32 n, nonNullRank;
+    int lowS, lowN;
+    U16 nodeNb = STARTNODE;
+    U32 nodeRoot;
+
+    /* safety checks */
+    if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC);   /* workSpace is not large enough */
+    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC);
+    memset(huffNode0, 0, sizeof(huffNodeTable));
+
+    /* sort, decreasing order */
+    HUF_sort(huffNode, count, maxSymbolValue);
+
+    /* init for parents */
+    nonNullRank = maxSymbolValue;
+    while(huffNode[nonNullRank].count == 0) nonNullRank--;
+    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
+    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
+    huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
+    nodeNb++; lowS-=2;
+    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
+    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */
+
+    /* create parents */
+    while (nodeNb <= nodeRoot) {
+        U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+        U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+        huffNode[n1].parent = huffNode[n2].parent = nodeNb;
+        nodeNb++;
+    }
+
+    /* distribute weights (unlimited tree height) */
+    huffNode[nodeRoot].nbBits = 0;
+    for (n=nodeRoot-1; n>=STARTNODE; n--)
+        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+    for (n=0; n<=nonNullRank; n++)
+        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+
+    /* enforce maxTableLog */
+    maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
+
+    /* fill result into tree (val, nbBits) */
+    {   U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+        U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+        if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
+        for (n=0; n<=nonNullRank; n++)
+            nbPerRank[huffNode[n].nbBits]++;
+        /* determine starting value per rank */
+        {   U16 min = 0;
+            for (n=maxNbBits; n>0; n--) {
+                valPerRank[n] = min;      /* get starting value within each rank */
+                min += nbPerRank[n];
+                min >>= 1;
+        }   }
+        for (n=0; n<=maxSymbolValue; n++)
+            tree[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
+        for (n=0; n<=maxSymbolValue; n++)
+            tree[n].val = valPerRank[tree[n].nbBits]++;   /* assign value within rank, symbol order */
+    }
+
+    return maxNbBits;
+}
+
+/** HUF_buildCTable() :
+ *  Note : count is used before tree is written, so they can safely overlap
+ */
+size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
+{
+    huffNodeTable nodeTable;
+    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
+}
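+
+/* Typical build-then-encode sequence (sketch only ; error checks omitted,
+ * mirroring HUF_compress_internal() below) :
+ *
+ *   U32 count[HUF_SYMBOLVALUE_MAX+1];
+ *   HUF_CElt table[HUF_SYMBOLVALUE_MAX+1];
+ *   unsigned maxSym = HUF_SYMBOLVALUE_MAX;
+ *   FSE_count_wksp(count, &maxSym, (const BYTE*)src, srcSize, wksp);
+ *   size_t const maxBits = HUF_buildCTable(table, count, maxSym, 0);
+ *   size_t const hSize = HUF_writeCTable(dst, cap, table, maxSym, (U32)maxBits);
+ *   HUF_compress4X_usingCTable(dst+hSize, cap-hSize, src, srcSize, table);
+ */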
+
+static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
+{
+    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
+}
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+#define HUF_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+#define HUF_FLUSHBITS_1(stream) \
+    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
+
+#define HUF_FLUSHBITS_2(stream) \
+    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
+
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+    const BYTE* ip = (const BYTE*) src;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart;
+    size_t n;
+    const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
+    BIT_CStream_t bitC;
+
+    /* init */
+    if (dstSize < 8) return 0;   /* not enough space to compress */
+    { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
+      if (HUF_isError(initErr)) return 0; }
+
+    n = srcSize & ~3;  /* truncate n to a multiple of 4 */
+    switch (srcSize & 3)
+    {
+        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
+                 HUF_FLUSHBITS_2(&bitC);
+        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
+                 HUF_FLUSHBITS_1(&bitC);
+        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
+                 HUF_FLUSHBITS(&bitC);
+        case 0 :
+        default: ;
+    }
+
+    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
+        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
+        HUF_FLUSHBITS_1(&bitC);
+        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
+        HUF_FLUSHBITS_2(&bitC);
+        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
+        HUF_FLUSHBITS_1(&bitC);
+        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
+        HUF_FLUSHBITS(&bitC);
+    }
+
+    return BIT_closeCStream(&bitC);
+}
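+
+/* Bitstream direction note : symbols are pushed last-to-first (the tail
+ * handled by the switch, then the main loop walking n downwards), so the
+ * matching decoder, which reads the bit container backwards, regenerates
+ * them front-to-back. The HUF_FLUSHBITS_1/_2 macros flush just often enough
+ * that the accumulator cannot overflow for codes bounded by
+ * HUF_TABLELOG_MAX. */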
+
+
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
+    const BYTE* ip = (const BYTE*) src;
+    const BYTE* const iend = ip + srcSize;
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart;
+
+    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
+    if (srcSize < 12) return 0;   /* no saving possible : too small input */
+    op += 6;   /* jumpTable */
+
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
+        if (cSize==0) return 0;
+        MEM_writeLE16(ostart, (U16)cSize);
+        op += cSize;
+    }
+
+    ip += segmentSize;
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
+        if (cSize==0) return 0;
+        MEM_writeLE16(ostart+2, (U16)cSize);
+        op += cSize;
+    }
+
+    ip += segmentSize;
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
+        if (cSize==0) return 0;
+        MEM_writeLE16(ostart+4, (U16)cSize);
+        op += cSize;
+    }
+
+    ip += segmentSize;
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
+        if (cSize==0) return 0;
+        op += cSize;
+    }
+
+    return op-ostart;
+}
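+
+/* 4-stream layout (as produced above) : a 6-byte jump table holding three
+ * little-endian U16 compressed sizes (streams 1-3), followed by the four
+ * compressed streams themselves. The first three streams each cover
+ * segmentSize = (srcSize+3)/4 input bytes and the fourth takes the
+ * remainder, so a decoder can locate all four streams independently. */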
+
+
+/* `workSpace` must be a table of at least 1024 unsigned */
+static size_t HUF_compress_internal (
+                void* dst, size_t dstSize,
+                const void* src, size_t srcSize,
+                unsigned maxSymbolValue, unsigned huffLog,
+                unsigned singleStream,
+                void* workSpace, size_t wkspSize)
+{
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart;
+
+    union {
+        U32 count[HUF_SYMBOLVALUE_MAX+1];
+        HUF_CElt CTable[HUF_SYMBOLVALUE_MAX+1];
+    } table;   /* `count` can overlap with `CTable`; saves 1 KB */
+
+    /* checks & inits */
+    if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC);
+    if (!srcSize) return 0;  /* Uncompressed (note : 1 means rle, so first byte must be correct) */
+    if (!dstSize) return 0;  /* cannot fit within dst budget */
+    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
+    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+
+    /* Scan input and build symbol stats */
+    {   CHECK_V_F(largest, FSE_count_wksp (table.count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
+        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
+        if (largest <= (srcSize >> 7)+1) return 0;   /* Fast heuristic : not compressible enough */
+    }
+
+    /* Build Huffman Tree */
+    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+    {   CHECK_V_F(maxBits, HUF_buildCTable_wksp (table.CTable, table.count, maxSymbolValue, huffLog, workSpace, wkspSize) );
+        huffLog = (U32)maxBits;
+    }
+
+    /* Write table description header */
+    {   CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table.CTable, maxSymbolValue, huffLog) );
+        if (hSize + 12 >= srcSize) return 0;   /* not useful to try compression */
+        op += hSize;
+    }
+
+    /* Compress */
+    {   size_t const cSize = (singleStream) ?
+                            HUF_compress1X_usingCTable(op, oend - op, src, srcSize, table.CTable) :   /* single segment */
+                            HUF_compress4X_usingCTable(op, oend - op, src, srcSize, table.CTable);
+        if (HUF_isError(cSize)) return cSize;
+        if (cSize==0) return 0;   /* uncompressible */
+        op += cSize;
+    }
+
+    /* check compressibility */
+    if ((size_t)(op-ostart) >= srcSize-1)
+        return 0;
+
+    return op-ostart;
+}
+
+
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
+                      const void* src, size_t srcSize,
+                      unsigned maxSymbolValue, unsigned huffLog,
+                      void* workSpace, size_t wkspSize)
+{
+    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize);
+}
+
+size_t HUF_compress1X (void* dst, size_t dstSize,
+                 const void* src, size_t srcSize,
+                 unsigned maxSymbolValue, unsigned huffLog)
+{
+    unsigned workSpace[1024];
+    return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+}
+
+size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
+                      const void* src, size_t srcSize,
+                      unsigned maxSymbolValue, unsigned huffLog,
+                      void* workSpace, size_t wkspSize)
+{
+    return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize);
+}
+
+size_t HUF_compress2 (void* dst, size_t dstSize,
+                const void* src, size_t srcSize,
+                unsigned maxSymbolValue, unsigned huffLog)
+{
+    unsigned workSpace[1024];
+    return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+}
+
+size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT);
+}
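+
+/* Minimal one-shot usage (sketch ; callers should also handle the cSize==0
+ * "not compressible" and cSize==1 "single-symbol rle" outcomes) :
+ *
+ *   size_t const cap = HUF_compressBound(srcSize);
+ *   size_t const cSize = HUF_compress(dst, cap, src, srcSize);
+ *   if (HUF_isError(cSize)) { ... }
+ */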
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,3291 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <string.h>         /* memset */
+#include "mem.h"
+#define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
+#include "xxhash.h"               /* XXH_reset, update, digest */
+#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h"  /* includes zstd.h */
+
+
+/*-*************************************
+*  Constants
+***************************************/
+static const U32 g_searchStrength = 8;   /* control skip over incompressible data */
+#define HASH_READ_SIZE 8
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+
+
+/*-*************************************
+*  Helper functions
+***************************************/
+#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
+size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
+
+
+/*-*************************************
+*  Sequence storage
+***************************************/
+static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+{
+    ssPtr->lit = ssPtr->litStart;
+    ssPtr->sequences = ssPtr->sequencesStart;
+    ssPtr->longLengthID = 0;
+}
+
+
+/*-*************************************
+*  Context memory management
+***************************************/
+struct ZSTD_CCtx_s
+{
+    const BYTE* nextSrc;    /* next block here to continue on current prefix */
+    const BYTE* base;       /* All regular indexes relative to this position */
+    const BYTE* dictBase;   /* extDict indexes relative to this position */
+    U32   dictLimit;        /* below that point, need extDict */
+    U32   lowLimit;         /* below that point, no more data */
+    U32   nextToUpdate;     /* index from which to continue dictionary update */
+    U32   nextToUpdate3;    /* index from which to continue dictionary update */
+    U32   hashLog3;         /* dispatch table : larger == faster, more memory */
+    U32   loadedDictEnd;
+    ZSTD_compressionStage_e stage;
+    U32   rep[ZSTD_REP_NUM];
+    U32   savedRep[ZSTD_REP_NUM];
+    U32   dictID;
+    ZSTD_parameters params;
+    void* workSpace;
+    size_t workSpaceSize;
+    size_t blockSize;
+    U64 frameContentSize;
+    XXH64_state_t xxhState;
+    ZSTD_customMem customMem;
+
+    seqStore_t seqStore;    /* sequences storage ptrs */
+    U32* hashTable;
+    U32* hashTable3;
+    U32* chainTable;
+    HUF_CElt* hufTable;
+    U32 flagStaticTables;
+    FSE_CTable offcodeCTable  [FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+    FSE_CTable litlengthCTable  [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+    unsigned tmpCounters[1024];
+};
+
+ZSTD_CCtx* ZSTD_createCCtx(void)
+{
+    return ZSTD_createCCtx_advanced(defaultCustomMem);
+}
+
+ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
+    ZSTD_CCtx* cctx;
+
+    if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+    if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+    cctx = (ZSTD_CCtx*) ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
+    if (!cctx) return NULL;
+    memset(cctx, 0, sizeof(ZSTD_CCtx));
+    memcpy(&(cctx->customMem), &customMem, sizeof(customMem));
+    return cctx;
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return 0;   /* support free on NULL */
+    ZSTD_free(cctx->workSpace, cctx->customMem);
+    ZSTD_free(cctx, cctx->customMem);
+    return 0;   /* reserved as a potential error code in the future */
+}
+
+size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*cctx) + cctx->workSpaceSize;
+}
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx)   /* hidden interface */
+{
+    return &(ctx->seqStore);
+}
+
+static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx* cctx)
+{
+    return cctx->params;
+}
+
+
+/** ZSTD_checkCParams() :
+    ensure param values remain within authorized range.
+    @return : 0, or an error code if one value is beyond authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
+#   define CLAMPCHECK(val,min,max) { if ((val<min) | (val>max)) return ERROR(compressionParameter_unsupported); }
+    CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+    CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+    CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+    CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+    { U32 const searchLengthMin = ((cParams.strategy == ZSTD_fast) | (cParams.strategy == ZSTD_greedy)) ? ZSTD_SEARCHLENGTH_MIN+1 : ZSTD_SEARCHLENGTH_MIN;
+      U32 const searchLengthMax = (cParams.strategy == ZSTD_fast) ? ZSTD_SEARCHLENGTH_MAX : ZSTD_SEARCHLENGTH_MAX-1;
+      CLAMPCHECK(cParams.searchLength, searchLengthMin, searchLengthMax); }
+    CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
+    if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) return ERROR(compressionParameter_unsupported);
+    return 0;
+}
+
+
+/** ZSTD_cycleLog() :
+ *  condition for correct operation : hashLog > 1 */
+static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+{
+    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+    return hashLog - btScale;
+}
+
+/** ZSTD_adjustCParams() :
+    optimize `cPar` for a given input (`srcSize` and `dictSize`).
+    mostly by downsizing, to reduce memory consumption and initialization time.
+    Both `srcSize` and `dictSize` are optional (use 0 if unknown),
+    but if both are 0, no optimization can be done.
+    Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that. */
+ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+{
+    if (srcSize+dictSize == 0) return cPar;   /* no size information available : no adjustment */
+
+    /* resize params, to use less memory when necessary */
+    {   U32 const minSrcSize = (srcSize==0) ? 500 : 0;
+        U64 const rSize = srcSize + dictSize + minSrcSize;
+        if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) {
+            U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
+            if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+    }   }
+    if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
+    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+        if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog);
+    }
+
+    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */
+
+    return cPar;
+}
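+
+/* Worked example (illustrative) : for srcSize==10 KB with no dictionary,
+ * rSize==10240 and srcLog = ZSTD_highbit32(10239)+1 = 14, so a requested
+ * windowLog of, say, 22 is cut down to 14 ; hashLog and chainLog then shrink
+ * with it, keeping table memory proportional to such a small input. */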
+
+
+size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams)
+{
+    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
+    U32    const divider = (cParams.searchLength==3) ? 3 : 4;
+    size_t const maxNbSeq = blockSize / divider;
+    size_t const tokenSpace = blockSize + 11*maxNbSeq;
+
+    size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
+    size_t const hSize = ((size_t)1) << cParams.hashLog;
+    U32    const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
+    size_t const h3Size = ((size_t)1) << hashLog3;
+    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+
+    size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
+                          + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+    size_t const neededSpace = tableSpace + (256*sizeof(U32)) /* huffTable */ + tokenSpace
+                             + (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
+
+    return sizeof(ZSTD_CCtx) + neededSpace;
+}
+
+
+static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
+{
+    return (param1.cParams.hashLog  == param2.cParams.hashLog)
+         & (param1.cParams.chainLog == param2.cParams.chainLog)
+         & (param1.cParams.strategy == param2.cParams.strategy)
+         & ((param1.cParams.searchLength==3) == (param2.cParams.searchLength==3));
+}
+
+/*! ZSTD_continueCCtx() :
+    reuse CCtx without reset (note : requires no dictionary) */
+static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 frameContentSize)
+{
+    U32 const end = (U32)(cctx->nextSrc - cctx->base);
+    cctx->params = params;
+    cctx->frameContentSize = frameContentSize;
+    cctx->lowLimit = end;
+    cctx->dictLimit = end;
+    cctx->nextToUpdate = end+1;
+    cctx->stage = ZSTDcs_init;
+    cctx->dictID = 0;
+    cctx->loadedDictEnd = 0;
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = repStartValue[i]; }
+    cctx->seqStore.litLengthSum = 0;  /* force reset of btopt stats */
+    XXH64_reset(&cctx->xxhState, 0);
+    return 0;
+}
+
+typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
+
+/*! ZSTD_resetCCtx_advanced() :
+    note : 'params' must be validated */
+static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
+                                       ZSTD_parameters params, U64 frameContentSize,
+                                       ZSTD_compResetPolicy_e const crp)
+{
+    if (crp == ZSTDcrp_continue)
+        if (ZSTD_equivalentParams(params, zc->params))
+            return ZSTD_continueCCtx(zc, params, frameContentSize);
+
+    {   size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
+        U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
+        size_t const maxNbSeq = blockSize / divider;
+        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
+        size_t const hSize = ((size_t)1) << params.cParams.hashLog;
+        U32    const hashLog3 = (params.cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
+        size_t const h3Size = ((size_t)1) << hashLog3;
+        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+        void* ptr;
+
+        /* Check if workSpace is large enough, alloc a new one if needed */
+        {   size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
+                                  + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+            size_t const neededSpace = tableSpace + (256*sizeof(U32)) /* huffTable */ + tokenSpace
+                                  + (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
+            if (zc->workSpaceSize < neededSpace) {
+                ZSTD_free(zc->workSpace, zc->customMem);
+                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
+                if (zc->workSpace == NULL) return ERROR(memory_allocation);
+                zc->workSpaceSize = neededSpace;
+        }   }
+
+        if (crp!=ZSTDcrp_noMemset) memset(zc->workSpace, 0, tableSpace);   /* reset tables only */
+        XXH64_reset(&zc->xxhState, 0);
+        zc->hashLog3 = hashLog3;
+        zc->hashTable = (U32*)(zc->workSpace);
+        zc->chainTable = zc->hashTable + hSize;
+        zc->hashTable3 = zc->chainTable + chainSize;
+        ptr = zc->hashTable3 + h3Size;
+        zc->hufTable = (HUF_CElt*)ptr;
+        zc->flagStaticTables = 0;
+        ptr = ((U32*)ptr) + 256;  /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
+
+        zc->nextToUpdate = 1;
+        zc->nextSrc = NULL;
+        zc->base = NULL;
+        zc->dictBase = NULL;
+        zc->dictLimit = 0;
+        zc->lowLimit = 0;
+        zc->params = params;
+        zc->blockSize = blockSize;
+        zc->frameContentSize = frameContentSize;
+        { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = repStartValue[i]; }
+
+        if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
+            zc->seqStore.litFreq = (U32*)ptr;
+            zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits);
+            zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1);
+            zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1);
+            ptr = zc->seqStore.offCodeFreq + (MaxOff+1);
+            zc->seqStore.matchTable = (ZSTD_match_t*)ptr;
+            ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1;
+            zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr;
+            ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1;
+            zc->seqStore.litLengthSum = 0;
+        }
+        zc->seqStore.sequencesStart = (seqDef*)ptr;
+        ptr = zc->seqStore.sequencesStart + maxNbSeq;
+        zc->seqStore.llCode = (BYTE*) ptr;
+        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
+        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
+        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
+
+        zc->stage = ZSTDcs_init;
+        zc->dictID = 0;
+        zc->loadedDictEnd = 0;
+
+        return 0;
+    }
+}
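+
+/* Workspace layout note : the single allocation is carved in a fixed order :
+ * hashTable (hSize U32), chainTable (chainSize U32), hashTable3 (h3Size U32),
+ * hufTable (256 U32 worth of space), then for the btopt strategies the
+ * frequency/match/price tables, and finally the sequence store (seqDef array,
+ * the three code arrays and the literals buffer). ZSTD_copyCCtx() below
+ * relies on the three hash/chain tables sitting first when it copies
+ * `tableSpace` in one memcpy(). */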
+
+
+/*! ZSTD_copyCCtx() :
+*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+*   @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+{
+    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
+
+    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+    ZSTD_resetCCtx_advanced(dstCCtx, srcCCtx->params, pledgedSrcSize, ZSTDcrp_noMemset);
+
+    /* copy tables */
+    {   size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
+        size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
+        size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
+        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+        memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
+    }
+
+    /* copy dictionary offsets */
+    dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
+    dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3;
+    dstCCtx->nextSrc      = srcCCtx->nextSrc;
+    dstCCtx->base         = srcCCtx->base;
+    dstCCtx->dictBase     = srcCCtx->dictBase;
+    dstCCtx->dictLimit    = srcCCtx->dictLimit;
+    dstCCtx->lowLimit     = srcCCtx->lowLimit;
+    dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd;
+    dstCCtx->dictID       = srcCCtx->dictID;
+
+    /* copy entropy tables */
+    dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
+    if (srcCCtx->flagStaticTables) {
+        memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4);
+        memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
+        memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
+        memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
+    }
+
+    return 0;
+}
+
+
+/*! ZSTD_reduceTable() :
+*   reduce table indexes by `reducerValue` */
+static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue)
+{
+    U32 u;
+    for (u=0 ; u < size ; u++) {
+        if (table[u] < reducerValue) table[u] = 0;
+        else table[u] -= reducerValue;
+    }
+}
+
+/*! ZSTD_reduceIndex() :
+*   rescale all indexes to avoid future overflow (indexes are U32) */
+static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
+{
+    { U32 const hSize = 1 << zc->params.cParams.hashLog;
+      ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); }
+
+    { U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
+      ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); }
+
+    { U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
+      ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); }
+}
+
+
+/*-*******************************************************
+*  Block entropic compression
+*********************************************************/
+
+/* See doc/zstd_compression_format.md for detailed format description */
+
+size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+    MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
+    return ZSTD_blockHeaderSize+srcSize;
+}
+
+
+static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE* const)dst;
+    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+            break;
+        default:   /*note : should not be necessary : flSize is within {1,2,3} */
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+            break;
+    }
+
+    memcpy(ostart + flSize, src, srcSize);
+    return srcSize + flSize;
+}
+
+static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE* const)dst;
+    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+            break;
+        default:   /*note : should not be necessary : flSize is necessarily within {1,2,3} */
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+            break;
+    }
+
+    ostart[flSize] = *(const BYTE*)src;
+    return flSize+1;
+}
+
+
+static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
+
+static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
+                                     void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize)
+{
+    size_t const minGain = ZSTD_minGain(srcSize);
+    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+    BYTE*  const ostart = (BYTE*)dst;
+    U32 singleStream = srcSize < 256;
+    symbolEncodingType_e hType = set_compressed;
+    size_t cLitSize;
+
+
+    /* small ? don't even attempt compression (speed opt) */
+#   define LITERAL_NOENTROPY 63
+    {   size_t const minLitSize = zc->flagStaticTables ? 6 : LITERAL_NOENTROPY;
+        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    }
+
+    if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
+    if (zc->flagStaticTables && (lhSize==3)) {
+        hType = set_repeat;
+        singleStream = 1;
+        cLitSize = HUF_compress1X_usingCTable(ostart+lhSize, dstCapacity-lhSize, src, srcSize, zc->hufTable);
+    } else {
+        cLitSize = singleStream ? HUF_compress1X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters))
+                                : HUF_compress4X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters));
+    }
+
+    if ((cLitSize==0) | (cLitSize >= srcSize - minGain))
+        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    if (cLitSize==1)
+        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+
+    /* Build header */
+    switch(lhSize)
+    {
+    case 3: /* 2 - 2 - 10 - 10 */
+        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+            MEM_writeLE24(ostart, lhc);
+            break;
+        }
+    case 4: /* 2 - 2 - 14 - 14 */
+        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+            MEM_writeLE32(ostart, lhc);
+            break;
+        }
+    default:   /* should not be necessary, lhSize is only {3,4,5} */
+    case 5: /* 2 - 2 - 18 - 18 */
+        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+            MEM_writeLE32(ostart, lhc);
+            ostart[4] = (BYTE)(cLitSize >> 10);
+            break;
+        }
+    }
+    return lhSize+cLitSize;
+}
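+
+/* Literals header note : the "2 - 2 - 10 - 10"-style comments above are field
+ * widths in bits : 2 bits of block type (hType), 2 bits of size format, then
+ * the regenerated size and the compressed size (see
+ * doc/zstd_compression_format.md). lhSize grows from 3 to 5 bytes as srcSize
+ * crosses 1 KB and 16 KB, matching the wider size fields. */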
+
+static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
+                                   8,  9, 10, 11, 12, 13, 14, 15,
+                                  16, 16, 17, 17, 18, 18, 19, 19,
+                                  20, 20, 20, 20, 21, 21, 21, 21,
+                                  22, 22, 22, 22, 22, 22, 22, 22,
+                                  23, 23, 23, 23, 23, 23, 23, 23,
+                                  24, 24, 24, 24, 24, 24, 24, 24,
+                                  24, 24, 24, 24, 24, 24, 24, 24 };
+
+static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+                                  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+                                  32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+                                  38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+                                  40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+                                  41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+                                  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+                                  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+
+
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+{
+    BYTE const LL_deltaCode = 19;
+    BYTE const ML_deltaCode = 36;
+    const seqDef* const sequences = seqStorePtr->sequencesStart;
+    BYTE* const llCodeTable = seqStorePtr->llCode;
+    BYTE* const ofCodeTable = seqStorePtr->ofCode;
+    BYTE* const mlCodeTable = seqStorePtr->mlCode;
+    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+    U32 u;
+    for (u=0; u<nbSeq; u++) {
+        U32 const llv = sequences[u].litLength;
+        U32 const mlv = sequences[u].matchLength;
+        llCodeTable[u] = (llv> 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
+        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+        mlCodeTable[u] = (mlv>127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
+    }
+    if (seqStorePtr->longLengthID==1)
+        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+    if (seqStorePtr->longLengthID==2)
+        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
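+
+/* Code-mapping example (illustrative) : a literal length of 100 exceeds 63,
+ * so its code is ZSTD_highbit32(100)+19 == 6+19 == 25 ; a match length value
+ * of 200 exceeds 127, so its code is ZSTD_highbit32(200)+36 == 7+36 == 43.
+ * Offsets always use their log2, keeping all three code alphabets small
+ * enough for the FSE tables built in ZSTD_compressSequences() below. */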
+
+
+size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
+                              void* dst, size_t dstCapacity,
+                              size_t srcSize)
+{
+    const seqStore_t* seqStorePtr = &(zc->seqStore);
+    U32 count[MaxSeq+1];
+    S16 norm[MaxSeq+1];
+    FSE_CTable* CTable_LitLength = zc->litlengthCTable;
+    FSE_CTable* CTable_OffsetBits = zc->offcodeCTable;
+    FSE_CTable* CTable_MatchLength = zc->matchlengthCTable;
+    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
+    const seqDef* const sequences = seqStorePtr->sequencesStart;
+    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+    const BYTE* const llCodeTable = seqStorePtr->llCode;
+    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+    BYTE* seqHead;
+    BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+    /* Compress literals */
+    {   const BYTE* const literals = seqStorePtr->litStart;
+        size_t const litSize = seqStorePtr->lit - literals;
+        size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
+        if (ZSTD_isError(cSize)) return cSize;
+        op += cSize;
+    }
+
+    /* Sequences Header */
+    if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall);
+    if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq;
+    else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+    else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+    if (nbSeq==0) goto _check_compressibility;
+
+    /* seqHead : flags for FSE encoding type */
+    seqHead = op++;
+
+#define MIN_SEQ_FOR_DYNAMIC_FSE   64
+#define MAX_SEQ_FOR_STATIC_FSE  1000
+
+    /* convert length/distances into codes */
+    ZSTD_seqToCodes(seqStorePtr);
+
+    /* CTable for Literal Lengths */
+    {   U32 max = MaxLL;
+        size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters);
+        if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+            *op++ = llCodeTable[0];
+            FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
+            LLtype = set_rle;
+        } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+            LLtype = set_repeat;
+        } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) {
+            FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
+            LLtype = set_basic;
+        } else {
+            size_t nbSeq_1 = nbSeq;
+            const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
+            if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; }
+            FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+            { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
+              if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+              op += NCountSize; }
+            FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
+            LLtype = set_compressed;
+    }   }
+
+    /* CTable for Offsets */
+    {   U32 max = MaxOff;
+        size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters);
+        if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+            *op++ = ofCodeTable[0];
+            FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
+            Offtype = set_rle;
+        } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+            Offtype = set_repeat;
+        } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) {
+            FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
+            Offtype = set_basic;
+        } else {
+            size_t nbSeq_1 = nbSeq;
+            const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
+            if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; }
+            FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+            { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
+              if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+              op += NCountSize; }
+            FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
+            Offtype = set_compressed;
+    }   }
+
+    /* CTable for MatchLengths */
+    {   U32 max = MaxML;
+        size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters);
+        if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+            *op++ = *mlCodeTable;
+            FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
+            MLtype = set_rle;
+        } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+            MLtype = set_repeat;
+        } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) {
+            FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
+            MLtype = set_basic;
+        } else {
+            size_t nbSeq_1 = nbSeq;
+            const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
+            if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; }
+            FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+            { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
+              if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+              op += NCountSize; }
+            FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
+            MLtype = set_compressed;
+    }   }
+
+    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+    zc->flagStaticTables = 0;
+
+    /* Encoding Sequences */
+    {   BIT_CStream_t blockStream;
+        FSE_CState_t  stateMatchLength;
+        FSE_CState_t  stateOffsetBits;
+        FSE_CState_t  stateLitLength;
+
+        CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */
+
+        /* first symbols */
+        FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+        FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
+        FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
+        BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+        if (MEM_32bits()) BIT_flushBits(&blockStream);
+        BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+        if (MEM_32bits()) BIT_flushBits(&blockStream);
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+        BIT_flushBits(&blockStream);
+
+        {   size_t n;
+            for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
+                BYTE const llCode = llCodeTable[n];
+                BYTE const ofCode = ofCodeTable[n];
+                BYTE const mlCode = mlCodeTable[n];
+                U32  const llBits = LL_bits[llCode];
+                U32  const ofBits = ofCode;                                     /* 32b*/  /* 64b*/
+                U32  const mlBits = ML_bits[mlCode];
+                                                                                /* (7)*/  /* (7)*/
+                FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
+                FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
+                if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
+                FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
+                if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+                    BIT_flushBits(&blockStream);                                /* (7)*/
+                BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+                if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+                BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+                if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
+                BIT_addBits(&blockStream, sequences[n].offset, ofBits);         /* 31 */
+                BIT_flushBits(&blockStream);                                    /* (7)*/
+        }   }
+
+        FSE_flushCState(&blockStream, &stateMatchLength);
+        FSE_flushCState(&blockStream, &stateOffsetBits);
+        FSE_flushCState(&blockStream, &stateLitLength);
+
+        {   size_t const streamSize = BIT_closeCStream(&blockStream);
+            if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
+            op += streamSize;
+    }   }
+
+    /* check compressibility */
+_check_compressibility:
+    { size_t const minGain = ZSTD_minGain(srcSize);
+      size_t const maxCSize = srcSize - minGain;
+      if ((size_t)(op-ostart) >= maxCSize) return 0; }
+
+    /* confirm repcodes */
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->savedRep[i]; }
+
+    return op - ostart;
+}
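+
+/* Mode selection recap (as implemented above) : each of the three alphabets
+ * independently picks set_rle (a single code repeated), set_repeat (reuse
+ * the previous tables, only when flagStaticTables is set and the block is
+ * small enough), set_basic (the predefined default distribution), or
+ * set_compressed (a freshly normalized FSE table serialized via
+ * FSE_writeNCount). The three 2-bit choices are packed into *seqHead. */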
+
+
+/*! ZSTD_storeSeq() :
+    Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+    `offsetCode` : distance to match, or 0 == repCode.
+    `matchCode` : matchLength - MINMATCH
+*/
+MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
+{
+#if 0  /* for debug */
+    static const BYTE* g_start = NULL;
+    const U32 pos = (U32)((const BYTE*)literals - g_start);
+    if (g_start==NULL) g_start = (const BYTE*)literals;
+    //if ((pos > 1) && (pos < 50000))
+        printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
+               pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
+#endif
+    /* copy Literals */
+    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+    seqStorePtr->lit += litLength;
+
+    /* literal Length */
+    if (litLength>0xFFFF) { seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
+    seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+    /* match offset */
+    seqStorePtr->sequences[0].offset = offsetCode + 1;
+
+    /* match Length */
+    if (matchCode>0xFFFF) { seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
+    seqStorePtr->sequences[0].matchLength = (U16)matchCode;
+
+    seqStorePtr->sequences++;
+}
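+
+/* Illustrative sketch (not upstream code): how the block parsers below feed
+ * ZSTD_storeSeq(). A real distance is passed shifted by ZSTD_REP_MOVE, while
+ * offsetCode 0 selects the most recent repeated offset (repcode). */
+#if 0
+static void example_storeMatch(seqStore_t* ss,
+                               const BYTE* anchor, const BYTE* ip,
+                               const BYTE* match, size_t matchLength)
+{
+    size_t const litLength = (size_t)(ip - anchor);            /* literals since last sequence */
+    U32 const offsetCode = (U32)(ip - match) + ZSTD_REP_MOVE;  /* real distance */
+    ZSTD_storeSeq(ss, litLength, anchor, offsetCode, matchLength - MINMATCH);
+    /* repcode variant : ZSTD_storeSeq(ss, litLength, anchor, 0, matchLength - MINMATCH); */
+}
+#endif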
+
+
+/*-*************************************
+*  Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (register size_t val)
+{
+    if (MEM_isLittleEndian()) {
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanForward64( &r, (U64)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctzll((U64)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r=0;
+            _BitScanForward( &r, (U32)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctz((U32)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#       endif
+        }
+    } else {  /* Big Endian CPU */
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanReverse64( &r, val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_clzll(val) >> 3);
+#       else
+            unsigned r;
+            const unsigned n32 = sizeof(size_t)*4;   /* computed this way to avoid a compiler warning about an oversized shift in 32-bit mode */
+            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r = 0;
+            _BitScanReverse( &r, (unsigned long)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_clz((U32)val) >> 3);
+#       else
+            unsigned r;
+            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+    }   }
+}
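+
+/* Illustrative example (not upstream code): ZSTD_NbCommonBytes() is fed the
+ * XOR of two machine words (see ZSTD_count() below); the position of the
+ * first non-zero byte of that XOR, found via ctz/clz or the De Bruijn
+ * fallback, equals the number of leading bytes the two words share. */
+#if 0
+static unsigned example_commonBytes(void)
+{
+    const BYTE a[8] = { 'a','b','c','d','e','f','g','h' };
+    const BYTE b[8] = { 'a','b','X','d','e','f','g','h' };
+    size_t const diff = MEM_readST(a) ^ MEM_readST(b);   /* first non-zero byte : #2 */
+    return ZSTD_NbCommonBytes(diff);                     /* == 2 */
+}
+#endif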
+
+
+static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+    const BYTE* const pStart = pIn;
+    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+    while (pIn < pInLoopLimit) {
+        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+        if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+        pIn += ZSTD_NbCommonBytes(diff);
+        return (size_t)(pIn - pStart);
+    }
+    if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+    return (size_t)(pIn - pStart);
+}
+
+/** ZSTD_count_2segments() :
+*   can count match length with `ip` & `match` in 2 different segments.
+*   convention : on reaching mEnd, match counting continues from iStart
+*/
+static size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+    size_t const matchLength = ZSTD_count(ip, match, vEnd);
+    if (match + matchLength != mEnd) return matchLength;
+    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
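+
+/* Illustrative example (not upstream code): with an external dictionary a
+ * match may start in the dictionary segment and spill into the current one.
+ * ZSTD_count_2segments() counts within [match, mEnd) first and, when the
+ * match reaches mEnd, keeps counting from iStart, the start of the current
+ * segment. Hypothetical buffers: */
+#if 0
+static size_t example_count2segments(void)
+{
+    static const BYTE dict[]  = "....abc";        /* old segment, ends with "abc" */
+    static const BYTE input[] = "defabcdefxyz";   /* current segment */
+    const BYTE* const ip    = input + 3;          /* "abcdefxyz" */
+    const BYTE* const match = dict + 4;           /* "abc", 3 bytes before mEnd */
+    return ZSTD_count_2segments(ip, match, input + 12, dict + 7, input);
+    /* == 6 : "abc" counted in dict, then "def" counted from iStart */
+}
+#endif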
+
+
+/*-*************************************
+*  Hashes
+***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); }   /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+{
+    switch(mls)
+    {
+    default:
+    case 4: return ZSTD_hash4Ptr(p, hBits);
+    case 5: return ZSTD_hash5Ptr(p, hBits);
+    case 6: return ZSTD_hash6Ptr(p, hBits);
+    case 7: return ZSTD_hash7Ptr(p, hBits);
+    case 8: return ZSTD_hash8Ptr(p, hBits);
+    }
+}
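+
+/* Illustrative example (not upstream code): these are multiplicative hashes;
+ * the shift keeps only the first `mls` bytes (read little-endian), so two
+ * positions sharing their first `mls` bytes always land in the same bucket.
+ * 2654435761U is a large prime close to 2^32/phi, the golden-ratio
+ * multiplier. */
+#if 0
+static int example_sameBucket(void)
+{
+    U32 const hBits = 17;   /* e.g. a 2^17-entry table */
+    /* identical first 5 bytes ("hello") => identical bucket */
+    return ZSTD_hashPtr("hello, world", hBits, 5)
+        == ZSTD_hashPtr("hello again!", hBits, 5);   /* always 1 */
+}
+#endif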
+
+
+/*-*************************************
+*  Fast Scan
+***************************************/
+static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
+{
+    U32* const hashTable = zc->hashTable;
+    U32  const hBits = zc->params.cParams.hashLog;
+    const BYTE* const base = zc->base;
+    const BYTE* ip = base + zc->nextToUpdate;
+    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+    const size_t fastHashFillStep = 3;
+
+    while(ip <= iend) {
+        hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
+        ip += fastHashFillStep;
+    }
+}
+
+
+FORCE_INLINE
+void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
+                               const void* src, size_t srcSize,
+                               const U32 mls)
+{
+    U32* const hashTable = cctx->hashTable;
+    U32  const hBits = cctx->params.cParams.hashLog;
+    seqStore_t* seqStorePtr = &(cctx->seqStore);
+    const BYTE* const base = cctx->base;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32   lowestIndex = cctx->dictLimit;
+    const BYTE* const lowest = base + lowestIndex;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - HASH_READ_SIZE;
+    U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
+    U32 offsetSaved = 0;
+
+    /* init */
+    ip += (ip==lowest);
+    {   U32 const maxRep = (U32)(ip-lowest);
+        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+    }
+
+    /* Main Search Loop */
+    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+        size_t mLength;
+        size_t const h = ZSTD_hashPtr(ip, hBits, mls);
+        U32 const current = (U32)(ip-base);
+        U32 const matchIndex = hashTable[h];
+        const BYTE* match = base + matchIndex;
+        hashTable[h] = current;   /* update hash table */
+
+        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            U32 offset;
+            if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
+                ip += ((ip-anchor) >> g_searchStrength) + 1;
+                continue;
+            }
+            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+            offset = (U32)(ip-match);
+            while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+            offset_2 = offset_1;
+            offset_1 = offset;
+
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }
+
+        /* match found */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;  /* here because current+2 could be > iend-8 */
+            hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+            /* check immediate repcode */
+            while ( (ip <= ilimit)
+                 && ( (offset_2>0)
+                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                /* store sequence */
+                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
+                hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
+                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+                ip += rLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+    }   }   }
+
+    /* save reps for next block */
+    cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
+    cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
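+
+/* Illustrative note (not upstream code): offset_1/offset_2 above are the two
+ * most recent match distances ("repcodes"). The 4-byte probe at ip+1 catches
+ * a repeated distance without any table lookup; on a hit the sequence is
+ * stored with offsetCode 0, which the format encodes very cheaply. */
+#if 0
+static int example_repcodeHit(const BYTE* ip, U32 offset_1)
+{
+    /* the same probe as in the loop above */
+    return (offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1));
+}
+#endif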
+
+
+static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
+                       const void* src, size_t srcSize)
+{
+    const U32 mls = ctx->params.cParams.searchLength;
+    switch(mls)
+    {
+    default:
+    case 4 :
+        ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
+    case 5 :
+        ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
+    case 6 :
+        ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
+    case 7 :
+        ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
+    }
+}
+
+
+static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
+                                 const void* src, size_t srcSize,
+                                 const U32 mls)
+{
+    U32* hashTable = ctx->hashTable;
+    const U32 hBits = ctx->params.cParams.hashLog;
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const base = ctx->base;
+    const BYTE* const dictBase = ctx->dictBase;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32   lowestIndex = ctx->lowLimit;
+    const BYTE* const dictStart = dictBase + lowestIndex;
+    const U32   dictLimit = ctx->dictLimit;
+    const BYTE* const lowPrefixPtr = base + dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
+
+    /* Search Loop */
+    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
+        const size_t h = ZSTD_hashPtr(ip, hBits, mls);
+        const U32 matchIndex = hashTable[h];
+        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+        const BYTE* match = matchBase + matchIndex;
+        const U32 current = (U32)(ip-base);
+        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
+        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+        const BYTE* repMatch = repBase + repIndex;
+        size_t mLength;
+        hashTable[h] = current;   /* update hash table */
+
+        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
+            ip++;
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            if ( (matchIndex < lowestIndex) ||
+                 (MEM_read32(match) != MEM_read32(ip)) ) {
+                ip += ((ip-anchor) >> g_searchStrength) + 1;
+                continue;
+            }
+            {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+                const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+                U32 offset;
+                mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
+                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
+                offset = current - matchIndex;
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }   }
+
+        /* found a match : store it */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
+            hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+            /* check immediate repcode */
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex2 = current2 - offset_2;
+                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
+                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+                    size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
+                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
+                    hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
+                    ip += repLength2;
+                    anchor = ip;
+                    continue;
+                }
+                break;
+    }   }   }
+
+    /* save reps for next block */
+    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
+
+
+static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
+                         const void* src, size_t srcSize)
+{
+    U32 const mls = ctx->params.cParams.searchLength;
+    switch(mls)
+    {
+    default:
+    case 4 :
+        ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
+    case 5 :
+        ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
+    case 6 :
+        ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
+    case 7 :
+        ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
+    }
+}
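+
+/* Illustrative sketch (not upstream code): in extDict mode a single index
+ * space covers both segments. Indices below dictLimit belong to the old
+ * (dictionary) buffer and are dereferenced through dictBase; the rest belong
+ * to the current buffer, through base, exactly as the ternaries above do. */
+#if 0
+static const BYTE* example_indexToPtr(const ZSTD_CCtx* ctx, U32 idx)
+{
+    return (idx < ctx->dictLimit) ? ctx->dictBase + idx : ctx->base + idx;
+}
+#endif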
+
+
+/*-*************************************
+*  Double Fast
+***************************************/
+static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls)
+{
+    U32* const hashLarge = cctx->hashTable;
+    U32  const hBitsL = cctx->params.cParams.hashLog;
+    U32* const hashSmall = cctx->chainTable;
+    U32  const hBitsS = cctx->params.cParams.chainLog;
+    const BYTE* const base = cctx->base;
+    const BYTE* ip = base + cctx->nextToUpdate;
+    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+    const size_t fastHashFillStep = 3;
+
+    while(ip <= iend) {
+        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
+        hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
+        ip += fastHashFillStep;
+    }
+}
+
+
+FORCE_INLINE
+void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
+                                 const void* src, size_t srcSize,
+                                 const U32 mls)
+{
+    U32* const hashLong = cctx->hashTable;
+    const U32 hBitsL = cctx->params.cParams.hashLog;
+    U32* const hashSmall = cctx->chainTable;
+    const U32 hBitsS = cctx->params.cParams.chainLog;
+    seqStore_t* seqStorePtr = &(cctx->seqStore);
+    const BYTE* const base = cctx->base;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32 lowestIndex = cctx->dictLimit;
+    const BYTE* const lowest = base + lowestIndex;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - HASH_READ_SIZE;
+    U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
+    U32 offsetSaved = 0;
+
+    /* init */
+    ip += (ip==lowest);
+    {   U32 const maxRep = (U32)(ip-lowest);
+        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+    }
+
+    /* Main Search Loop */
+    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+        size_t mLength;
+        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+        U32 const current = (U32)(ip-base);
+        U32 const matchIndexL = hashLong[h2];
+        U32 const matchIndexS = hashSmall[h];
+        const BYTE* matchLong = base + matchIndexL;
+        const BYTE* match = base + matchIndexS;
+        hashLong[h2] = hashSmall[h] = current;   /* update hash tables */
+
+        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */
+            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            U32 offset;
+            if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
+                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+                offset = (U32)(ip-matchLong);
+                while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+            } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
+                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+                U32 const matchIndex3 = hashLong[h3];
+                const BYTE* match3 = base + matchIndex3;
+                hashLong[h3] = current + 1;
+                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+                    mLength = ZSTD_count(ip+9, match3+8, iend) + 8;
+                    ip++;
+                    offset = (U32)(ip-match3);
+                    while (((ip>anchor) & (match3>lowest)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+                } else {
+                    mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+                    offset = (U32)(ip-match);
+                    while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+                }
+            } else {
+                ip += ((ip-anchor) >> g_searchStrength) + 1;
+                continue;
+            }
+
+            offset_2 = offset_1;
+            offset_1 = offset;
+
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }
+
+        /* match found */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
+                hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;  /* here because current+2 could be > iend-8 */
+            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
+                hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+
+            /* check immediate repcode */
+            while ( (ip <= ilimit)
+                 && ( (offset_2>0)
+                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                /* store sequence */
+                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+                ip += rLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+    }   }   }
+
+    /* save reps for next block */
+    cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
+    cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
+
+
+static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    const U32 mls = ctx->params.cParams.searchLength;
+    switch(mls)
+    {
+    default:
+    case 4 :
+        ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
+    case 5 :
+        ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
+    case 6 :
+        ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
+    case 7 :
+        ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
+    }
+}
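+
+/* Illustrative sketch (not upstream code): "double fast" maintains two
+ * tables, a long-hash table keyed on 8 bytes (probed first, to prefer longer
+ * matches) and a small-hash table keyed on `mls` bytes as a fallback, as in
+ * the twin lookup sketched here. */
+#if 0
+static void example_doubleLookup(const ZSTD_CCtx* cctx, const BYTE* ip, U32 mls,
+                                 U32* longCandidate, U32* smallCandidate)
+{
+    *longCandidate  = cctx->hashTable [ZSTD_hashPtr(ip, cctx->params.cParams.hashLog,  8)];
+    *smallCandidate = cctx->chainTable[ZSTD_hashPtr(ip, cctx->params.cParams.chainLog, mls)];
+}
+#endif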
+
+
+static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
+                                 const void* src, size_t srcSize,
+                                 const U32 mls)
+{
+    U32* const hashLong = ctx->hashTable;
+    U32  const hBitsL = ctx->params.cParams.hashLog;
+    U32* const hashSmall = ctx->chainTable;
+    U32  const hBitsS = ctx->params.cParams.chainLog;
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const base = ctx->base;
+    const BYTE* const dictBase = ctx->dictBase;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32   lowestIndex = ctx->lowLimit;
+    const BYTE* const dictStart = dictBase + lowestIndex;
+    const U32   dictLimit = ctx->dictLimit;
+    const BYTE* const lowPrefixPtr = base + dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
+
+    /* Search Loop */
+    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
+        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+        const U32 matchIndex = hashSmall[hSmall];
+        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+        const BYTE* match = matchBase + matchIndex;
+
+        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+        const U32 matchLongIndex = hashLong[hLong];
+        const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+        const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+        const U32 current = (U32)(ip-base);
+        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
+        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+        const BYTE* repMatch = repBase + repIndex;
+        size_t mLength;
+        hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */
+
+        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+                const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
+                const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+                U32 offset;
+                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
+                offset = current - matchLongIndex;
+                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+            } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+                U32 const matchIndex3 = hashLong[h3];
+                const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+                const BYTE* match3 = match3Base + matchIndex3;
+                U32 offset;
+                hashLong[h3] = current + 1;
+                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+                    const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
+                    const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
+                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+                    ip++;
+                    offset = current+1 - matchIndex3;
+                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+                } else {
+                    const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+                    const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+                    offset = current - matchIndex;
+                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
+                }
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+            } else {
+                ip += ((ip-anchor) >> g_searchStrength) + 1;
+                continue;
+        }   }
+
+        /* found a match : store it */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
+            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
+            hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+            /* check immediate repcode */
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex2 = current2 - offset_2;
+                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
+                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
+                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
+                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+                    ip += repLength2;
+                    anchor = ip;
+                    continue;
+                }
+                break;
+    }   }   }
+
+    /* save reps for next block */
+    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
+
+
+static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
+                         const void* src, size_t srcSize)
+{
+    U32 const mls = ctx->params.cParams.searchLength;
+    switch(mls)
+    {
+    default:
+    case 4 :
+        ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
+    case 5 :
+        ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
+    case 6 :
+        ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
+    case 7 :
+        ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
+    }
+}
+
+
+/*-*************************************
+*  Binary Tree search
+***************************************/
+/** ZSTD_insertBt1() : add one or multiple positions to the tree.
+*   ip : assumed <= iend-8.
+*   @return : nb of positions added */
+static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares,
+                          U32 extDict)
+{
+    U32*   const hashTable = zc->hashTable;
+    U32    const hashLog = zc->params.cParams.hashLog;
+    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32*   const bt = zc->chainTable;
+    U32    const btLog  = zc->params.cParams.chainLog - 1;
+    U32    const btMask = (1 << btLog) - 1;
+    U32 matchIndex = hashTable[h];
+    size_t commonLengthSmaller=0, commonLengthLarger=0;
+    const BYTE* const base = zc->base;
+    const BYTE* const dictBase = zc->dictBase;
+    const U32 dictLimit = zc->dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* match;
+    const U32 current = (U32)(ip-base);
+    const U32 btLow = btMask >= current ? 0 : current - btMask;
+    U32* smallerPtr = bt + 2*(current&btMask);
+    U32* largerPtr  = smallerPtr + 1;
+    U32 dummy32;   /* to be nullified at the end */
+    U32 const windowLow = zc->lowLimit;
+    U32 matchEndIdx = current+8;
+    size_t bestLength = 8;
+#ifdef ZSTD_C_PREDICT
+    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
+    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
+    predictedSmall += (predictedSmall>0);
+    predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
+    hashTable[h] = current;   /* Update Hash Table */
+
+    while (nbCompares-- && (matchIndex > windowLow)) {
+        U32* const nextPtr = bt + 2*(matchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+
+#ifdef ZSTD_C_PREDICT   /* note : can create issues when hashLog is small (<= 11) */
+        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
+        if (matchIndex == predictedSmall) {
+            /* no need to check length, result known */
+            *smallerPtr = matchIndex;
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
+            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+            continue;
+        }
+        if (matchIndex == predictedLarge) {
+            *largerPtr = matchIndex;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+            continue;
+        }
+#endif
+        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+            match = base + matchIndex;
+            if (match[matchLength] == ip[matchLength])
+                matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
+        } else {
+            match = dictBase + matchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+            if (matchIndex+matchLength >= dictLimit)
+                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
+        }
+
+        if (matchLength > bestLength) {
+            bestLength = matchLength;
+            if (matchLength > matchEndIdx - matchIndex)
+                matchEndIdx = matchIndex + (U32)matchLength;
+        }
+
+        if (ip+matchLength == iend)   /* equal : no way to know whether the suffix is smaller or larger */
+            break;   /* drop it, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+
+        if (match[matchLength] < ip[matchLength]) {  /* necessarily within correct buffer */
+            /* match is smaller than current */
+            *smallerPtr = matchIndex;             /* update smaller idx */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
+            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+        } else {
+            /* match is larger than current */
+            *largerPtr = matchIndex;
+            commonLengthLarger = matchLength;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+    }   }
+
+    *smallerPtr = *largerPtr = 0;
+    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
+    if (matchEndIdx > current + 8) return matchEndIdx - current - 8;
+    return 1;
+}
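+
+/* Illustrative sketch (not upstream code): the binary tree lives in a rolling
+ * array; position `pos` owns the two consecutive slots bt[2*(pos&btMask)]
+ * (link toward suffixes sorting smaller) and bt[2*(pos&btMask)+1] (larger),
+ * which is why positions older than current-btMask fall off the tree. */
+#if 0
+static void example_btNode(const U32* bt, U32 btMask, U32 pos,
+                           U32* smallerLink, U32* largerLink)
+{
+    *smallerLink = bt[2*(pos & btMask)    ];
+    *largerLink  = bt[2*(pos & btMask) + 1];
+}
+#endif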
+
+
+static size_t ZSTD_insertBtAndFindBestMatch (
+                        ZSTD_CCtx* zc,
+                        const BYTE* const ip, const BYTE* const iend,
+                        size_t* offsetPtr,
+                        U32 nbCompares, const U32 mls,
+                        U32 extDict)
+{
+    U32*   const hashTable = zc->hashTable;
+    U32    const hashLog = zc->params.cParams.hashLog;
+    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32*   const bt = zc->chainTable;
+    U32    const btLog  = zc->params.cParams.chainLog - 1;
+    U32    const btMask = (1 << btLog) - 1;
+    U32 matchIndex  = hashTable[h];
+    size_t commonLengthSmaller=0, commonLengthLarger=0;
+    const BYTE* const base = zc->base;
+    const BYTE* const dictBase = zc->dictBase;
+    const U32 dictLimit = zc->dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const U32 current = (U32)(ip-base);
+    const U32 btLow = btMask >= current ? 0 : current - btMask;
+    const U32 windowLow = zc->lowLimit;
+    U32* smallerPtr = bt + 2*(current&btMask);
+    U32* largerPtr  = bt + 2*(current&btMask) + 1;
+    U32 matchEndIdx = current+8;
+    U32 dummy32;   /* to be nullified at the end */
+    size_t bestLength = 0;
+
+    hashTable[h] = current;   /* Update Hash Table */
+
+    while (nbCompares-- && (matchIndex > windowLow)) {
+        U32* const nextPtr = bt + 2*(matchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+        const BYTE* match;
+
+        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+            match = base + matchIndex;
+            if (match[matchLength] == ip[matchLength])
+                matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
+        } else {
+            match = dictBase + matchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+            if (matchIndex+matchLength >= dictLimit)
+                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
+        }
+
+        if (matchLength > bestLength) {
+            if (matchLength > matchEndIdx - matchIndex)
+                matchEndIdx = matchIndex + (U32)matchLength;
+            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+            if (ip+matchLength == iend)   /* equal : no way to know whether the suffix is smaller or larger */
+                break;   /* drop it, to guarantee consistency (misses a little compression) */
+        }
+
+        if (match[matchLength] < ip[matchLength]) {
+            /* match is smaller than current */
+            *smallerPtr = matchIndex;             /* update smaller idx */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
+            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+        } else {
+            /* match is larger than current */
+            *largerPtr = matchIndex;
+            commonLengthLarger = matchLength;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+    }   }
+
+    *smallerPtr = *largerPtr = 0;
+
+    zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
+    return bestLength;
+}
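+
+/* Worked example (illustrative, hypothetical numbers) of the acceptance test
+ * above: a longer candidate is kept only when its extra length, counted in
+ * quarter bits (x4), beats the extra offset cost, approximated by log2 of
+ * the distance. */
+#if 0
+static int example_acceptLonger(void)
+{
+    int const lhs = 4 * (33 - 32);   /* candidate is 1 byte longer */
+    /* candidate at distance 65535 vs previous offset field 8191 : */
+    int const rhs = (int)(ZSTD_highbit32(65535+1) - ZSTD_highbit32(8191+1));
+    return lhs > rhs;   /* 4 > 16-13 : accepted, the extra byte pays ~3 bits */
+}
+#endif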
+
+
+static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
+{
+    const BYTE* const base = zc->base;
+    const U32 target = (U32)(ip - base);
+    U32 idx = zc->nextToUpdate;
+
+    while(idx < target)
+        idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0);
+}
+
+/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch (
+                        ZSTD_CCtx* zc,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 maxNbAttempts, const U32 mls)
+{
+    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
+    ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+    return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
+}
+
+
+static size_t ZSTD_BtFindBestMatch_selectMLS (
+                        ZSTD_CCtx* zc,   /* Index table will be updated */
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+    switch(matchLengthSearch)
+    {
+    default :
+    case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+    case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+    case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+    }
+}
+
+
+static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
+{
+    const BYTE* const base = zc->base;
+    const U32 target = (U32)(ip - base);
+    U32 idx = zc->nextToUpdate;
+
+    while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1);
+}
+
+
+/** Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch_extDict (
+                        ZSTD_CCtx* zc,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 maxNbAttempts, const U32 mls)
+{
+    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
+    ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
+    return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
+}
+
+
+static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
+                        ZSTD_CCtx* zc,   /* Index table will be updated */
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+    switch(matchLengthSearch)
+    {
+    default :
+    case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+    case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+    case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+    }
+}
+
+
+
+/* *********************************
+*  Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]
+
+/* Update chains up to ip (excluded)
+   Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE
+U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
+{
+    U32* const hashTable  = zc->hashTable;
+    const U32 hashLog = zc->params.cParams.hashLog;
+    U32* const chainTable = zc->chainTable;
+    const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
+    const BYTE* const base = zc->base;
+    const U32 target = (U32)(ip - base);
+    U32 idx = zc->nextToUpdate;
+
+    while(idx < target) { /* catch up */
+        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+        hashTable[h] = idx;
+        idx++;
+    }
+
+    zc->nextToUpdate = target;
+    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
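+
+/* Illustrative sketch (not upstream code): hashTable[] holds the most recent
+ * position for each bucket and chainTable[] is a 2^chainLog ring linking each
+ * position to the previous one with the same hash. A match-finder walk (the
+ * real one, below, also stops at minChain to skip overwritten ring entries):
+ */
+#if 0
+static U32 example_walkChain(const U32* chainTable, U32 chainMask,
+                             U32 head, U32 lowLimit, int nbAttempts)
+{
+    U32 matchIndex = head;
+    while ((matchIndex > lowLimit) && (nbAttempts-- > 0)) {
+        /* ... compare the candidate at matchIndex against ip here ... */
+        matchIndex = chainTable[matchIndex & chainMask];   /* next-older position */
+    }
+    return matchIndex;
+}
+#endif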
+
+
+
+FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
+size_t ZSTD_HcFindBestMatch_generic (
+                        ZSTD_CCtx* zc,   /* Index table will be updated */
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 maxNbAttempts, const U32 mls, const U32 extDict)
+{
+    U32* const chainTable = zc->chainTable;
+    const U32 chainSize = (1 << zc->params.cParams.chainLog);
+    const U32 chainMask = chainSize-1;
+    const BYTE* const base = zc->base;
+    const BYTE* const dictBase = zc->dictBase;
+    const U32 dictLimit = zc->dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const U32 lowLimit = zc->lowLimit;
+    const U32 current = (U32)(ip-base);
+    const U32 minChain = current > chainSize ? current - chainSize : 0;
+    int nbAttempts=maxNbAttempts;
+    size_t ml=EQUAL_READ32-1;
+
+    /* HC4 match finder */
+    U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
+
+    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+        const BYTE* match;
+        size_t currentMl=0;
+        if ((!extDict) || matchIndex >= dictLimit) {
+            match = base + matchIndex;
+            if (match[ml] == ip[ml])   /* potentially better */
+                currentMl = ZSTD_count(ip, match, iLimit);
+        } else {
+            match = dictBase + matchIndex;
+            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+                currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
+        }
+
+        /* save best solution */
+        if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ }
+
+        if (matchIndex <= minChain) break;
+        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+    }
+
+    return ml;
+}
+
+
+FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
+                        ZSTD_CCtx* zc,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+    switch(matchLengthSearch)
+    {
+    default :
+    case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
+    case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
+    case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
+    }
+}
+
+
+FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
+                        ZSTD_CCtx* zc,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+    switch(matchLengthSearch)
+    {
+    default :
+    case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
+    case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
+    case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
+    }
+}
+
+
+/* *******************************
+*  Common parser - lazy strategy
+*********************************/
+FORCE_INLINE
+void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
+                                     const void* src, size_t srcSize,
+                                     const U32 searchMethod, const U32 depth)
+{
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ctx->base + ctx->dictLimit;
+
+    U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
+    U32 const mls = ctx->params.cParams.searchLength;
+
+    typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+                        size_t* offsetPtr,
+                        U32 maxNbAttempts, U32 matchLengthSearch);
+    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+    U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0;
+
+    /* init */
+    ip += (ip==base);
+    ctx->nextToUpdate3 = ctx->nextToUpdate;
+    {   U32 const maxRep = (U32)(ip-base);
+        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+    }
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        size_t matchLength=0;
+        size_t offset=0;
+        const BYTE* start=ip+1;
+
+        /* check repCode */
+        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
+            /* repcode : we take it */
+            matchLength = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
+            if (depth==0) goto _storeSequence;
+        }
+
+        /* first search (depth 0) */
+        {   size_t offsetFound = 99999999;
+            size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+            if (ml2 > matchLength)
+                matchLength = ml2, start = ip, offset=offsetFound;
+        }
+
+        if (matchLength < EQUAL_READ32) {
+            ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
+            continue;
+        }
+
+        /* let's try to find a better solution */
+        if (depth>=1)
+        while (ip<ilimit) {
+            ip ++;
+            if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                size_t const mlRep = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
+                int const gain2 = (int)(mlRep * 3);
+                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
+                    matchLength = mlRep, offset = 0, start = ip;
+            }
+            {   size_t offset2=99999999;
+                size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+                if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+                    matchLength = ml2, offset = offset2, start = ip;
+                    continue;   /* search a better one */
+            }   }
+
+            /* let's find an even better one */
+            if ((depth==2) && (ip<ilimit)) {
+                ip ++;
+                if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                    size_t const ml2 = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
+                    int const gain2 = (int)(ml2 * 4);
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
+                        matchLength = ml2, offset = 0, start = ip;
+                }
+                {   size_t offset2=99999999;
+                    size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+                    if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+                        matchLength = ml2, offset = offset2, start = ip;
+                        continue;
+            }   }   }
+            break;  /* nothing found : store previous solution */
+        }
+
+        /* catch up */
+        if (offset) {
+            while ((start>anchor) && (start>base+offset-ZSTD_REP_MOVE) && (start[-1] == start[-1-offset+ZSTD_REP_MOVE]))   /* only search for offset within prefix */
+                { start--; matchLength++; }
+            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+        }
+
+        /* store sequence */
+_storeSequence:
+        {   size_t const litLength = start - anchor;
+            ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+            anchor = ip = start + matchLength;
+        }
+
+        /* check immediate repcode */
+        while ( (ip <= ilimit)
+             && ((offset_2>0)
+             & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+            /* store sequence */
+            matchLength = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32;
+            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+            ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
+            ip += matchLength;
+            anchor = ip;
+            continue;   /* faster when present ... (?) */
+    }   }
+
+    /* Save reps for next block */
+    ctx->savedRep[0] = offset_1 ? offset_1 : savedOffset;
+    ctx->savedRep[1] = offset_2 ? offset_2 : savedOffset;
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
+
+
+static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
+}
+
+static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
+}
+
+static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
+}
+
+static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
+}
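+
+/* Illustrative note (not upstream code): the four wrappers above differ only
+ * in (searchMethod, depth) : searchMethod selects the match finder (0 = hash
+ * chain, 1 = binary tree) and depth how many times a found match may be
+ * deferred by one position in search of a better one (0 = greedy). A
+ * hypothetical extra variant would be wired the same way: */
+#if 0
+static void ZSTD_compressBlock_btgreedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1 /*bt*/, 0 /*greedy*/);
+}
+#endif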
+
+
+FORCE_INLINE
+void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
+                                     const void* src, size_t srcSize,
+                                     const U32 searchMethod, const U32 depth)
+{
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ctx->base;
+    const U32 dictLimit = ctx->dictLimit;
+    const U32 lowestIndex = ctx->lowLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* const dictBase = ctx->dictBase;
+    const BYTE* const dictEnd  = dictBase + dictLimit;
+    const BYTE* const dictStart  = dictBase + ctx->lowLimit;
+
+    const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
+    const U32 mls = ctx->params.cParams.searchLength;
+
+    typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+                        size_t* offsetPtr,
+                        U32 maxNbAttempts, U32 matchLengthSearch);
+    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+    U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
+
+    /* init */
+    ctx->nextToUpdate3 = ctx->nextToUpdate;
+    ip += (ip == prefixStart);
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        size_t matchLength=0;
+        size_t offset=0;
+        const BYTE* start=ip+1;
+        U32 current = (U32)(ip-base);
+
+        /* check repCode */
+        {   const U32 repIndex = (U32)(current+1 - offset_1);
+            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+            const BYTE* const repMatch = repBase + repIndex;
+            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
+            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+                /* repcode detected : we should take it */
+                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                matchLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+                if (depth==0) goto _storeSequence;
+        }   }
+
+        /* first search (depth 0) */
+        {   size_t offsetFound = 99999999;
+            size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+            if (ml2 > matchLength)
+                matchLength = ml2, start = ip, offset=offsetFound;
+        }
+
+        if (matchLength < EQUAL_READ32) {
+            ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
+            continue;
+        }
+
+        /* let's try to find a better solution */
+        if (depth>=1)
+        while (ip<ilimit) {
+            ip ++;
+            current++;
+            /* check repCode */
+            if (offset) {
+                const U32 repIndex = (U32)(current - offset_1);
+                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                const BYTE* const repMatch = repBase + repIndex;
+                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+                if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                    /* repcode detected */
+                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                    size_t const repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+                    int const gain2 = (int)(repLength * 3);
+                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
+                        matchLength = repLength, offset = 0, start = ip;
+            }   }
+
+            /* search match, depth 1 */
+            {   size_t offset2=99999999;
+                size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+                if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+                    matchLength = ml2, offset = offset2, start = ip;
+                    continue;   /* search a better one */
+            }   }
+
+            /* let's find an even better one */
+            if ((depth==2) && (ip<ilimit)) {
+                ip ++;
+                current++;
+                /* check repCode */
+                if (offset) {
+                    const U32 repIndex = (U32)(current - offset_1);
+                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                    const BYTE* const repMatch = repBase + repIndex;
+                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                        /* repcode detected */
+                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                        size_t repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+                        int gain2 = (int)(repLength * 4);
+                        int gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                        if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
+                            matchLength = repLength, offset = 0, start = ip;
+                }   }
+
+                /* search match, depth 2 */
+                {   size_t offset2=99999999;
+                    size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+                    if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+                        matchLength = ml2, offset = offset2, start = ip;
+                        continue;
+            }   }   }
+            break;  /* nothing found : store previous solution */
+        }
+
+        /* catch up */
+        if (offset) {
+            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
+            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+        }
+
+        /* store sequence */
+_storeSequence:
+        {   size_t const litLength = start - anchor;
+            ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+            anchor = ip = start + matchLength;
+        }
+
+        /* check immediate repcode */
+        while (ip <= ilimit) {
+            const U32 repIndex = (U32)((ip-base) - offset_2);
+            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+            const BYTE* const repMatch = repBase + repIndex;
+            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+            if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                /* repcode detected : we should take it */
+                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                matchLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
+                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
+                ip += matchLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+            }
+            break;
+    }   }
+
+    /* Save reps for next block */
+    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
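+
+/* Worked example (editor addition, not upstream zstd) : the repcode guard
+ *    ((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)
+ * uses unsigned wrap-around to accept a repIndex on either side of dictLimit
+ * while rejecting candidates whose first 4 bytes would straddle dictEnd.
+ * With dictLimit==100 : repIndex==96 gives 99-96==3, accepted (bytes 96..99
+ * readable from dictBase) ; repIndex==98 gives 1, rejected (MEM_read32 would
+ * cross the extDict boundary) ; repIndex==150 wraps to a huge value, accepted
+ * (the match lies entirely within the current prefix). */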
+
+
+void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
+}
+
+static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
+}
+
+static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
+}
+
+static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
+}
+
+
+/* The optimal parser */
+#include "zstd_opt.h"
+
+static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+    ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
+#else
+    (void)ctx; (void)src; (void)srcSize;
+    return;
+#endif
+}
+
+static void ZSTD_compressBlock_btopt2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+    ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
+#else
+    (void)ctx; (void)src; (void)srcSize;
+    return;
+#endif
+}
+
+static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+    ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
+#else
+    (void)ctx; (void)src; (void)srcSize;
+    return;
+#endif
+}
+
+static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+    ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
+#else
+    (void)ctx; (void)src; (void)srcSize;
+    return;
+#endif
+}
+
+
+typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+
+static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+{
+    static const ZSTD_blockCompressor blockCompressor[2][8] = {
+        { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2 },
+        { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict }
+    };
+
+    return blockCompressor[extDict][(U32)strat];
+}
+
+
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
+    const BYTE* const base = zc->base;
+    const BYTE* const istart = (const BYTE*)src;
+    const U32 current = (U32)(istart-base);
+    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0;   /* don't even attempt compression below a certain srcSize */
+    ZSTD_resetSeqStore(&(zc->seqStore));
+    if (current > zc->nextToUpdate + 384)
+        zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));   /* bound the catch-up work : the match-finder tables were not updated while skipping very long rep matches */
+    blockCompressor(zc, src, srcSize);
+    return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
+}
+
+
+/*! ZSTD_compress_generic() :
+*   Compress a chunk of data into one or multiple blocks.
+*   All blocks will be terminated, all input will be consumed.
+*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+*   Frame is assumed to be already started (header already produced)
+*   @return : compressed size, or an error code
+*/
+static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
+                                     void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize,
+                                     U32 lastFrameChunk)
+{
+    size_t blockSize = cctx->blockSize;
+    size_t remaining = srcSize;
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* op = ostart;
+    U32 const maxDist = 1 << cctx->params.cParams.windowLog;
+
+    if (cctx->params.fParams.checksumFlag && srcSize)
+        XXH64_update(&cctx->xxhState, src, srcSize);
+
+    while (remaining) {
+        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+        size_t cSize;
+
+        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
+        if (remaining < blockSize) blockSize = remaining;
+
+        /* preemptive overflow correction */
+        if (cctx->lowLimit > (2U<<30)) {
+            U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
+            U32 const current = (U32)(ip - cctx->base);
+            U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog);
+            U32 const correction = current - newCurrent;
+            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
+            ZSTD_reduceIndex(cctx, correction);
+            cctx->base += correction;
+            cctx->dictBase += correction;
+            cctx->lowLimit -= correction;
+            cctx->dictLimit -= correction;
+            if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
+            else cctx->nextToUpdate -= correction;
+        }
+
+        if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
+            /* enforce maxDist */
+            U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist;
+            if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit;
+            if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit;
+        }
+
+        cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize);
+        if (ZSTD_isError(cSize)) return cSize;
+
+        if (cSize == 0) {  /* block is not compressible */
+            U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
+            if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+            MEM_writeLE32(op, cBlockHeader24);   /* no problem : the 4th byte will be overwritten by the copied content */
+            memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
+            cSize = ZSTD_blockHeaderSize+blockSize;
+        } else {
+            U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+            MEM_writeLE24(op, cBlockHeader24);
+            cSize += ZSTD_blockHeaderSize;
+        }
+
+        remaining -= blockSize;
+        dstCapacity -= cSize;
+        ip += blockSize;
+        op += cSize;
+    }
+
+    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+    return op-ostart;
+}
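+
+/* Worked example (editor addition, not upstream zstd) : the 24-bit block header
+ * packs lastBlock (bit 0), the block type (bits 1-2) and the size (bits 3+).
+ * Assuming bt_raw==0 as in blockType_e, a final raw block of 1000 bytes gives
+ * cBlockHeader24 = 1 + (0<<1) + (1000<<3) = 8001 = 0x001F41, stored
+ * little-endian : 0x41 0x1F 0x00 (the raw path writes 4 bytes, knowing the
+ * 4th is immediately overwritten by the copied content). */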
+
+
+static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+                                    ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
+{   BYTE* const op = (BYTE*)dst;
+    U32   const dictIDSizeCode = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
+    U32   const checksumFlag = params.fParams.checksumFlag>0;
+    U32   const windowSize = 1U << params.cParams.windowLog;
+    U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize > (pledgedSrcSize-1));
+    BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+    U32   const fcsCode = params.fParams.contentSizeFlag ?
+                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) :   /* 0-3 */
+                      0;
+    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+    size_t pos;
+
+    if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
+
+    MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+    op[4] = frameHeaderDescriptionByte; pos=5;
+    if (!singleSegment) op[pos++] = windowLogByte;
+    switch(dictIDSizeCode)
+    {
+        default:   /* impossible */
+        case 0 : break;
+        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+    }
+    switch(fcsCode)
+    {
+        default:   /* impossible */
+        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+    }
+    return pos;
+}
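+
+/* Worked example (editor addition, not upstream zstd) : the frame header
+ * descriptor byte packs dictIDSizeCode (bits 0-1), checksumFlag (bit 2),
+ * singleSegment (bit 5) and fcsCode (bits 6-7). A frame with no dictID,
+ * checksum enabled, a single segment and fcsCode==0 yields
+ * 0 + (1<<2) + (1<<5) + (0<<6) = 0x24, and pledgedSrcSize is then written
+ * as a single byte since singleSegment is set. */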
+
+
+static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+                              void* dst, size_t dstCapacity,
+                        const void* src, size_t srcSize,
+                               U32 frame, U32 lastFrameChunk)
+{
+    const BYTE* const ip = (const BYTE*) src;
+    size_t fhSize = 0;
+
+    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
+
+    if (frame && (cctx->stage==ZSTDcs_init)) {
+        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
+        if (ZSTD_isError(fhSize)) return fhSize;
+        dstCapacity -= fhSize;
+        dst = (char*)dst + fhSize;
+        cctx->stage = ZSTDcs_ongoing;
+    }
+
+    /* Check if blocks follow each other */
+    if (src != cctx->nextSrc) {
+        /* not contiguous */
+        ptrdiff_t const delta = cctx->nextSrc - ip;
+        cctx->lowLimit = cctx->dictLimit;
+        cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
+        cctx->dictBase = cctx->base;
+        cctx->base -= delta;
+        cctx->nextToUpdate = cctx->dictLimit;
+        if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit;   /* too small extDict */
+    }
+
+    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+    if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
+        ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
+        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
+        cctx->lowLimit = lowLimitMax;
+    }
+
+    cctx->nextSrc = ip + srcSize;
+
+    {   size_t const cSize = frame ?
+                             ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
+        if (ZSTD_isError(cSize)) return cSize;
+        return cSize + fhSize;
+    }
+}
+
+
+size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+                              void* dst, size_t dstCapacity,
+                        const void* src, size_t srcSize)
+{
+    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
+}
+
+
+size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx)
+{
+    return MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog);
+}
+
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
+    if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
+}
+
+
+static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+{
+    const BYTE* const ip = (const BYTE*) src;
+    const BYTE* const iend = ip + srcSize;
+
+    /* input becomes current prefix */
+    zc->lowLimit = zc->dictLimit;
+    zc->dictLimit = (U32)(zc->nextSrc - zc->base);
+    zc->dictBase = zc->base;
+    zc->base += ip - zc->nextSrc;
+    zc->nextToUpdate = zc->dictLimit;
+    zc->loadedDictEnd = (U32)(iend - zc->base);
+
+    zc->nextSrc = iend;
+    if (srcSize <= HASH_READ_SIZE) return 0;
+
+    switch(zc->params.cParams.strategy)
+    {
+    case ZSTD_fast:
+        ZSTD_fillHashTable (zc, iend, zc->params.cParams.searchLength);
+        break;
+
+    case ZSTD_dfast:
+        ZSTD_fillDoubleHashTable (zc, iend, zc->params.cParams.searchLength);
+        break;
+
+    case ZSTD_greedy:
+    case ZSTD_lazy:
+    case ZSTD_lazy2:
+        ZSTD_insertAndFindFirstIndex (zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength);
+        break;
+
+    case ZSTD_btlazy2:
+    case ZSTD_btopt:
+    case ZSTD_btopt2:
+        ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
+        break;
+
+    default:
+        return ERROR(GENERIC);   /* strategy doesn't exist; impossible */
+    }
+
+    zc->nextToUpdate = zc->loadedDictEnd;
+    return 0;
+}
+
+
+/* Dictionaries that assign zero probability to symbols that do show up cause
+   problems during FSE encoding.  Refuse dictionaries that assign zero
+   probability to symbols that we may encounter during compression.
+   NOTE: This behavior is not standard and could be improved in the future. */
+static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
+    U32 s;
+    if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
+    for (s = 0; s <= maxSymbolValue; ++s) {
+        if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
+    }
+    return 0;
+}
+
+
+/* Dictionary format :
+    Magic == ZSTD_DICT_MAGIC (4 bytes)
+    HUF_writeCTable(256)
+    FSE_writeNCount(off)
+    FSE_writeNCount(ml)
+    FSE_writeNCount(ll)
+    RepOffsets
+    Dictionary content
+*/
+/*! ZSTD_loadDictEntropyStats() :
+    @return : size read from dictionary
+    note : the magic number is assumed to be already checked */
+static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+    short offcodeNCount[MaxOff+1];
+    unsigned offcodeMaxValue = MaxOff;
+    BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+    {   size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dict, dictSize);
+        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
+        dictPtr += hufHeaderSize;
+    }
+
+    {   unsigned offcodeLog;
+        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+        CHECK_E (FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
+        dictPtr += offcodeHeaderSize;
+    }
+
+    {   short matchlengthNCount[MaxML+1];
+        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        /* Every match length code must have non-zero probability */
+        CHECK_F (ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+        CHECK_E (FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
+        dictPtr += matchlengthHeaderSize;
+    }
+
+    {   short litlengthNCount[MaxLL+1];
+        unsigned litlengthMaxValue = MaxLL, litlengthLog;
+        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        /* Every literal length code must have non-zero probability */
+        CHECK_F (ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+        CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
+        dictPtr += litlengthHeaderSize;
+    }
+
+    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+    dictPtr += 12;
+
+    {   U32 offcodeMax = MaxOff;
+        if ((size_t)(dictEnd - dictPtr) <= ((U32)-1) - 128 KB) {
+            U32 const maxOffset = (U32)(dictEnd - dictPtr) + 128 KB; /* The maximum offset that must be supported */
+            /* Calculate minimum offset code required to represent maxOffset */
+            offcodeMax = ZSTD_highbit32(maxOffset);
+        }
+        /* Every possible supported offset <= dictContentSize + 128 KB must be representable */
+        CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+    }
+
+    cctx->flagStaticTables = 1;
+    return dictPtr - (const BYTE*)dict;
+}
+
+/** ZSTD_compress_insertDictionary() :
+*   @return : 0, or an error code */
+static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* zc, const void* dict, size_t dictSize)
+{
+    if ((dict==NULL) || (dictSize<=8)) return 0;
+
+    /* default : dict is pure content */
+    if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return ZSTD_loadDictionaryContent(zc, dict, dictSize);
+    zc->dictID = zc->params.fParams.noDictIDFlag ? 0 :  MEM_readLE32((const char*)dict+4);
+
+    /* known magic number : dict is parsed for entropy stats and content */
+    {   size_t const loadError = ZSTD_loadDictEntropyStats(zc, (const char*)dict+8 /* skip dictHeader */, dictSize-8);
+        size_t const eSize = loadError + 8;
+        if (ZSTD_isError(loadError)) return loadError;
+        return ZSTD_loadDictionaryContent(zc, (const char*)dict+eSize, dictSize-eSize);
+    }
+}
+
+
+/*! ZSTD_compressBegin_internal() :
+*   @return : 0, or an error code */
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+                             const void* dict, size_t dictSize,
+                                   ZSTD_parameters params, U64 pledgedSrcSize)
+{
+    ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
+    CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
+    return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
+}
+
+
+/*! ZSTD_compressBegin_advanced() :
+*   @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+                             const void* dict, size_t dictSize,
+                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    /* compression parameters verification and optimization */
+    CHECK_F(ZSTD_checkCParams(params.cParams));
+    return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
+}
+
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
+    return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
+}
+
+
+size_t ZSTD_compressBegin(ZSTD_CCtx* zc, int compressionLevel)
+{
+    return ZSTD_compressBegin_usingDict(zc, NULL, 0, compressionLevel);
+}
+
+
+/*! ZSTD_writeEpilogue() :
+*   Ends a frame.
+*   @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+{
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* op = ostart;
+    size_t fhSize = 0;
+
+    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
+
+    /* special case : empty frame */
+    if (cctx->stage == ZSTDcs_init) {
+        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
+        if (ZSTD_isError(fhSize)) return fhSize;
+        dstCapacity -= fhSize;
+        op += fhSize;
+        cctx->stage = ZSTDcs_ongoing;
+    }
+
+    if (cctx->stage != ZSTDcs_ending) {
+        /* write one last empty block, make it the "last" block */
+        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+        MEM_writeLE32(op, cBlockHeader24);
+        op += ZSTD_blockHeaderSize;
+        dstCapacity -= ZSTD_blockHeaderSize;
+    }
+
+    if (cctx->params.fParams.checksumFlag) {
+        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
+        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+        MEM_writeLE32(op, checksum);
+        op += 4;
+    }
+
+    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
+    return op-ostart;
+}
+
+
+size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+                         void* dst, size_t dstCapacity,
+                   const void* src, size_t srcSize)
+{
+    size_t endResult;
+    size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
+    if (ZSTD_isError(cSize)) return cSize;
+    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+    if (ZSTD_isError(endResult)) return endResult;
+    return cSize + endResult;
+}
+
+
+static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                         const void* dict,size_t dictSize,
+                               ZSTD_parameters params)
+{
+    CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
+    return ZSTD_compressEnd(cctx, dst,  dstCapacity, src, srcSize);
+}
+
+size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                         const void* dict,size_t dictSize,
+                               ZSTD_parameters params)
+{
+    CHECK_F(ZSTD_checkCParams(params.cParams));
+    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+}
+
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dict ? dictSize : 0);
+    params.fParams.contentSizeFlag = 1;
+    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+}
+
+size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+{
+    return ZSTD_compress_usingDict(ctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+}
+
+size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+{
+    size_t result;
+    ZSTD_CCtx ctxBody;
+    memset(&ctxBody, 0, sizeof(ctxBody));
+    memcpy(&ctxBody.customMem, &defaultCustomMem, sizeof(ZSTD_customMem));
+    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
+    ZSTD_free(ctxBody.workSpace, defaultCustomMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
+    return result;
+}
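+
+/* Usage sketch (editor addition, not upstream zstd) : one-shot compression
+ * through the public API above. ZSTD_compressBound() gives a worst-case
+ * destination budget, so a buffer of that size can never be too small.
+ * Guarded by "#if 0" since it is illustrative only. */
+#if 0
+static size_t example_compress_once(void* dst, size_t dstCapacity,
+                                    const void* src, size_t srcSize)
+{
+    size_t const needed = ZSTD_compressBound(srcSize);
+    if (dstCapacity < needed) return ERROR(dstSize_tooSmall);
+    return ZSTD_compress(dst, dstCapacity, src, srcSize, 3 /* level */);
+}
+#endif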
+
+
+/* =====  Dictionary API  ===== */
+
+struct ZSTD_CDict_s {
+    void* dictContent;
+    size_t dictContentSize;
+    ZSTD_CCtx* refContext;
+};  /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+{
+    if (cdict==NULL) return 0;   /* support sizeof on NULL */
+    return ZSTD_sizeof_CCtx(cdict->refContext) + cdict->dictContentSize;
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_parameters params, ZSTD_customMem customMem)
+{
+    if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+    if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+    {   ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
+        void* const dictContent = ZSTD_malloc(dictSize, customMem);
+        ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
+
+        if (!dictContent || !cdict || !cctx) {
+            ZSTD_free(dictContent, customMem);
+            ZSTD_free(cdict, customMem);
+            ZSTD_free(cctx, customMem);
+            return NULL;
+        }
+
+        if (dictSize) {
+            memcpy(dictContent, dict, dictSize);
+        }
+        {   size_t const errorCode = ZSTD_compressBegin_advanced(cctx, dictContent, dictSize, params, 0);
+            if (ZSTD_isError(errorCode)) {
+                ZSTD_free(dictContent, customMem);
+                ZSTD_free(cdict, customMem);
+                ZSTD_free(cctx, customMem);
+                return NULL;
+        }   }
+
+        cdict->dictContent = dictContent;
+        cdict->dictContentSize = dictSize;
+        cdict->refContext = cctx;
+        return cdict;
+    }
+}
+
+ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
+    params.fParams.contentSizeFlag = 1;
+    return ZSTD_createCDict_advanced(dict, dictSize, params, allocator);
+}
+
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
+    if (cdict==NULL) return 0;   /* support free on NULL */
+    {   ZSTD_customMem const cMem = cdict->refContext->customMem;
+        ZSTD_freeCCtx(cdict->refContext);
+        ZSTD_free(cdict->dictContent, cMem);
+        ZSTD_free(cdict, cMem);
+        return 0;
+    }
+}
+
+static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) {
+    return ZSTD_getParamsFromCCtx(cdict->refContext);
+}
+
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, U64 pledgedSrcSize)
+{
+    if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
+    else CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, cdict->refContext->params, pledgedSrcSize));
+    return 0;
+}
+
+/*! ZSTD_compress_usingCDict() :
+*   Compression using a digested Dictionary.
+*   Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+*   Note that compression level is decided during dictionary creation */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+                                void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize,
+                                const ZSTD_CDict* cdict)
+{
+    CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
+
+    if (cdict->refContext->params.fParams.contentSizeFlag==1) {
+        cctx->params.fParams.contentSizeFlag = 1;
+        cctx->frameContentSize = srcSize;
+    }
+
+    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
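+
+/* Usage sketch (editor addition, not upstream zstd) : compressing with one
+ * digested dictionary. The CDict is built once and reused across calls, which
+ * skips re-parsing the dictionary each time. Illustrative only, hence "#if 0". */
+#if 0
+static void example_compress_with_cdict(const void* dict, size_t dictSize,
+                                        const void* src, size_t srcSize,
+                                        void* dst, size_t dstCapacity)
+{
+    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3 /* level */);
+    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
+    if (cdict && cctx) {
+        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+                                                      src, srcSize, cdict);
+        (void)cSize;   /* callers should test the result with ZSTD_isError() */
+    }
+    ZSTD_freeCCtx(cctx);    /* both free functions accept NULL */
+    ZSTD_freeCDict(cdict);
+}
+#endif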
+
+
+
+/* ******************************************************************
+*  Streaming
+********************************************************************/
+
+typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
+
+struct ZSTD_CStream_s {
+    ZSTD_CCtx* cctx;
+    ZSTD_CDict* cdictLocal;
+    const ZSTD_CDict* cdict;
+    char*  inBuff;
+    size_t inBuffSize;
+    size_t inToCompress;
+    size_t inBuffPos;
+    size_t inBuffTarget;
+    size_t blockSize;
+    char*  outBuff;
+    size_t outBuffSize;
+    size_t outBuffContentSize;
+    size_t outBuffFlushedSize;
+    ZSTD_cStreamStage stage;
+    U32    checksum;
+    U32    frameEnded;
+    U64    pledgedSrcSize;
+    U64    inputProcessed;
+    ZSTD_parameters params;
+    ZSTD_customMem customMem;
+};   /* typedef'd to ZSTD_CStream within "zstd.h" */
+
+ZSTD_CStream* ZSTD_createCStream(void)
+{
+    return ZSTD_createCStream_advanced(defaultCustomMem);
+}
+
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{
+    ZSTD_CStream* zcs;
+
+    if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+    if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+    zcs = (ZSTD_CStream*)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
+    if (zcs==NULL) return NULL;
+    memset(zcs, 0, sizeof(ZSTD_CStream));
+    memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
+    zcs->cctx = ZSTD_createCCtx_advanced(customMem);
+    if (zcs->cctx == NULL) { ZSTD_freeCStream(zcs); return NULL; }
+    return zcs;
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+    if (zcs==NULL) return 0;   /* support free on NULL */
+    {   ZSTD_customMem const cMem = zcs->customMem;
+        ZSTD_freeCCtx(zcs->cctx);
+        ZSTD_freeCDict(zcs->cdictLocal);
+        ZSTD_free(zcs->inBuff, cMem);
+        ZSTD_free(zcs->outBuff, cMem);
+        ZSTD_free(zcs, cMem);
+        return 0;
+    }
+}
+
+
+/*======   Initialization   ======*/
+
+size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
+size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; }
+
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
+{
+    if (zcs->inBuffSize==0) return ERROR(stage_wrong);   /* zcs has not been initialized at least once */
+
+    if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
+    else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
+
+    zcs->inToCompress = 0;
+    zcs->inBuffPos = 0;
+    zcs->inBuffTarget = zcs->blockSize;
+    zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+    zcs->stage = zcss_load;
+    zcs->frameEnded = 0;
+    zcs->pledgedSrcSize = pledgedSrcSize;
+    zcs->inputProcessed = 0;
+    return 0;   /* ready to go */
+}
+
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+                                 const void* dict, size_t dictSize,
+                                 ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    /* allocate buffers */
+    {   size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
+        if (zcs->inBuffSize < neededInBuffSize) {
+            zcs->inBuffSize = neededInBuffSize;
+            ZSTD_free(zcs->inBuff, zcs->customMem);
+            zcs->inBuff = (char*) ZSTD_malloc(neededInBuffSize, zcs->customMem);
+            if (zcs->inBuff == NULL) return ERROR(memory_allocation);
+        }
+        zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
+    }
+    if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) {
+        zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize)+1;
+        ZSTD_free(zcs->outBuff, zcs->customMem);
+        zcs->outBuff = (char*) ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
+        if (zcs->outBuff == NULL) return ERROR(memory_allocation);
+    }
+
+    if (dict) {
+        ZSTD_freeCDict(zcs->cdictLocal);
+        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, params, zcs->customMem);
+        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
+        zcs->cdict = zcs->cdictLocal;
+    } else zcs->cdict = NULL;
+
+    zcs->checksum = params.fParams.checksumFlag > 0;
+    zcs->params = params;
+
+    return ZSTD_resetCStream(zcs, pledgedSrcSize);
+}
+
+/* note : cdict must outlive compression session */
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
+{
+    ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
+    size_t const initError =  ZSTD_initCStream_advanced(zcs, NULL, 0, params, 0);
+    zcs->cdict = cdict;
+    return initError;
+}
+
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
+    return ZSTD_initCStream_advanced(zcs, dict, dictSize, params, 0);
+}
+
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
+{
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+    return ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
+}
+
+size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+{
+    return ZSTD_initCStream_usingDict(zcs, NULL, 0, compressionLevel);
+}
+
+size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+{
+    if (zcs==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*zcs) /* the structure itself, not the pointer */ + ZSTD_sizeof_CCtx(zcs->cctx) + ZSTD_sizeof_CDict(zcs->cdictLocal) + zcs->outBuffSize + zcs->inBuffSize;
+}
+
+/*======   Compression   ======*/
+
+typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    size_t const length = MIN(dstCapacity, srcSize);
+    memcpy(dst, src, length);
+    return length;
+}
+
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+                              void* dst, size_t* dstCapacityPtr,
+                        const void* src, size_t* srcSizePtr,
+                              ZSTD_flush_e const flush)
+{
+    U32 someMoreWork = 1;
+    const char* const istart = (const char*)src;
+    const char* const iend = istart + *srcSizePtr;
+    const char* ip = istart;
+    char* const ostart = (char*)dst;
+    char* const oend = ostart + *dstCapacityPtr;
+    char* op = ostart;
+
+    while (someMoreWork) {
+        switch(zcs->stage)
+        {
+        case zcss_init: return ERROR(init_missing);   /* call ZSTD_initCStream() first ! */
+
+        case zcss_load:
+            /* complete inBuffer */
+            {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+                size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip);
+                zcs->inBuffPos += loaded;
+                ip += loaded;
+                if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) {
+                    someMoreWork = 0; break;  /* not enough input to get a full block : stop there, wait for more */
+            }   }
+            /* compress current block (note : this stage cannot be stopped in the middle) */
+            {   void* cDst;
+                size_t cSize;
+                size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
+                size_t oSize = oend-op;
+                if (oSize >= ZSTD_compressBound(iSize))
+                    cDst = op;   /* compress directly into output buffer (avoid flush stage) */
+                else
+                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+                cSize = (flush == zsf_end) ?
+                        ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) :
+                        ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
+                if (ZSTD_isError(cSize)) return cSize;
+                if (flush == zsf_end) zcs->frameEnded = 1;
+                /* prepare next block */
+                zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+                if (zcs->inBuffTarget > zcs->inBuffSize)
+                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;   /* note : inBuffSize >= blockSize */
+                zcs->inToCompress = zcs->inBuffPos;
+                if (cDst == op) { op += cSize; break; }   /* no need to flush */
+                zcs->outBuffContentSize = cSize;
+                zcs->outBuffFlushedSize = 0;
+                zcs->stage = zcss_flush;   /* pass-through to flush stage */
+            }
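+            /* fall through to zcss_flush (editor note : intentional switch fall-through) */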
+
+        case zcss_flush:
+            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+                size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+                op += flushed;
+                zcs->outBuffFlushedSize += flushed;
+                if (toFlush!=flushed) { someMoreWork = 0; break; }  /* dst too small to store flushed data : stop there */
+                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+                zcs->stage = zcss_load;
+                break;
+            }
+
+        case zcss_final:
+            someMoreWork = 0;   /* do nothing */
+            break;
+
+        default:
+            return ERROR(GENERIC);   /* impossible */
+        }
+    }
+
+    *srcSizePtr = ip - istart;
+    *dstCapacityPtr = op - ostart;
+    zcs->inputProcessed += *srcSizePtr;
+    if (zcs->frameEnded) return 0;
+    {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
+        if (hintInSize==0) hintInSize = zcs->blockSize;
+        return hintInSize;
+    }
+}
+
+size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    size_t sizeRead = input->size - input->pos;
+    size_t sizeWritten = output->size - output->pos;
+    size_t const result = ZSTD_compressStream_generic(zcs,
+                                                      (char*)(output->dst) + output->pos, &sizeWritten,
+                                                      (const char*)(input->src) + input->pos, &sizeRead, zsf_gather);
+    input->pos += sizeRead;
+    output->pos += sizeWritten;
+    return result;
+}
+
+
+/*======   Finalize   ======*/
+
+/*! ZSTD_flushStream() :
+*   @return : amount of data remaining to flush */
+size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+    size_t srcSize = 0;
+    size_t sizeWritten = output->size - output->pos;
+    size_t const result = ZSTD_compressStream_generic(zcs,
+                                                     (char*)(output->dst) + output->pos, &sizeWritten,
+                                                     &srcSize, &srcSize, /* use a valid src address instead of NULL */
+                                                      zsf_flush);
+    output->pos += sizeWritten;
+    if (ZSTD_isError(result)) return result;
+    return zcs->outBuffContentSize - zcs->outBuffFlushedSize;   /* remaining to flush */
+}
+
+
+size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+    BYTE* const ostart = (BYTE*)(output->dst) + output->pos;
+    BYTE* const oend = (BYTE*)(output->dst) + output->size;
+    BYTE* op = ostart;
+
+    if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
+        return ERROR(srcSize_wrong);   /* pledgedSrcSize not respected */
+
+    if (zcs->stage != zcss_final) {
+        /* flush whatever remains */
+        size_t srcSize = 0;
+        size_t sizeWritten = output->size - output->pos;
+        size_t const notEnded = ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end);  /* use a valid src address instead of NULL */
+        size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+        op += sizeWritten;
+        if (remainingToFlush) {
+            output->pos += sizeWritten;
+            return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
+        }
+        /* create epilogue */
+        zcs->stage = zcss_final;
+        zcs->outBuffContentSize = !notEnded ? 0 :
+            ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, 0);  /* write epilogue, including final empty block, into outBuff */
+    }
+
+    /* flush epilogue */
+    {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+        size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+        op += flushed;
+        zcs->outBuffFlushedSize += flushed;
+        output->pos += op-ostart;
+        if (toFlush==flushed) zcs->stage = zcss_init;  /* end reached */
+        return toFlush - flushed;
+    }
+}
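+
+/* Usage sketch (editor addition, not upstream zstd) : the canonical streaming
+ * loop over the three entry points above. Assumes <stdio.h> for FILE I/O and
+ * caller-provided buffers of the recommended sizes; illustrative only, hence
+ * "#if 0". */
+#if 0
+static size_t example_stream_file(ZSTD_CStream* zcs, FILE* fin, FILE* fout,
+                                  void* inBuff, void* outBuff)
+{
+    size_t const inSize  = ZSTD_CStreamInSize();    /* recommended buffer sizes */
+    size_t const outSize = ZSTD_CStreamOutSize();
+    CHECK_F(ZSTD_initCStream(zcs, 3 /* level */));
+    for (;;) {
+        size_t const readSize = fread(inBuff, 1, inSize, fin);
+        ZSTD_inBuffer input = { inBuff, readSize, 0 };
+        while (input.pos < input.size) {
+            ZSTD_outBuffer output = { outBuff, outSize, 0 };
+            CHECK_F(ZSTD_compressStream(zcs, &output, &input));
+            fwrite(outBuff, 1, output.pos, fout);
+        }
+        if (readSize < inSize) break;   /* reached end of input */
+    }
+    {   ZSTD_outBuffer output = { outBuff, outSize, 0 };
+        size_t const remaining = ZSTD_endStream(zcs, &output);   /* 0 == frame fully flushed */
+        fwrite(outBuff, 1, output.pos, fout);
+        return remaining;   /* if non-zero, call ZSTD_endStream() again with more room */
+    }
+}
+#endif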
+
+
+
+/*-=====  Pre-defined compression levels  =====-*/
+
+#define ZSTD_DEFAULT_CLEVEL 1
+#define ZSTD_MAX_CLEVEL     22
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+{   /* "default" */
+    /* W,  C,  H,  S,  L, TL, strat */
+    { 18, 12, 12,  1,  7, 16, ZSTD_fast    },  /* level  0 - never used */
+    { 19, 13, 14,  1,  7, 16, ZSTD_fast    },  /* level  1 */
+    { 19, 15, 16,  1,  6, 16, ZSTD_fast    },  /* level  2 */
+    { 20, 16, 17,  1,  5, 16, ZSTD_dfast   },  /* level  3.*/
+    { 20, 18, 18,  1,  5, 16, ZSTD_dfast   },  /* level  4.*/
+    { 20, 15, 18,  3,  5, 16, ZSTD_greedy  },  /* level  5 */
+    { 21, 16, 19,  2,  5, 16, ZSTD_lazy    },  /* level  6 */
+    { 21, 17, 20,  3,  5, 16, ZSTD_lazy    },  /* level  7 */
+    { 21, 18, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
+    { 21, 20, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  9 */
+    { 21, 19, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
+    { 22, 20, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
+    { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
+    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 13 */
+    { 22, 21, 22,  6,  5, 16, ZSTD_lazy2   },  /* level 14 */
+    { 22, 21, 21,  5,  5, 16, ZSTD_btlazy2 },  /* level 15 */
+    { 23, 22, 22,  5,  5, 16, ZSTD_btlazy2 },  /* level 16 */
+    { 23, 21, 22,  4,  5, 24, ZSTD_btopt   },  /* level 17 */
+    { 23, 23, 22,  6,  5, 32, ZSTD_btopt   },  /* level 18 */
+    { 23, 23, 22,  6,  3, 48, ZSTD_btopt   },  /* level 19 */
+    { 25, 25, 23,  7,  3, 64, ZSTD_btopt2  },  /* level 20 */
+    { 26, 26, 23,  7,  3,256, ZSTD_btopt2  },  /* level 21 */
+    { 27, 27, 25,  9,  3,512, ZSTD_btopt2  },  /* level 22 */
+},
+{   /* for srcSize <= 256 KB */
+    /* W,  C,  H,  S,  L,  T, strat */
+    {  0,  0,  0,  0,  0,  0, ZSTD_fast    },  /* level  0 - not used */
+    { 18, 13, 14,  1,  6,  8, ZSTD_fast    },  /* level  1 */
+    { 18, 14, 13,  1,  5,  8, ZSTD_dfast   },  /* level  2 */
+    { 18, 16, 15,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
+    { 18, 15, 17,  1,  5,  8, ZSTD_greedy  },  /* level  4.*/
+    { 18, 16, 17,  4,  5,  8, ZSTD_greedy  },  /* level  5.*/
+    { 18, 16, 17,  3,  5,  8, ZSTD_lazy    },  /* level  6.*/
+    { 18, 17, 17,  4,  4,  8, ZSTD_lazy    },  /* level  7 */
+    { 18, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
+    { 18, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
+    { 18, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
+    { 18, 18, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 11.*/
+    { 18, 18, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 12.*/
+    { 18, 19, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13 */
+    { 18, 18, 18,  4,  4, 16, ZSTD_btopt   },  /* level 14.*/
+    { 18, 18, 18,  4,  3, 16, ZSTD_btopt   },  /* level 15.*/
+    { 18, 19, 18,  6,  3, 32, ZSTD_btopt   },  /* level 16.*/
+    { 18, 19, 18,  8,  3, 64, ZSTD_btopt   },  /* level 17.*/
+    { 18, 19, 18,  9,  3,128, ZSTD_btopt   },  /* level 18.*/
+    { 18, 19, 18, 10,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 18, 19, 18, 11,  3,512, ZSTD_btopt2  },  /* level 20.*/
+    { 18, 19, 18, 12,  3,512, ZSTD_btopt2  },  /* level 21.*/
+    { 18, 19, 18, 13,  3,512, ZSTD_btopt2  },  /* level 22.*/
+},
+{   /* for srcSize <= 128 KB */
+    /* W,  C,  H,  S,  L,  T, strat */
+    { 17, 12, 12,  1,  7,  8, ZSTD_fast    },  /* level  0 - not used */
+    { 17, 12, 13,  1,  6,  8, ZSTD_fast    },  /* level  1 */
+    { 17, 13, 16,  1,  5,  8, ZSTD_fast    },  /* level  2 */
+    { 17, 16, 16,  2,  5,  8, ZSTD_dfast   },  /* level  3 */
+    { 17, 13, 15,  3,  4,  8, ZSTD_greedy  },  /* level  4 */
+    { 17, 15, 17,  4,  4,  8, ZSTD_greedy  },  /* level  5 */
+    { 17, 16, 17,  3,  4,  8, ZSTD_lazy    },  /* level  6 */
+    { 17, 15, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  7 */
+    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
+    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
+    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
+    { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
+    { 17, 17, 17,  8,  4,  8, ZSTD_lazy2   },  /* level 12 */
+    { 17, 18, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13.*/
+    { 17, 17, 17,  7,  3,  8, ZSTD_btopt   },  /* level 14.*/
+    { 17, 17, 17,  7,  3, 16, ZSTD_btopt   },  /* level 15.*/
+    { 17, 18, 17,  7,  3, 32, ZSTD_btopt   },  /* level 16.*/
+    { 17, 18, 17,  7,  3, 64, ZSTD_btopt   },  /* level 17.*/
+    { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 18.*/
+    { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 17, 18, 17,  9,  3,256, ZSTD_btopt2  },  /* level 20.*/
+    { 17, 18, 17, 10,  3,256, ZSTD_btopt2  },  /* level 21.*/
+    { 17, 18, 17, 11,  3,512, ZSTD_btopt2  },  /* level 22.*/
+},
+{   /* for srcSize <= 16 KB */
+    /* W,  C,  H,  S,  L,  T, strat */
+    { 14, 12, 12,  1,  7,  6, ZSTD_fast    },  /* level  0 - not used */
+    { 14, 14, 14,  1,  6,  6, ZSTD_fast    },  /* level  1 */
+    { 14, 14, 14,  1,  4,  6, ZSTD_fast    },  /* level  2 */
+    { 14, 14, 14,  1,  4,  6, ZSTD_dfast   },  /* level  3.*/
+    { 14, 14, 14,  4,  4,  6, ZSTD_greedy  },  /* level  4.*/
+    { 14, 14, 14,  3,  4,  6, ZSTD_lazy    },  /* level  5.*/
+    { 14, 14, 14,  4,  4,  6, ZSTD_lazy2   },  /* level  6 */
+    { 14, 14, 14,  5,  4,  6, ZSTD_lazy2   },  /* level  7 */
+    { 14, 14, 14,  6,  4,  6, ZSTD_lazy2   },  /* level  8.*/
+    { 14, 15, 14,  6,  4,  6, ZSTD_btlazy2 },  /* level  9.*/
+    { 14, 15, 14,  3,  3,  6, ZSTD_btopt   },  /* level 10.*/
+    { 14, 15, 14,  6,  3,  8, ZSTD_btopt   },  /* level 11.*/
+    { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
+    { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
+    { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
+    { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
+    { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
+    { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
+    { 14, 15, 15,  6,  3,256, ZSTD_btopt   },  /* level 18.*/
+    { 14, 15, 15,  7,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 14, 15, 15,  8,  3,256, ZSTD_btopt2  },  /* level 20.*/
+    { 14, 15, 15,  9,  3,256, ZSTD_btopt2  },  /* level 21.*/
+    { 14, 15, 15, 10,  3,256, ZSTD_btopt2  },  /* level 22.*/
+},
+};
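+
+/* Illustrative note (editor addition, not upstream zstd) : the columns above
+ * map to ZSTD_compressionParameters fields in declaration order :
+ * W=windowLog, C=chainLog, H=hashLog, S=searchLog, L=searchLength,
+ * TL/T=targetLength, followed by the strategy. */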
+
+/*! ZSTD_getCParams() :
+*   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
+*   Size values are optional, provide 0 if not known or unused */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
+{
+    ZSTD_compressionParameters cp;
+    size_t const addedSize = srcSize ? 0 : 500;
+    U64 const rSize = srcSize+dictSize ? srcSize+dictSize+addedSize : (U64)-1;
+    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
+    if (compressionLevel <= 0) compressionLevel = ZSTD_DEFAULT_CLEVEL;   /* 0 == default; no negative compressionLevel yet */
+    if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
+    cp = ZSTD_defaultCParameters[tableID][compressionLevel];
+    if (MEM_32bits()) {   /* auto-correction, for 32-bits mode */
+        if (cp.windowLog > ZSTD_WINDOWLOG_MAX) cp.windowLog = ZSTD_WINDOWLOG_MAX;
+        if (cp.chainLog > ZSTD_CHAINLOG_MAX) cp.chainLog = ZSTD_CHAINLOG_MAX;
+        if (cp.hashLog > ZSTD_HASHLOG_MAX) cp.hashLog = ZSTD_HASHLOG_MAX;
+    }
+    cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
+    return cp;
+}
+
+/*! ZSTD_getParams() :
+*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
+*   All fields of `ZSTD_frameParameters` are set to default (0) */
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) {
+    ZSTD_parameters params;
+    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
+    memset(&params, 0, sizeof(params));
+    params.cParams = cParams;
+    return params;
+}
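+
+/* Worked example (editor addition, not upstream zstd) : for srcSize==100000
+ * and dictSize==0, rSize==100000, so tableID = (rSize <= 256 KB) +
+ * (rSize <= 128 KB) + (rSize <= 16 KB) = 1 + 1 + 0 = 2, selecting the
+ * "srcSize <= 128 KB" table. For srcSize==0 (unknown), rSize==(U64)-1 and
+ * tableID==0, selecting the "default" table. */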
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstd_opt.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,919 @@
+/**
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/* Note : this file is intended to be included within zstd_compress.c */
+
+
+#ifndef ZSTD_OPT_H_91842398743
+#define ZSTD_OPT_H_91842398743
+
+
+#define ZSTD_LITFREQ_ADD    2
+#define ZSTD_FREQ_DIV       4
+#define ZSTD_MAX_PRICE      (1<<30)
+
+/*-*************************************
+*  Price functions for optimal parser
+***************************************/
+FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr)
+{
+    ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum+1);
+    ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum+1);
+    ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum+1);
+    ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum+1);
+    ssPtr->factor = 1 + ((ssPtr->litSum>>5) / ssPtr->litLengthSum) + ((ssPtr->litSum<<1) / (ssPtr->litSum + ssPtr->matchSum));
+}
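+
+/* Illustrative note (editor addition, not upstream zstd) : these "prices" are
+ * approximate bit costs. With freq[s] occurrences out of sum in total, an
+ * ideal entropy coder spends about log2(sum/freq[s]) bits on symbol s; the
+ * parser approximates this as ZSTD_highbit32(sum+1) - ZSTD_highbit32(freq[s]+1),
+ * which is what the cached log2*Sum fields above make cheap to evaluate. */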
+
+
+MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize)
+{
+    unsigned u;
+
+    ssPtr->cachedLiterals = NULL;
+    ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
+    ssPtr->staticPrices = 0;
+
+    if (ssPtr->litLengthSum == 0) {
+        if (srcSize <= 1024) ssPtr->staticPrices = 1;
+
+        for (u=0; u<=MaxLit; u++)
+            ssPtr->litFreq[u] = 0;
+        for (u=0; u<srcSize; u++)
+            ssPtr->litFreq[src[u]]++;
+
+        ssPtr->litSum = 0;
+        ssPtr->litLengthSum = MaxLL+1;
+        ssPtr->matchLengthSum = MaxML+1;
+        ssPtr->offCodeSum = (MaxOff+1);
+        ssPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
+
+        for (u=0; u<=MaxLit; u++) {
+            ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
+            ssPtr->litSum += ssPtr->litFreq[u];
+        }
+        for (u=0; u<=MaxLL; u++)
+            ssPtr->litLengthFreq[u] = 1;
+        for (u=0; u<=MaxML; u++)
+            ssPtr->matchLengthFreq[u] = 1;
+        for (u=0; u<=MaxOff; u++)
+            ssPtr->offCodeFreq[u] = 1;
+    } else {
+        ssPtr->matchLengthSum = 0;
+        ssPtr->litLengthSum = 0;
+        ssPtr->offCodeSum = 0;
+        ssPtr->matchSum = 0;
+        ssPtr->litSum = 0;
+
+        for (u=0; u<=MaxLit; u++) {
+            ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
+            ssPtr->litSum += ssPtr->litFreq[u];
+        }
+        for (u=0; u<=MaxLL; u++) {
+            ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
+            ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
+        }
+        for (u=0; u<=MaxML; u++) {
+            ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
+            ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
+            ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
+        }
+        ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
+        for (u=0; u<=MaxOff; u++) {
+            ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
+            ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
+        }
+    }
+
+    ZSTD_setLog2Prices(ssPtr);
+}
+
+
+FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BYTE* literals)
+{
+    U32 price, u;
+
+    if (ssPtr->staticPrices)
+        return ZSTD_highbit32((U32)litLength+1) + (litLength*6);
+
+    if (litLength == 0)
+        return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1);
+
+    /* literals */
+    if (ssPtr->cachedLiterals == literals) {
+        U32 const additional = litLength - ssPtr->cachedLitLength;
+        const BYTE* literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
+        price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
+        for (u=0; u < additional; u++)
+            price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]]+1);
+        ssPtr->cachedPrice = price;
+        ssPtr->cachedLitLength = litLength;
+    } else {
+        price = litLength * ssPtr->log2litSum;
+        for (u=0; u < litLength; u++)
+            price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]]+1);
+
+        if (litLength >= 12) {
+            ssPtr->cachedLiterals = literals;
+            ssPtr->cachedPrice = price;
+            ssPtr->cachedLitLength = litLength;
+        }
+    }
+
+    /* literal Length */
+    {   const BYTE LL_deltaCode = 19;
+        const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+        price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1);
+    }
+
+    return price;
+}
+
+
+FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
+{
+    /* offset */
+    U32 price;
+    BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
+
+    if (seqStorePtr->staticPrices)
+        return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
+
+    price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1);
+    if (!ultra && offCode >= 20) price += (offCode-19)*2;
+
+    /* match Length */
+    {   const BYTE ML_deltaCode = 36;
+        const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+        price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1);
+    }
+
+    return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
+}
+
+
+MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
+{
+    U32 u;
+
+    /* literals */
+    seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+    for (u=0; u < litLength; u++)
+        seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+
+    /* literal Length */
+    {   const BYTE LL_deltaCode = 19;
+        const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+        seqStorePtr->litLengthFreq[llCode]++;
+        seqStorePtr->litLengthSum++;
+    }
+
+    /* match offset */
+    {   BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
+        seqStorePtr->offCodeSum++;
+        seqStorePtr->offCodeFreq[offCode]++;
+    }
+
+    /* match Length */
+    {   const BYTE ML_deltaCode = 36;
+        const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+        seqStorePtr->matchLengthFreq[mlCode]++;
+        seqStorePtr->matchLengthSum++;
+    }
+
+    ZSTD_setLog2Prices(seqStorePtr);
+}
+
+
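+/* SET_PRICE() : records candidate (mlen, offset, litlen, price) at position pos ; slots newly covered between last_pos and pos are first padded with ZSTD_MAX_PRICE so that any real candidate beats them */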
+#define SET_PRICE(pos, mlen_, offset_, litlen_, price_)   \
+    {                                                 \
+        while (last_pos < pos)  { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \
+        opt[pos].mlen = mlen_;                         \
+        opt[pos].off = offset_;                        \
+        opt[pos].litlen = litlen_;                     \
+        opt[pos].price = price_;                       \
+    }
+
+
+
+/* Update hashTable3 up to ip (excluded)
+   Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE
+U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip)
+{
+    U32* const hashTable3  = zc->hashTable3;
+    U32 const hashLog3  = zc->hashLog3;
+    const BYTE* const base = zc->base;
+    U32 idx = zc->nextToUpdate3;
+    const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
+    const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+
+    while(idx < target) {
+        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+        idx++;
+    }
+
+    return hashTable3[hash3];
+}
+
+
+/*-*************************************
+*  Binary Tree search
+***************************************/
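+/* ZSTD_insertBtAndGetAllMatches() :
+   inserts the current position into the binary tree, and collects into matches[]
+   every match strictly longer than the previous best (lengths are increasing) ;
+   @return nb of matches found */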
+static U32 ZSTD_insertBtAndGetAllMatches (
+                        ZSTD_CCtx* zc,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        U32 nbCompares, const U32 mls,
+                        U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+    const BYTE* const base = zc->base;
+    const U32 current = (U32)(ip-base);
+    const U32 hashLog = zc->params.cParams.hashLog;
+    const size_t h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32* const hashTable = zc->hashTable;
+    U32 matchIndex  = hashTable[h];
+    U32* const bt   = zc->chainTable;
+    const U32 btLog = zc->params.cParams.chainLog - 1;
+    const U32 btMask= (1U << btLog) - 1;
+    size_t commonLengthSmaller=0, commonLengthLarger=0;
+    const BYTE* const dictBase = zc->dictBase;
+    const U32 dictLimit = zc->dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const U32 btLow = btMask >= current ? 0 : current - btMask;
+    const U32 windowLow = zc->lowLimit;
+    U32* smallerPtr = bt + 2*(current&btMask);
+    U32* largerPtr  = bt + 2*(current&btMask) + 1;
+    U32 matchEndIdx = current+8;
+    U32 dummy32;   /* to be nullified at the end */
+    U32 mnum = 0;
+
+    const U32 minMatch = (mls == 3) ? 3 : 4;
+    size_t bestLength = minMatchLen-1;
+
+    if (minMatch == 3) { /* HC3 match finder */
+        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip);
+        if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) {
+            const BYTE* match;
+            size_t currentMl=0;
+            if ((!extDict) || matchIndex3 >= dictLimit) {
+                match = base + matchIndex3;
+                if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit);
+            } else {
+                match = dictBase + matchIndex3;
+                if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH))    /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
+                    currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
+            }
+
+            /* save best solution */
+            if (currentMl > bestLength) {
+                bestLength = currentMl;
+                matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3;
+                matches[mnum].len = (U32)currentMl;
+                mnum++;
+                if (currentMl > ZSTD_OPT_NUM) goto update;
+                if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow */
+            }
+        }
+    }
+
+    hashTable[h] = current;   /* Update Hash Table */
+
+    while (nbCompares-- && (matchIndex > windowLow)) {
+        U32* nextPtr = bt + 2*(matchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+        const BYTE* match;
+
+        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+            match = base + matchIndex;
+            if (match[matchLength] == ip[matchLength]) {
+                matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1;
+            }
+        } else {
+            match = dictBase + matchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+            if (matchIndex+matchLength >= dictLimit)
+                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
+        }
+
+        if (matchLength > bestLength) {
+            if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength;
+            bestLength = matchLength;
+            matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex;
+            matches[mnum].len = (U32)matchLength;
+            mnum++;
+            if (matchLength > ZSTD_OPT_NUM) break;
+            if (ip+matchLength == iLimit)   /* equal : no way to know if the match is smaller or larger */
+                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+        }
+
+        if (match[matchLength] < ip[matchLength]) {
+            /* match is smaller than current */
+            *smallerPtr = matchIndex;             /* update smaller idx */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
+            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+        } else {
+            /* match is larger than current */
+            *largerPtr = matchIndex;
+            commonLengthLarger = matchLength;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+    }   }
+
+    *smallerPtr = *largerPtr = 0;
+
+update:
+    zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
+    return mnum;
+}
+
+
+/** Tree updater, collecting all matches */
+static U32 ZSTD_BtGetAllMatches (
+                        ZSTD_CCtx* zc,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
+    ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+    return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
+}
+
+
+static U32 ZSTD_BtGetAllMatches_selectMLS (
+                        ZSTD_CCtx* zc,   /* Index table will be updated */
+                        const BYTE* ip, const BYTE* const iHighLimit,
+                        const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+    switch(matchLengthSearch)
+    {
+    case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
+    default :
+    case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
+    case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
+    case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
+    }
+}
+
+/** Tree updater, collecting all matches (extDict variant) */
+static U32 ZSTD_BtGetAllMatches_extDict (
+                        ZSTD_CCtx* zc,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+    if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
+    ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
+    return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
+}
+
+
+static U32 ZSTD_BtGetAllMatches_selectMLS_extDict (
+                        ZSTD_CCtx* zc,   /* Index table will be updated */
+                        const BYTE* ip, const BYTE* const iHighLimit,
+                        const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
+{
+    switch(matchLengthSearch)
+    {
+    case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
+    default :
+    case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
+    case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
+    case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
+    }
+}
+
+
+/*-*******************************
+*  Optimal parser
+*********************************/
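+/* The optimal parser is a forward dynamic program : opt[cur] holds the cheapest
+   known price to reach position ip+cur. Repcodes and binary-tree matches extend
+   the table forward ; once last_pos is priced, the chain of (mlen, off) choices
+   is walked backward and the resulting sequences are emitted in order. */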
+FORCE_INLINE
+void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
+                                    const void* src, size_t srcSize, const int ultra)
+{
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ctx->base;
+    const BYTE* const prefixStart = base + ctx->dictLimit;
+
+    const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
+    const U32 sufficient_len = ctx->params.cParams.targetLength;
+    const U32 mls = ctx->params.cParams.searchLength;
+    const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
+
+    ZSTD_optimal_t* opt = seqStorePtr->priceTable;
+    ZSTD_match_t* matches = seqStorePtr->matchTable;
+    const BYTE* inr;
+    U32 offset, rep[ZSTD_REP_NUM];
+
+    /* init */
+    ctx->nextToUpdate3 = ctx->nextToUpdate;
+    ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
+    ip += (ip==prefixStart);
+    { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        U32 cur, match_num, last_pos, litlen, price;
+        U32 u, mlen, best_mlen, best_off, litLength;
+        memset(opt, 0, sizeof(ZSTD_optimal_t));
+        last_pos = 0;
+        litlen = (U32)(ip - anchor);
+
+        /* check repCode */
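+        /* repeat offsets are the cheapest matches to encode, so they are priced before any tree search */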
+        {   U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
+            for (i=(ip == anchor); i<last_i; i++) {
+                const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+                if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
+                    && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - repCur, minMatch))) {
+                    mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
+                    if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
+                        best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
+                        goto _storeSequence;
+                    }
+                    best_off = i - (ip == anchor);
+                    do {
+                        price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+                        if (mlen > last_pos || price < opt[mlen].price)
+                            SET_PRICE(mlen, mlen, i, litlen, price);   /* note : macro modifies last_pos */
+                        mlen--;
+                    } while (mlen >= minMatch);
+        }   }   }
+
+        match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
+
+        if (!last_pos && !match_num) { ip++; continue; }
+
+        if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+            best_mlen = matches[match_num-1].len;
+            best_off = matches[match_num-1].off;
+            cur = 0;
+            last_pos = 1;
+            goto _storeSequence;
+        }
+
+        /* set prices using matches at position = 0 */
+        best_mlen = (last_pos) ? last_pos : minMatch;
+        for (u = 0; u < match_num; u++) {
+            mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+            best_mlen = matches[u].len;
+            while (mlen <= best_mlen) {
+                price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+                if (mlen > last_pos || price < opt[mlen].price)
+                    SET_PRICE(mlen, mlen, matches[u].off, litlen, price);   /* note : macro modifies last_pos */
+                mlen++;
+        }   }
+
+        if (last_pos < minMatch) { ip++; continue; }
+
+        /* initialize opt[0] */
+        { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+        opt[0].mlen = 1;
+        opt[0].litlen = litlen;
+
+        /* check further positions */
+        for (cur = 1; cur <= last_pos; cur++) {
+           inr = ip + cur;
+
+           if (opt[cur-1].mlen == 1) {
+                litlen = opt[cur-1].litlen + 1;
+                if (cur > litlen) {
+                    price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
+                } else
+                    price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
+           } else {
+                litlen = 1;
+                price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
+           }
+
+           if (cur > last_pos || price <= opt[cur].price)
+                SET_PRICE(cur, 1, 0, litlen, price);
+
+           if (cur == last_pos) break;
+
+           if (inr > ilimit)  /* last match must start at a minimum distance of 8 from iend */
+               continue;
+
+           mlen = opt[cur].mlen;
+           if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
+                opt[cur].rep[2] = opt[cur-mlen].rep[1];
+                opt[cur].rep[1] = opt[cur-mlen].rep[0];
+                opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
+           } else {
+                opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
+                opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
+                opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
+           }
+
+            best_mlen = minMatch;
+            {   U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+                for (i=(opt[cur].mlen != 1); i<last_i; i++) {  /* check rep */
+                    const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
+                    if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
+                       && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - repCur, minMatch))) {
+                       mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch;
+
+                       if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
+                            best_mlen = mlen; best_off = i; last_pos = cur + 1;
+                            goto _storeSequence;
+                       }
+
+                       best_off = i - (opt[cur].mlen != 1);
+                       if (mlen > best_mlen) best_mlen = mlen;
+
+                       do {
+                           if (opt[cur].mlen == 1) {
+                                litlen = opt[cur].litlen;
+                                if (cur > litlen) {
+                                    price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
+                                } else
+                                    price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+                            } else {
+                                litlen = 0;
+                                price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
+                            }
+
+                            if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
+                                SET_PRICE(cur + mlen, mlen, i, litlen, price);
+                            mlen--;
+                        } while (mlen >= minMatch);
+            }   }   }
+
+            match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
+
+            if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+                best_mlen = matches[match_num-1].len;
+                best_off = matches[match_num-1].off;
+                last_pos = cur + 1;
+                goto _storeSequence;
+            }
+
+            /* set prices using matches at position = cur */
+            for (u = 0; u < match_num; u++) {
+                mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+                best_mlen = matches[u].len;
+
+                while (mlen <= best_mlen) {
+                    if (opt[cur].mlen == 1) {
+                        litlen = opt[cur].litlen;
+                        if (cur > litlen)
+                            price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
+                        else
+                            price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+                    } else {
+                        litlen = 0;
+                        price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
+                    }
+
+                    if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
+                        SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
+
+                    mlen++;
+        }   }   }
+
+        best_mlen = opt[last_pos].mlen;
+        best_off = opt[last_pos].off;
+        cur = last_pos - best_mlen;
+
+        /* store sequence */
+_storeSequence:   /* cur, last_pos, best_mlen, best_off have to be set */
+        opt[0].mlen = 1;
+
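+        /* walk the selected chain backward, swapping (mlen, off) into each node so that the forward pass below can emit sequences in order */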
+        while (1) {
+            mlen = opt[cur].mlen;
+            offset = opt[cur].off;
+            opt[cur].mlen = best_mlen;
+            opt[cur].off = best_off;
+            best_mlen = mlen;
+            best_off = offset;
+            if (mlen > cur) break;
+            cur -= mlen;
+        }
+
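+        /* note : this traversal only walks the chain ; it has no side effects */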
+        for (u = 0; u <= last_pos;) {
+            u += opt[u].mlen;
+        }
+
+        for (cur=0; cur < last_pos; ) {
+            mlen = opt[cur].mlen;
+            if (mlen == 1) { ip++; cur++; continue; }
+            offset = opt[cur].off;
+            cur += mlen;
+            litLength = (U32)(ip - anchor);
+
+            if (offset > ZSTD_REP_MOVE_OPT) {
+                rep[2] = rep[1];
+                rep[1] = rep[0];
+                rep[0] = offset - ZSTD_REP_MOVE_OPT;
+                offset--;
+            } else {
+                if (offset != 0) {
+                    best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
+                    if (offset != 1) rep[2] = rep[1];
+                    rep[1] = rep[0];
+                    rep[0] = best_off;
+                }
+                if (litLength==0) offset--;
+            }
+
+            ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
+            ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
+            anchor = ip = ip + mlen;
+    }    }   /* for (cur=0; cur < last_pos; ) */
+
+    /* Save reps for next block */
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; }
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
+
+
+FORCE_INLINE
+void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
+                                     const void* src, size_t srcSize, const int ultra)
+{
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ctx->base;
+    const U32 lowestIndex = ctx->lowLimit;
+    const U32 dictLimit = ctx->dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* const dictBase = ctx->dictBase;
+    const BYTE* const dictEnd  = dictBase + dictLimit;
+
+    const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
+    const U32 sufficient_len = ctx->params.cParams.targetLength;
+    const U32 mls = ctx->params.cParams.searchLength;
+    const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
+
+    ZSTD_optimal_t* opt = seqStorePtr->priceTable;
+    ZSTD_match_t* matches = seqStorePtr->matchTable;
+    const BYTE* inr;
+
+    /* init */
+    U32 offset, rep[ZSTD_REP_NUM];
+    { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
+
+    ctx->nextToUpdate3 = ctx->nextToUpdate;
+    ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
+    ip += (ip==prefixStart);
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        U32 cur, match_num, last_pos, litlen, price;
+        U32 u, mlen, best_mlen, best_off, litLength;
+        U32 current = (U32)(ip-base);
+        memset(opt, 0, sizeof(ZSTD_optimal_t));
+        last_pos = 0;
+        opt[0].litlen = (U32)(ip - anchor);
+
+        /* check repCode */
+        {   U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
+            for (i = (ip==anchor); i<last_i; i++) {
+                const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+                const U32 repIndex = (U32)(current - repCur);
+                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                const BYTE* const repMatch = repBase + repIndex;
+                if ( (repCur > 0 && repCur <= (S32)current)
+                   && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex))  /* intentional overflow */
+                   && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
+                    /* repcode detected, we should take it */
+                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                    mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
+
+                    if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
+                        best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
+                        goto _storeSequence;
+                    }
+
+                    best_off = i - (ip==anchor);
+                    litlen = opt[0].litlen;
+                    do {
+                        price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+                        if (mlen > last_pos || price < opt[mlen].price)
+                            SET_PRICE(mlen, mlen, i, litlen, price);   /* note : macro modifies last_pos */
+                        mlen--;
+                    } while (mlen >= minMatch);
+        }   }   }
+
+        match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch);  /* first search (depth 0) */
+
+        if (!last_pos && !match_num) { ip++; continue; }
+
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+        opt[0].mlen = 1;
+
+        if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+            best_mlen = matches[match_num-1].len;
+            best_off = matches[match_num-1].off;
+            cur = 0;
+            last_pos = 1;
+            goto _storeSequence;
+        }
+
+        best_mlen = (last_pos) ? last_pos : minMatch;
+
+        /* set prices using matches at position = 0 */
+        for (u = 0; u < match_num; u++) {
+            mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+            best_mlen = matches[u].len;
+            litlen = opt[0].litlen;
+            while (mlen <= best_mlen) {
+                price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+                if (mlen > last_pos || price < opt[mlen].price)
+                    SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
+                mlen++;
+        }   }
+
+        if (last_pos < minMatch) {
+            ip++; continue;
+        }
+
+        /* check further positions */
+        for (cur = 1; cur <= last_pos; cur++) {
+            inr = ip + cur;
+
+            if (opt[cur-1].mlen == 1) {
+                litlen = opt[cur-1].litlen + 1;
+                if (cur > litlen) {
+                    price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
+                } else
+                    price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
+            } else {
+                litlen = 1;
+                price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
+            }
+
+            if (cur > last_pos || price <= opt[cur].price)
+                SET_PRICE(cur, 1, 0, litlen, price);
+
+            if (cur == last_pos) break;
+
+            if (inr > ilimit)  /* last match must start at a minimum distance of 8 from iend */
+                continue;
+
+            mlen = opt[cur].mlen;
+            if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
+                opt[cur].rep[2] = opt[cur-mlen].rep[1];
+                opt[cur].rep[1] = opt[cur-mlen].rep[0];
+                opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
+            } else {
+                opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
+                opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
+                opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
+            }
+
+            best_mlen = minMatch;
+            {   U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+                for (i = (mlen != 1); i<last_i; i++) {
+                    const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
+                    const U32 repIndex = (U32)(current+cur - repCur);
+                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                    const BYTE* const repMatch = repBase + repIndex;
+                    if ( (repCur > 0 && repCur <= (S32)(current+cur))
+                      && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex))  /* intentional overflow */
+                      && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
+                        /* repcode detected */
+                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                        mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
+
+                        if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
+                            best_mlen = mlen; best_off = i; last_pos = cur + 1;
+                            goto _storeSequence;
+                        }
+
+                        best_off = i - (opt[cur].mlen != 1);
+                        if (mlen > best_mlen) best_mlen = mlen;
+
+                        do {
+                            if (opt[cur].mlen == 1) {
+                                litlen = opt[cur].litlen;
+                                if (cur > litlen) {
+                                    price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
+                                } else
+                                    price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+                            } else {
+                                litlen = 0;
+                                price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
+                            }
+
+                            if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
+                                SET_PRICE(cur + mlen, mlen, i, litlen, price);
+                            mlen--;
+                        } while (mlen >= minMatch);
+            }   }   }
+
+            match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
+
+            if (match_num > 0 && matches[match_num-1].len > sufficient_len) {
+                best_mlen = matches[match_num-1].len;
+                best_off = matches[match_num-1].off;
+                last_pos = cur + 1;
+                goto _storeSequence;
+            }
+
+            /* set prices using matches at position = cur */
+            for (u = 0; u < match_num; u++) {
+                mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
+                best_mlen = (cur + matches[u].len < ZSTD_OPT_NUM) ? matches[u].len : ZSTD_OPT_NUM - cur;
+
+                while (mlen <= best_mlen) {
+                    if (opt[cur].mlen == 1) {
+                        litlen = opt[cur].litlen;
+                        if (cur > litlen)
+                            price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
+                        else
+                            price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
+                    } else {
+                        litlen = 0;
+                        price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
+                    }
+
+                    if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
+                        SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
+
+                    mlen++;
+        }   }   }   /* for (cur = 1; cur <= last_pos; cur++) */
+
+        best_mlen = opt[last_pos].mlen;
+        best_off = opt[last_pos].off;
+        cur = last_pos - best_mlen;
+
+        /* store sequence */
+_storeSequence:   /* cur, last_pos, best_mlen, best_off have to be set */
+        opt[0].mlen = 1;
+
+        while (1) {
+            mlen = opt[cur].mlen;
+            offset = opt[cur].off;
+            opt[cur].mlen = best_mlen;
+            opt[cur].off = best_off;
+            best_mlen = mlen;
+            best_off = offset;
+            if (mlen > cur) break;
+            cur -= mlen;
+        }
+
+        for (u = 0; u <= last_pos; ) {
+            u += opt[u].mlen;
+        }
+
+        for (cur=0; cur < last_pos; ) {
+            mlen = opt[cur].mlen;
+            if (mlen == 1) { ip++; cur++; continue; }
+            offset = opt[cur].off;
+            cur += mlen;
+            litLength = (U32)(ip - anchor);
+
+            if (offset > ZSTD_REP_MOVE_OPT) {
+                rep[2] = rep[1];
+                rep[1] = rep[0];
+                rep[0] = offset - ZSTD_REP_MOVE_OPT;
+                offset--;
+            } else {
+                if (offset != 0) {
+                    best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
+                    if (offset != 1) rep[2] = rep[1];
+                    rep[1] = rep[0];
+                    rep[0] = best_off;
+                }
+
+                if (litLength==0) offset--;
+            }
+
+            ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
+            ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
+            anchor = ip = ip + mlen;
+    }    }   /* for (cur=0; cur < last_pos; ) */
+
+    /* Save reps for next block */
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; }
+
+    /* Last Literals */
+    {   size_t const lastLLSize = iend - anchor;
+        memcpy(seqStorePtr->lit, anchor, lastLLSize);
+        seqStorePtr->lit += lastLLSize;
+    }
+}
+
+#endif  /* ZSTD_OPT_H_91842398743 */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/decompress/huf_decompress.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,885 @@
+/* ******************************************************************
+   Huffman decoder, part of New Generation Entropy library
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER) || defined(__GNUC__)
+#  define inline __inline
+#else
+#  define inline /* disable inline */
+#endif
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+*  Dependencies
+****************************************************************/
+#include <string.h>     /* memcpy, memset */
+#include "bitstream.h"  /* BIT_* */
+#include "fse.h"        /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/*-***************************/
+/*  generic DTableDesc       */
+/*-***************************/
+
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+{
+    DTableDesc dtd;
+    memcpy(&dtd, table, sizeof(dtd));
+    return dtd;
+}
+
+
+/*-***************************/
+/*  single-symbol decoding   */
+/*-***************************/
+
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
+
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
+    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    U32 nbSymbols = 0;
+    size_t iSize;
+    void* const dtPtr = DTable + 1;
+    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+
+    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzers complain */
+
+    iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* Table header */
+    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
+        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small : Huffman tree cannot fit in it */
+        dtd.tableType = 0;
+        dtd.tableLog = (BYTE)tableLog;
+        memcpy(DTable, &dtd, sizeof(dtd));
+    }
+
+    /* Prepare ranks */
+    {   U32 n, nextRankStart = 0;
+        for (n=1; n<tableLog+1; n++) {
+            U32 current = nextRankStart;
+            nextRankStart += (rankVal[n] << (n-1));
+            rankVal[n] = current;
+    }   }
+
+    /* fill DTable */
+    {   U32 n;
+        for (n=0; n<nbSymbols; n++) {
+            U32 const w = huffWeight[n];
+            U32 const length = (1 << w) >> 1;
+            U32 i;
+            HUF_DEltX2 D;
+            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+            for (i = rankVal[w]; i < rankVal[w] + length; i++)
+                dt[i] = D;
+            rankVal[w] += length;
+    }   }
+
+    return iSize;
+}
+
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+    BYTE const c = dt[val].byte;
+    BIT_skipBits(Dstream, dt[val].nbBits);
+    return c;
+}
+
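+/* note : the _1 and _2 variants decode only when the bit container is wide enough, so that a group of four decodes cannot consume more bits than one reload provides */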
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) {
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, hence no need to reload */
+    while (p < pEnd)
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+static size_t HUF_decompress1X2_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + dstSize;
+    const void* dtPtr = DTable + 1;
+    const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+    BIT_DStream_t bitD;
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    U32 const dtLog = dtd.tableLog;
+
+    { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
+      if (HUF_isError(errorCode)) return errorCode; }
+
+    HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+    /* check */
+    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    return dstSize;
+}
+
+size_t HUF_decompress1X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 0) return ERROR(GENERIC);
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX2 (DCtx, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+static size_t HUF_decompress4X2_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    /* Check */
+    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
+
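+    /* layout : 6-byte jump table (three little-endian stream sizes) followed by four compressed streams ; the fourth size is inferred from cSrcSize */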
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable + 1;
+        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
+          if (HUF_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
+          if (HUF_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
+          if (HUF_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
+          if (HUF_isError(errorCode)) return errorCode; }
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is assumed to be already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+size_t HUF_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 0) return ERROR(GENERIC);
+    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+
+size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX2 (dctx, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
+}
+
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
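+/* each X4 entry packs 1 or 2 decoded bytes into 'sequence' (little-endian) ; 'length' tells how many bytes to emit, 'nbBits' how many bits the lookup consumes */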
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/* HUF_fillDTableX4Level2() :
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUF_DEltX4 DElt;
+    U32 rankVal[HUF_TABLELOG_MAX + 1];
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1) {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
+            const U32 symbol = sortedSymbols[s].symbol;
+            const U32 weight = sortedSymbols[s].weight;
+            const U32 nbBits = nbBitsBaseline - weight;
+            const U32 length = 1 << (sizeLog-nbBits);
+            const U32 start = rankVal[weight];
+            U32 i = start;
+            const U32 end = start + length;
+
+            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+            DElt.nbBits = (BYTE)(nbBits + consumed);
+            DElt.length = 2;
+            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+            rankVal[weight] += length;
+    }   }
+}
+
+typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
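+/* rankVal_t : for each possible number of bits already consumed by a first symbol, the start position of each weight class within the corresponding sub-table */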
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUF_TABLELOG_MAX + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++) {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        } else {
+            HUF_DEltX4 DElt;
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits = (BYTE)(nbBits);
+            DElt.length = 1;
+            {   U32 const end = start + length;
+                U32 u;
+                for (u = start; u < end; u++) DTable[u] = DElt;
+        }   }
+        rankVal[weight] += length;
+    }
+}
+
+size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
+    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
+    U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 };
+    U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    rankVal_t rankVal;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    U32 const maxTableLog = dtd.maxTableLog;
+    size_t iSize;
+    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
+    HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compilation fails here, assertion is false */
+    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+    /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
+
+    /* Get start index of each weight */
+    {   U32 w, nextRankStart = 0;
+        for (w=1; w<maxW+1; w++) {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of the sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {   U32 s;
+        for (s=0; s<nbSymbols; s++) {
+            U32 const w = weightList[s];
+            U32 const r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0w symbols; this is the beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {   U32* const rankVal0 = rankVal[0];
+        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
+            U32 nextRankVal = 0;
+            U32 w;
+            for (w=1; w<maxW+1; w++) {
+                U32 current = nextRankVal;
+                nextRankVal += rankStats[w] << (w+rescale);
+                rankVal0[w] = current;
+        }   }
+        {   U32 const minBits = tableLog+1 - maxW;
+            U32 consumed;
+            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+                U32* const rankValPtr = rankVal[consumed];
+                U32 w;
+                for (w = 1; w < maxW+1; w++) {
+                    rankValPtr[w] = rankVal0[w] >> consumed;
+    }   }   }   }
+
+    HUF_fillDTableX4(dt, maxTableLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    dtd.tableLog = (BYTE)maxTableLog;
+    dtd.tableType = 1;
+    memcpy(DTable, &dtd, sizeof(dtd));
+    return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BIT_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
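+/* note : each HUF_DEltX4 packs up to two decoded bytes in `sequence`, the
+ * number of bits consumed in `nbBits`, and how many of the two bytes are
+ * valid in `length`. HUF_decodeSymbolX4() always copies 2 bytes but advances
+ * `op` only by `length`, so a single-symbol entry harmlessly pre-writes the
+ * byte that the next symbol will overwrite anyway. */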
+
+static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+    else {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+            BIT_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+    }   }
+    return 1;
+}
+
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
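+/* note : the _1 and _2 variants gate extra decodes on bit-container width :
+ * a 64-bit container holds enough bits for four consecutive lookups between
+ * reloads, whereas 32-bit builds must reload more often ; the _1 step stays
+ * safe on 32-bit only while HUF_TABLELOG_MAX <= 12 (two lookups <= 24 bits). */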
+
+static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to end : up to 2 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+
+static size_t HUF_decompress1X4_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    BIT_DStream_t bitD;
+
+    /* Init */
+    {   size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
+        if (HUF_isError(errorCode)) return errorCode;
+    }
+
+    /* decode */
+    {   BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
+        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
+    }
+
+    /* check */
+    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+size_t HUF_decompress1X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 1) return ERROR(GENERIC);
+    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX4 (DCtx, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+static size_t HUF_decompress4X4_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;
+        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        size_t const segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
+          if (HUF_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
+          if (HUF_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
+          if (HUF_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
+          if (HUF_isError(errorCode)) return errorCode; }
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+          if (!endCheck) return ERROR(corruption_detected); }
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+size_t HUF_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 1) return ERROR(GENERIC);
+    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+
+size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUF_readDTableX4 (dctx, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
+}
+
+size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+                                    const void* cSrc, size_t cSrcSize,
+                                    const HUF_DTable* DTable)
+{
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
+                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+                                    const void* cSrc, size_t cSrcSize,
+                                    const HUF_DTable* DTable)
+{
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
+                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
+
+/** HUF_selectDecoder() :
+*   Tells which decoder is likely to decode faster,
+*   based on a set of pre-determined metrics.
+*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+    /* decoder timing evaluation */
+    U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
+    U32 const D256 = (U32)(dstSize >> 8);
+    U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+    U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+    DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, for cache eviction */
+
+    return DTime1 < DTime0;
+}
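+/* worked example, using the algoTime table above : for dstSize = 64 KB and
+ * cSrcSize = 32 KB, Q = 32768*16/65536 = 8 and D256 = 65536>>8 = 256, so
+ * DTime0 = 926 + 128*256 = 33694 and DTime1 = 1613 + 75*256 = 20813, which
+ * the cache-eviction penalty raises to 20813 + 2601 = 23414. As 23414 < 33694,
+ * the double-symbols decoder (HUF_decompress4X4) is selected. */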
+
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 };
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+    }
+}
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+    }
+}
+
+size_t HUF_decompress4X_hufOnly (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected);   /* invalid */
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+    }
+}
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,2154 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how the default decompression function ZSTD_decompress() allocates memory :
+ * on the stack (0), or on the heap (1, requires malloc())
+ */
+#ifndef ZSTD_HEAPMODE
+#  define ZSTD_HEAPMODE 1
+#endif
+
+/*!
+*  LEGACY_SUPPORT :
+*  if set to 1, ZSTD_decompress() can decode older formats (v0.1+)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+#  define ZSTD_LEGACY_SUPPORT 0
+#endif
+
+/*!
+*  MAXWINDOWSIZE_DEFAULT :
+*  maximum window size accepted by DStream, by default.
+*  Frames requiring more memory will be rejected.
+*/
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+#  define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1)   /* defined within zstd.h */
+#endif
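+/* note : the three tuning parameters above are #ifndef-guarded, so they can
+ * be set from the build line instead of editing this file ; for example
+ * (hypothetical build command) :
+ *     cc -DZSTD_HEAPMODE=0 -DZSTD_LEGACY_SUPPORT=1 -c zstd_decompress.c
+ * enabling legacy support additionally requires zstd_legacy.h to be present. */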
+
+
+/*-*******************************************************
+*  Dependencies
+*********************************************************/
+#include <string.h>      /* memcpy, memmove, memset */
+#include "mem.h"         /* low level memory routines */
+#define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
+#include "xxhash.h"      /* XXH64_* */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h"
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+#  include "zstd_legacy.h"
+#endif
+
+
+#if defined(_MSC_VER)
+#  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#  define ZSTD_PREFETCH(ptr)   _mm_prefetch((const char*)ptr, _MM_HINT_T0)
+#elif defined(__GNUC__)
+#  define ZSTD_PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
+#else
+#  define ZSTD_PREFETCH(ptr)   /* disabled */
+#endif
+
+/*-*************************************
+*  Macros
+***************************************/
+#define ZSTD_isError ERR_isError   /* for inlining */
+#define FSE_isError  ERR_isError
+#define HUF_isError  ERR_isError
+
+
+/*_*******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+*   Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+struct ZSTD_DCtx_s
+{
+    const FSE_DTable* LLTptr;
+    const FSE_DTable* MLTptr;
+    const FSE_DTable* OFTptr;
+    const HUF_DTable* HUFptr;
+    FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+    FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+    FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
+    const void* previousDstEnd;
+    const void* base;
+    const void* vBase;
+    const void* dictEnd;
+    size_t expected;
+    U32 rep[ZSTD_REP_NUM];
+    ZSTD_frameParams fParams;
+    blockType_e bType;   /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+    ZSTD_dStage stage;
+    U32 litEntropy;
+    U32 fseEntropy;
+    XXH64_state_t xxhState;
+    size_t headerSize;
+    U32 dictID;
+    const BYTE* litPtr;
+    ZSTD_customMem customMem;
+    size_t litSize;
+    size_t rleSize;
+    BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
+    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) { return (dctx==NULL) ? 0 : sizeof(ZSTD_DCtx); }
+
+size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
+
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+    dctx->expected = ZSTD_frameHeaderSize_prefix;
+    dctx->stage = ZSTDds_getFrameHeaderSize;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    dctx->vBase = NULL;
+    dctx->dictEnd = NULL;
+    dctx->hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
+    dctx->litEntropy = dctx->fseEntropy = 0;
+    dctx->dictID = 0;
+    MEM_STATIC_ASSERT(sizeof(dctx->rep) == sizeof(repStartValue));
+    memcpy(dctx->rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
+    dctx->LLTptr = dctx->LLTable;
+    dctx->MLTptr = dctx->MLTable;
+    dctx->OFTptr = dctx->OFTable;
+    dctx->HUFptr = dctx->hufTable;
+    return 0;
+}
+
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+    ZSTD_DCtx* dctx;
+
+    if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+    if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+    dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
+    if (!dctx) return NULL;
+    memcpy(&dctx->customMem, &customMem, sizeof(customMem));
+    ZSTD_decompressBegin(dctx);
+    return dctx;
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+    return ZSTD_createDCtx_advanced(defaultCustomMem);
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+    if (dctx==NULL) return 0;   /* support free on NULL */
+    ZSTD_free(dctx, dctx->customMem);
+    return 0;   /* reserved as a potential error code in the future */
+}
+
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+    size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
+    memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize);  /* no need to copy workspace */
+}
+
+static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+    ZSTD_decompressBegin(dstDCtx);  /* init */
+    if (srcDCtx) {   /* support refDCtx on NULL */
+        dstDCtx->dictEnd = srcDCtx->dictEnd;
+        dstDCtx->vBase = srcDCtx->vBase;
+        dstDCtx->base = srcDCtx->base;
+        dstDCtx->previousDstEnd = srcDCtx->previousDstEnd;
+        dstDCtx->dictID = srcDCtx->dictID;
+        dstDCtx->litEntropy = srcDCtx->litEntropy;
+        dstDCtx->fseEntropy = srcDCtx->fseEntropy;
+        dstDCtx->LLTptr = srcDCtx->LLTable;
+        dstDCtx->MLTptr = srcDCtx->MLTable;
+        dstDCtx->OFTptr = srcDCtx->OFTable;
+        dstDCtx->HUFptr = srcDCtx->hufTable;
+        dstDCtx->rep[0] = srcDCtx->rep[0];
+        dstDCtx->rep[1] = srcDCtx->rep[1];
+        dstDCtx->rep[2] = srcDCtx->rep[2];
+    }
+}
+
+
+/*-*************************************************************
+*   Decompression section
+***************************************************************/
+
+/*! ZSTD_isFrame() :
+ *  Tells if the content of `buffer` starts with a valid Frame Identifier.
+ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ *  Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+    if (size < 4) return 0;
+    {   U32 const magic = MEM_readLE32(buffer);
+        if (magic == ZSTD_MAGICNUMBER) return 1;
+        if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+    }
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+    if (ZSTD_isLegacy(buffer, size)) return 1;
+#endif
+    return 0;
+}
+
+
+/** ZSTD_frameHeaderSize() :
+*   srcSize must be >= ZSTD_frameHeaderSize_prefix.
+*   @return : size of the Frame Header */
+static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+    if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong);
+    {   BYTE const fhd = ((const BYTE*)src)[4];
+        U32 const dictID= fhd & 3;
+        U32 const singleSegment = (fhd >> 5) & 1;
+        U32 const fcsId = fhd >> 6;
+        return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+                + (singleSegment && !fcsId);
+    }
+}
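+/* worked example : a descriptor byte fhd = 0 (no dictID field, no checksum,
+ * not single-segment, fcsId = 0) gives ZSTD_frameHeaderSize_prefix + 1 for
+ * the window descriptor byte, and nothing else, since ZSTD_did_fieldSize[0]
+ * and ZSTD_fcs_fieldSize[0] are both 0. With singleSegment set and fcsId = 0,
+ * the window byte disappears but the (singleSegment && !fcsId) term adds one
+ * byte of frame content size back. */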
+
+
+/** ZSTD_getFrameParams() :
+*   decode Frame Header, or require larger `srcSize`.
+*   @return : 0, `fparamsPtr` is correctly filled,
+*            >0, `srcSize` is too small, result is expected `srcSize`,
+*             or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+
+    if (srcSize < ZSTD_frameHeaderSize_prefix) return ZSTD_frameHeaderSize_prefix;
+    if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
+        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+            if (srcSize < ZSTD_skippableHeaderSize) return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
+            memset(fparamsPtr, 0, sizeof(*fparamsPtr));
+            fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
+            fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
+            return 0;
+        }
+        return ERROR(prefix_unknown);
+    }
+
+    /* ensure there is enough `srcSize` to fully read/decode frame header */
+    { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
+      if (srcSize < fhsize) return fhsize; }
+
+    {   BYTE const fhdByte = ip[4];
+        size_t pos = 5;
+        U32 const dictIDSizeCode = fhdByte&3;
+        U32 const checksumFlag = (fhdByte>>2)&1;
+        U32 const singleSegment = (fhdByte>>5)&1;
+        U32 const fcsID = fhdByte>>6;
+        U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
+        U32 windowSize = 0;
+        U32 dictID = 0;
+        U64 frameContentSize = 0;
+        if ((fhdByte & 0x08) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits, which must be zero */
+        if (!singleSegment) {
+            BYTE const wlByte = ip[pos++];
+            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+            if (windowLog > ZSTD_WINDOWLOG_MAX) return ERROR(frameParameter_windowTooLarge);  /* avoids issue with 1 << windowLog */
+            windowSize = (1U << windowLog);
+            windowSize += (windowSize >> 3) * (wlByte&7);
+        }
+
+        switch(dictIDSizeCode)
+        {
+            default:   /* impossible */
+            case 0 : break;
+            case 1 : dictID = ip[pos]; pos++; break;
+            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+        }
+        switch(fcsID)
+        {
+            default:   /* impossible */
+            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
+            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+        }
+        if (!windowSize) windowSize = (U32)frameContentSize;
+        if (windowSize > windowSizeMax) return ERROR(frameParameter_windowTooLarge);
+        fparamsPtr->frameContentSize = frameContentSize;
+        fparamsPtr->windowSize = windowSize;
+        fparamsPtr->dictID = dictID;
+        fparamsPtr->checksumFlag = checksumFlag;
+    }
+    return 0;
+}
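+/* caller sketch for the three-way return convention above (hypothetical
+ * caller, error handling elided) :
+ *
+ *     ZSTD_frameParams fp;
+ *     size_t const r = ZSTD_getFrameParams(&fp, src, srcSize);
+ *     if (ZSTD_isError(r)) { reject input }
+ *     else if (r > 0) { gather at least r bytes, then retry }
+ *     else { fp is filled ; fp.windowSize == 0 flags a skippable frame }
+ */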
+
+
+/** ZSTD_getDecompressedSize() :
+*   compatible with legacy mode
+*   @return : decompressed size if known, 0 otherwise
+              note : 0 can mean any of the following :
+                   - decompressed size is not present within frame header
+                   - frame header unknown / not supported
+                   - frame header not complete (`srcSize` too small) */
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+    if (ZSTD_isLegacy(src, srcSize)) return ZSTD_getDecompressedSize_legacy(src, srcSize);
+#endif
+    {   ZSTD_frameParams fparams;
+        size_t const frResult = ZSTD_getFrameParams(&fparams, src, srcSize);
+        if (frResult!=0) return 0;
+        return fparams.frameContentSize;
+    }
+}
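+/* usage note : since 0 is overloaded (size unknown, header unsupported, or
+ * header incomplete), a cautious caller treats 0 as "unknown" and falls back
+ * to streaming decompression instead of allocating a zero-byte buffer ;
+ * a minimal sketch (hypothetical) :
+ *
+ *     unsigned long long const rSize = ZSTD_getDecompressedSize(src, srcSize);
+ *     if (rSize == 0) { decompress with the streaming API }
+ *     else { allocate rSize bytes, then ZSTD_decompress() into them }
+ */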
+
+
+/** ZSTD_decodeFrameHeader() :
+*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
+{
+    size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
+    if (ZSTD_isError(result)) return result;  /* invalid header */
+    if (result>0) return ERROR(srcSize_wrong);   /* headerSize too small */
+    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);
+    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
+    return 0;
+}
+
+
+typedef struct
+{
+    blockType_e blockType;
+    U32 lastBlock;
+    U32 origSize;
+} blockProperties_t;
+
+/*! ZSTD_getcBlockSize() :
+*   Provides the size of the compressed block from the block header `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    {   U32 const cBlockHeader = MEM_readLE24(src);
+        U32 const cSize = cBlockHeader >> 3;
+        bpPtr->lastBlock = cBlockHeader & 1;
+        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+        bpPtr->origSize = cSize;   /* only useful for RLE */
+        if (bpPtr->blockType == bt_rle) return 1;
+        if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
+        return cSize;
+    }
+}
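+/* block header recap, derived from the code above : the 24-bit little-endian
+ * header packs lastBlock in bit 0, blockType in bits 1-2, and the size in the
+ * remaining 21 bits. Worked example : header 0x000325 gives lastBlock = 1,
+ * blockType = bt_compressed, cSize = 0x325>>3 = 100. For bt_rle blocks the
+ * size field is the regenerated size while the compressed payload is 1 byte. */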
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, size_t regenSize)
+{
+    if (srcSize != 1) return ERROR(srcSize_wrong);
+    if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memset(dst, *(const BYTE*)src, regenSize);
+    return regenSize;
+}
+
+/*! ZSTD_decodeLiteralsBlock() :
+    @return : nb of bytes read from src (< srcSize) */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+{
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    {   const BYTE* const istart = (const BYTE*) src;
+        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+        switch(litEncType)
+        {
+        case set_repeat:
+            if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
+            /* fall-through */
+        case set_compressed:
+            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+            {   size_t lhSize, litSize, litCSize;
+                U32 singleStream=0;
+                U32 const lhlCode = (istart[0] >> 2) & 3;
+                U32 const lhc = MEM_readLE32(istart);
+                switch(lhlCode)
+                {
+                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    /* 2 - 2 - 10 - 10 */
+                    singleStream = !lhlCode;
+                    lhSize = 3;
+                    litSize  = (lhc >> 4) & 0x3FF;
+                    litCSize = (lhc >> 14) & 0x3FF;
+                    break;
+                case 2:
+                    /* 2 - 2 - 14 - 14 */
+                    lhSize = 4;
+                    litSize  = (lhc >> 4) & 0x3FFF;
+                    litCSize = lhc >> 18;
+                    break;
+                case 3:
+                    /* 2 - 2 - 18 - 18 */
+                    lhSize = 5;
+                    litSize  = (lhc >> 4) & 0x3FFFF;
+                    litCSize = (lhc >> 22) + (istart[4] << 10);
+                    break;
+                }
+                if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
+                if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+                if (HUF_isError((litEncType==set_repeat) ?
+                                    ( singleStream ?
+                                        HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) :
+                                        HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) ) :
+                                    ( singleStream ?
+                                        HUF_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
+                                        HUF_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize)) ))
+                    return ERROR(corruption_detected);
+
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                dctx->litEntropy = 1;
+                if (litEncType==set_compressed) dctx->HUFptr = dctx->hufTable;
+                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                return litCSize + lhSize;
+            }
+
+        case set_basic:
+            {   size_t litSize, lhSize;
+                U32 const lhlCode = ((istart[0]) >> 2) & 3;
+                switch(lhlCode)
+                {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    lhSize = 1;
+                    litSize = istart[0] >> 3;
+                    break;
+                case 1:
+                    lhSize = 2;
+                    litSize = MEM_readLE16(istart) >> 4;
+                    break;
+                case 3:
+                    lhSize = 3;
+                    litSize = MEM_readLE24(istart) >> 4;
+                    break;
+                }
+
+                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
+                    if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
+                    dctx->litPtr = dctx->litBuffer;
+                    dctx->litSize = litSize;
+                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                    return lhSize+litSize;
+                }
+                /* direct reference into compressed stream */
+                dctx->litPtr = istart+lhSize;
+                dctx->litSize = litSize;
+                return lhSize+litSize;
+            }
+
+        case set_rle:
+            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
+                size_t litSize, lhSize;
+                switch(lhlCode)
+                {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    lhSize = 1;
+                    litSize = istart[0] >> 3;
+                    break;
+                case 1:
+                    lhSize = 2;
+                    litSize = MEM_readLE16(istart) >> 4;
+                    break;
+                case 3:
+                    lhSize = 3;
+                    litSize = MEM_readLE24(istart) >> 4;
+                    if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+                    break;
+                }
+                if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
+                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                return lhSize+1;
+            }
+        default:
+            return ERROR(corruption_detected);   /* impossible */
+        }
+    }
+}
+
+
+typedef union {
+    FSE_decode_t realData;
+    U32 alignedBy4;
+} FSE_decode_t4;
+
+static const FSE_decode_t4 LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+    { { LL_DEFAULTNORMLOG, 1, 1 } }, /* header : tableLog, fastMode, fastMode */
+    { {  0,  0,  4 } },              /* 0 : base, symbol, bits */
+    { { 16,  0,  4 } },
+    { { 32,  1,  5 } },
+    { {  0,  3,  5 } },
+    { {  0,  4,  5 } },
+    { {  0,  6,  5 } },
+    { {  0,  7,  5 } },
+    { {  0,  9,  5 } },
+    { {  0, 10,  5 } },
+    { {  0, 12,  5 } },
+    { {  0, 14,  6 } },
+    { {  0, 16,  5 } },
+    { {  0, 18,  5 } },
+    { {  0, 19,  5 } },
+    { {  0, 21,  5 } },
+    { {  0, 22,  5 } },
+    { {  0, 24,  5 } },
+    { { 32, 25,  5 } },
+    { {  0, 26,  5 } },
+    { {  0, 27,  6 } },
+    { {  0, 29,  6 } },
+    { {  0, 31,  6 } },
+    { { 32,  0,  4 } },
+    { {  0,  1,  4 } },
+    { {  0,  2,  5 } },
+    { { 32,  4,  5 } },
+    { {  0,  5,  5 } },
+    { { 32,  7,  5 } },
+    { {  0,  8,  5 } },
+    { { 32, 10,  5 } },
+    { {  0, 11,  5 } },
+    { {  0, 13,  6 } },
+    { { 32, 16,  5 } },
+    { {  0, 17,  5 } },
+    { { 32, 19,  5 } },
+    { {  0, 20,  5 } },
+    { { 32, 22,  5 } },
+    { {  0, 23,  5 } },
+    { {  0, 25,  4 } },
+    { { 16, 25,  4 } },
+    { { 32, 26,  5 } },
+    { {  0, 28,  6 } },
+    { {  0, 30,  6 } },
+    { { 48,  0,  4 } },
+    { { 16,  1,  4 } },
+    { { 32,  2,  5 } },
+    { { 32,  3,  5 } },
+    { { 32,  5,  5 } },
+    { { 32,  6,  5 } },
+    { { 32,  8,  5 } },
+    { { 32,  9,  5 } },
+    { { 32, 11,  5 } },
+    { { 32, 12,  5 } },
+    { {  0, 15,  6 } },
+    { { 32, 17,  5 } },
+    { { 32, 18,  5 } },
+    { { 32, 20,  5 } },
+    { { 32, 21,  5 } },
+    { { 32, 23,  5 } },
+    { { 32, 24,  5 } },
+    { {  0, 35,  6 } },
+    { {  0, 34,  6 } },
+    { {  0, 33,  6 } },
+    { {  0, 32,  6 } },
+};   /* LL_defaultDTable */
+
+static const FSE_decode_t4 ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+    { { ML_DEFAULTNORMLOG, 1, 1 } }, /* header : tableLog, fastMode, fastMode */
+    { {  0,  0,  6 } },              /* 0 : base, symbol, bits */
+    { {  0,  1,  4 } },
+    { { 32,  2,  5 } },
+    { {  0,  3,  5 } },
+    { {  0,  5,  5 } },
+    { {  0,  6,  5 } },
+    { {  0,  8,  5 } },
+    { {  0, 10,  6 } },
+    { {  0, 13,  6 } },
+    { {  0, 16,  6 } },
+    { {  0, 19,  6 } },
+    { {  0, 22,  6 } },
+    { {  0, 25,  6 } },
+    { {  0, 28,  6 } },
+    { {  0, 31,  6 } },
+    { {  0, 33,  6 } },
+    { {  0, 35,  6 } },
+    { {  0, 37,  6 } },
+    { {  0, 39,  6 } },
+    { {  0, 41,  6 } },
+    { {  0, 43,  6 } },
+    { {  0, 45,  6 } },
+    { { 16,  1,  4 } },
+    { {  0,  2,  4 } },
+    { { 32,  3,  5 } },
+    { {  0,  4,  5 } },
+    { { 32,  6,  5 } },
+    { {  0,  7,  5 } },
+    { {  0,  9,  6 } },
+    { {  0, 12,  6 } },
+    { {  0, 15,  6 } },
+    { {  0, 18,  6 } },
+    { {  0, 21,  6 } },
+    { {  0, 24,  6 } },
+    { {  0, 27,  6 } },
+    { {  0, 30,  6 } },
+    { {  0, 32,  6 } },
+    { {  0, 34,  6 } },
+    { {  0, 36,  6 } },
+    { {  0, 38,  6 } },
+    { {  0, 40,  6 } },
+    { {  0, 42,  6 } },
+    { {  0, 44,  6 } },
+    { { 32,  1,  4 } },
+    { { 48,  1,  4 } },
+    { { 16,  2,  4 } },
+    { { 32,  4,  5 } },
+    { { 32,  5,  5 } },
+    { { 32,  7,  5 } },
+    { { 32,  8,  5 } },
+    { {  0, 11,  6 } },
+    { {  0, 14,  6 } },
+    { {  0, 17,  6 } },
+    { {  0, 20,  6 } },
+    { {  0, 23,  6 } },
+    { {  0, 26,  6 } },
+    { {  0, 29,  6 } },
+    { {  0, 52,  6 } },
+    { {  0, 51,  6 } },
+    { {  0, 50,  6 } },
+    { {  0, 49,  6 } },
+    { {  0, 48,  6 } },
+    { {  0, 47,  6 } },
+    { {  0, 46,  6 } },
+};   /* ML_defaultDTable */
+
+static const FSE_decode_t4 OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+    { { OF_DEFAULTNORMLOG, 1, 1 } }, /* header : tableLog, fastMode, fastMode */
+    { {  0,  0,  5 } },              /* 0 : base, symbol, bits */
+    { {  0,  6,  4 } },
+    { {  0,  9,  5 } },
+    { {  0, 15,  5 } },
+    { {  0, 21,  5 } },
+    { {  0,  3,  5 } },
+    { {  0,  7,  4 } },
+    { {  0, 12,  5 } },
+    { {  0, 18,  5 } },
+    { {  0, 23,  5 } },
+    { {  0,  5,  5 } },
+    { {  0,  8,  4 } },
+    { {  0, 14,  5 } },
+    { {  0, 20,  5 } },
+    { {  0,  2,  5 } },
+    { { 16,  7,  4 } },
+    { {  0, 11,  5 } },
+    { {  0, 17,  5 } },
+    { {  0, 22,  5 } },
+    { {  0,  4,  5 } },
+    { { 16,  8,  4 } },
+    { {  0, 13,  5 } },
+    { {  0, 19,  5 } },
+    { {  0,  1,  5 } },
+    { { 16,  6,  4 } },
+    { {  0, 10,  5 } },
+    { {  0, 16,  5 } },
+    { {  0, 28,  5 } },
+    { {  0, 27,  5 } },
+    { {  0, 26,  5 } },
+    { {  0, 25,  5 } },
+    { {  0, 24,  5 } },
+};   /* OF_defaultDTable */
+
+/*! ZSTD_buildSeqTable() :
+    @return : nb bytes read from src,
+              or an error code if it fails, testable with ZSTD_isError()
+*/
+static size_t ZSTD_buildSeqTable(FSE_DTable* DTableSpace, const FSE_DTable** DTablePtr,
+                                 symbolEncodingType_e type, U32 max, U32 maxLog,
+                                 const void* src, size_t srcSize,
+                                 const FSE_decode_t4* defaultTable, U32 flagRepeatTable)
+{
+    const void* const tmpPtr = defaultTable;   /* bypass strict aliasing */
+    switch(type)
+    {
+    case set_rle :
+        if (!srcSize) return ERROR(srcSize_wrong);
+        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+        FSE_buildDTable_rle(DTableSpace, *(const BYTE*)src);
+        *DTablePtr = DTableSpace;
+        return 1;
+    case set_basic :
+        *DTablePtr = (const FSE_DTable*)tmpPtr;
+        return 0;
+    case set_repeat:
+        if (!flagRepeatTable) return ERROR(corruption_detected);
+        return 0;
+    default :   /* impossible */
+    case set_compressed :
+        {   U32 tableLog;
+            S16 norm[MaxSeq+1];
+            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+            if (FSE_isError(headerSize)) return ERROR(corruption_detected);
+            if (tableLog > maxLog) return ERROR(corruption_detected);
+            FSE_buildDTable(DTableSpace, norm, max, tableLog);
+            *DTablePtr = DTableSpace;
+            return headerSize;
+    }   }
+}
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+                             const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* ip = istart;
+
+    /* check */
+    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    {   int nbSeq = *ip++;
+        if (!nbSeq) { *nbSeqPtr=0; return 1; }
+        if (nbSeq > 0x7F) {
+            if (nbSeq == 0xFF) {
+                if (ip+2 > iend) return ERROR(srcSize_wrong);
+                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+            } else {
+                if (ip >= iend) return ERROR(srcSize_wrong);
+                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+            }
+        }
+        *nbSeqPtr = nbSeq;
+    }
+
+    /* FSE table descriptors */
+    if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
+    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+        ip++;
+
+        /* Build DTables */
+        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->LLTable, &dctx->LLTptr,
+                                                      LLtype, MaxLL, LLFSELog,
+                                                      ip, iend-ip, LL_defaultDTable, dctx->fseEntropy);
+            if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
+            ip += llhSize;
+        }
+        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->OFTable, &dctx->OFTptr,
+                                                      OFtype, MaxOff, OffFSELog,
+                                                      ip, iend-ip, OF_defaultDTable, dctx->fseEntropy);
+            if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
+            ip += ofhSize;
+        }
+        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->MLTable, &dctx->MLTptr,
+                                                      MLtype, MaxML, MLFSELog,
+                                                      ip, iend-ip, ML_defaultDTable, dctx->fseEntropy);
+            if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
+            ip += mlhSize;
+        }
+    }
+
+    return ip-istart;
+}
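+/* descriptor recap : the byte consumed above packs the three table modes as
+ * LLtype in bits 6-7, OFtype in bits 4-5 and MLtype in bits 2-3, bits 0-1
+ * being reserved. e.g. a descriptor of 0 selects set_basic for all three,
+ * i.e. the predefined LL/OF/ML default distribution tables. */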
+
+
+typedef struct {
+    size_t litLength;
+    size_t matchLength;
+    size_t offset;
+    const BYTE* match;
+} seq_t;
+
+typedef struct {
+    BIT_DStream_t DStream;
+    FSE_DState_t stateLL;
+    FSE_DState_t stateOffb;
+    FSE_DState_t stateML;
+    size_t prevOffset[ZSTD_REP_NUM];
+    const BYTE* base;
+    size_t pos;
+    iPtrDiff gotoDict;
+} seqState_t;
+
+
+FORCE_NOINLINE
+size_t ZSTD_execSequenceLast7(BYTE* op,
+                              BYTE* const oend, seq_t sequence,
+                              const BYTE** litPtr, const BYTE* const litLimit,
+                              const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    if (oLitEnd <= oend_w) return ERROR(GENERIC);   /* precondition : this path is only taken when oLitEnd > oend_w */
+
+    /* copy literals */
+    if (op < oend_w) {
+        ZSTD_wildcopy(op, *litPtr, oend_w - op);
+        *litPtr += oend_w - op;
+        op = oend_w;
+    }
+    while (op < oLitEnd) *op++ = *(*litPtr)++;
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+        match = dictEnd - (base-match);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+    }   }
+    while (op < oMatchEnd) *op++ = *match++;
+    return sequenceLength;
+}
+
+
+
+
+static seq_t ZSTD_decodeSequence(seqState_t* seqState)
+{
+    seq_t seq;
+
+    U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
+    U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
+    U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb);   /* <= maxOff, by table construction */
+
+    U32 const llBits = LL_bits[llCode];
+    U32 const mlBits = ML_bits[mlCode];
+    U32 const ofBits = ofCode;
+    U32 const totalBits = llBits+mlBits+ofBits;
+
+    static const U32 LL_base[MaxLL+1] = {
+                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,
+                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+                            0x2000, 0x4000, 0x8000, 0x10000 };
+
+    static const U32 ML_base[MaxML+1] = {
+                             3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,   14,    15,    16,    17,    18,
+                            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,   30,    31,    32,    33,    34,
+                            35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+                            0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+    static const U32 OF_base[MaxOff+1] = {
+                             0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
+                             0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
+                             0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+                             0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofCode)
+            offset = 0;
+        else {
+            offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+        }
+
+        if (ofCode <= 1) {
+            offset += (llCode==0);
+            if (offset) {
+                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
+                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+            } else {
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq.offset = offset;
+    }
+
+    seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0);  /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);
+
+    seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0);    /* <=  16 bits */
+    if (MEM_32bits() ||
+       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);
+
+    /* ANS state update */
+    FSE_updateState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
+    FSE_updateState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
+    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
+    FSE_updateState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
+
+    return seq;
+}
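+/* repcode recap : ofCode 0-1 decodes to a raw value in [0..2], which the
+ * branch above treats as a repeat-code. After adding 1 when litLength == 0,
+ * value 0 repeats prevOffset[0] unchanged, values 1-2 select prevOffset[1-2],
+ * and value 3 stands for prevOffset[0]-1 ; the offset actually used is then
+ * rotated to the front of the prevOffset[] history. */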
+
+
+FORCE_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+                                BYTE* const oend, seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
+
+    /* copy Literals */
+    ZSTD_copy8(op, *litPtr);
+    if (sequence.litLength > 8)
+        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+        match += (dictEnd-base);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+            if (op > oend_w || sequence.matchLength < MINMATCH) {
+              U32 i;
+              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+              return sequenceLength;
+            }
+    }   }
+    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTD_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTD_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_w) {
+            ZSTD_wildcopy(op, match, oend_w - op);
+            match += oend_w - op;
+            op = oend_w;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
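+
+/* Illustrative note (not part of upstream zstd) : for offsets < 8, a straight
+ * 8-byte copy would overlap its own output. The dec32table/dec64table fixup
+ * above first replicates the short period byte by byte, then re-positions
+ * `match` so every remaining copy runs at a distance of at least 8 bytes,
+ * making the subsequent ZSTD_wildcopy() safe. */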
+
+
+static size_t ZSTD_decompressSequences(
+                               ZSTD_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    const BYTE* const base = (const BYTE*) (dctx->base);
+    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+    int nbSeq;
+
+    /* Build Decoding Tables */
+    {   size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
+        if (ZSTD_isError(seqHSize)) return seqHSize;
+        ip += seqHSize;
+    }
+
+    /* Regen sequences */
+    if (nbSeq) {
+        seqState_t seqState;
+        dctx->fseEntropy = 1;
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->rep[i]; }
+        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+        FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+        FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+        FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
+            nbSeq--;
+            {   seq_t const sequence = ZSTD_decodeSequence(&seqState);
+                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+                op += oneSeqSize;
+        }   }
+
+        /* check if reached exact end */
+        if (nbSeq) return ERROR(corruption_detected);
+        /* save reps for next block */
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
+
+
+static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState)
+{
+    seq_t seq;
+
+    U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
+    U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
+    U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb);   /* <= maxOff, by table construction */
+
+    U32 const llBits = LL_bits[llCode];
+    U32 const mlBits = ML_bits[mlCode];
+    U32 const ofBits = ofCode;
+    U32 const totalBits = llBits+mlBits+ofBits;
+
+    static const U32 LL_base[MaxLL+1] = {
+                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,
+                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+                            0x2000, 0x4000, 0x8000, 0x10000 };
+
+    static const U32 ML_base[MaxML+1] = {
+                             3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,   14,    15,    16,    17,    18,
+                            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,   30,    31,    32,    33,    34,
+                            35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+                            0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+    static const U32 OF_base[MaxOff+1] = {
+                             0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
+                             0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
+                             0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+                             0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofCode)
+            offset = 0;
+        else {
+            offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+        }
+
+        if (ofCode <= 1) {
+            offset += (llCode==0);
+            if (offset) {
+                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
+                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+            } else {
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq.offset = offset;
+    }
+
+    seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0);  /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream);
+
+    seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0);    /* <=  16 bits */
+    if (MEM_32bits() ||
+       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream);
+
+    {   size_t const pos = seqState->pos + seq.litLength;
+        seq.match = seqState->base + pos - seq.offset;    /* single memory segment */
+        if (seq.offset > pos) seq.match += seqState->gotoDict;   /* separate memory segment */
+        seqState->pos = pos + seq.matchLength;
+    }
+
+    /* ANS state update */
+    FSE_updateState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
+    FSE_updateState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
+    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
+    FSE_updateState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
+
+    return seq;
+}
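+
+/* Illustrative note (not part of upstream zstd) : unlike ZSTD_decodeSequence,
+ * this "Long" variant also resolves seq.match up front from seqState->pos and
+ * seqState->base (with gotoDict bridging into the dictionary segment), so the
+ * caller can prefetch the match bytes several sequences before executing them. */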
+
+FORCE_INLINE
+size_t ZSTD_execSequenceLong(BYTE* op,
+                                BYTE* const oend, seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = sequence.match;
+
+    /* check */
+#if 1
+    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
+#endif
+
+    /* copy Literals */
+    ZSTD_copy8(op, *litPtr);
+    if (sequence.litLength > 8)
+        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+#if 1
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+            if (op > oend_w || sequence.matchLength < MINMATCH) {
+              U32 i;
+              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+              return sequenceLength;
+            }
+    }   }
+    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+#endif
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTD_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTD_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_w) {
+            ZSTD_wildcopy(op, match, oend_w - op);
+            match += oend_w - op;
+            op = oend_w;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+static size_t ZSTD_decompressSequencesLong(
+                               ZSTD_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    const BYTE* const base = (const BYTE*) (dctx->base);
+    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+    int nbSeq;
+
+    /* Build Decoding Tables */
+    {   size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
+        if (ZSTD_isError(seqHSize)) return seqHSize;
+        ip += seqHSize;
+    }
+
+    /* Regen sequences */
+    if (nbSeq) {
+#define STORED_SEQS 4
+#define STOSEQ_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS 4
+        seq_t sequences[STORED_SEQS];
+        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+        seqState_t seqState;
+        int seqNb;
+        dctx->fseEntropy = 1;
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->rep[i]; }
+        seqState.base = base;
+        seqState.pos = (size_t)(op-base);
+        seqState.gotoDict = (iPtrDiff)(dictEnd - base);
+        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+        FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+        FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+        FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+        /* prepare in advance */
+        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb<seqAdvance; seqNb++) {
+            sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState);
+        }
+        if (seqNb<seqAdvance) return ERROR(corruption_detected);
+
+        /* decode and decompress */
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb<nbSeq ; seqNb++) {
+            seq_t const sequence = ZSTD_decodeSequenceLong(&seqState);
+            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            ZSTD_PREFETCH(sequence.match);
+            sequences[seqNb&STOSEQ_MASK] = sequence;
+            op += oneSeqSize;
+        }
+        if (seqNb<nbSeq) return ERROR(corruption_detected);
+
+        /* finish queue */
+        seqNb -= seqAdvance;
+        for ( ; seqNb<nbSeq ; seqNb++) {
+            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* save reps for next block */
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
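+
+/* Illustrative note (not part of upstream zstd) : the loop above is a small
+ * software pipeline. It keeps STORED_SEQS (4) decoded sequences in flight,
+ * issuing ZSTD_PREFETCH on each upcoming match while executing the sequence
+ * decoded four steps earlier, hiding cache-miss latency on large-window frames. */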
+
+
+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{   /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+
+    if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);
+
+    /* Decode literals sub-block */
+    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+        if (ZSTD_isError(litCSize)) return litCSize;
+        ip += litCSize;
+        srcSize -= litCSize;
+    }
+    if (dctx->fParams.windowSize > (1<<23)) return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
+    return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
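+
+/* Illustrative note (not part of upstream zstd) : frames declaring a window
+ * larger than 1<<23 (8 MiB) take the prefetching ZSTD_decompressSequencesLong
+ * path above; smaller windows use the plain ZSTD_decompressSequences loop. */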
+
+
+static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+{
+    if (dst != dctx->previousDstEnd) {   /* not contiguous */
+        dctx->dictEnd = dctx->previousDstEnd;
+        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+        dctx->base = dst;
+        dctx->previousDstEnd = dst;
+    }
+}
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{
+    size_t dSize;
+    ZSTD_checkContinuity(dctx, dst);
+    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+    dctx->previousDstEnd = (char*)dst + dSize;
+    return dSize;
+}
+
+
+/** ZSTD_insertBlock() :
+    insert `blockStart` block into `dctx` history. Useful to track uncompressed blocks. */
+ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+    ZSTD_checkContinuity(dctx, blockStart);
+    dctx->previousDstEnd = (const char*)blockStart + blockSize;
+    return blockSize;
+}
+
+
+size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+{
+    if (length > dstCapacity) return ERROR(dstSize_tooSmall);
+    memset(dst, byte, length);
+    return length;
+}
+
+
+/*! ZSTD_decompressFrame() :
+*   `dctx` must be properly initialized */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+                                 void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    size_t remainingSize = srcSize;
+
+    /* check */
+    if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+
+    /* Frame Header */
+    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
+        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+        if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+        CHECK_F(ZSTD_decodeFrameHeader(dctx, src, frameHeaderSize));
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1) {
+        size_t decodedSize;
+        blockProperties_t blockProperties;
+        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+            break;
+        case bt_raw :
+            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
+            break;
+        case bt_reserved :
+        default:
+            return ERROR(corruption_detected);
+        }
+
+        if (ZSTD_isError(decodedSize)) return decodedSize;
+        if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize);
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+        if (blockProperties.lastBlock) break;
+    }
+
+    if (dctx->fParams.checksumFlag) {   /* Frame content checksum verification */
+        U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
+        U32 checkRead;
+        if (remainingSize<4) return ERROR(checksum_wrong);
+        checkRead = MEM_readLE32(ip);
+        if (checkRead != checkCalc) return ERROR(checksum_wrong);
+        remainingSize -= 4;
+    }
+
+    if (remainingSize) return ERROR(srcSize_wrong);
+    return op-ostart;
+}
+
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+                                 void* dst, size_t dstCapacity,
+                           const void* src, size_t srcSize,
+                           const void* dict, size_t dictSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
+    if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, dict, dictSize);
+#endif
+    ZSTD_decompressBegin_usingDict(dctx, dict, dictSize);
+    ZSTD_checkContinuity(dctx, dst);
+    return ZSTD_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
+
+
+size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1)
+    size_t regenSize;
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    if (dctx==NULL) return ERROR(memory_allocation);
+    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+    ZSTD_freeDCtx(dctx);
+    return regenSize;
+#else   /* stack mode */
+    ZSTD_DCtx dctx;
+    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
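+
+/* Illustrative note (not part of upstream zstd) : one-shot usage sketch;
+ * buffer names and capacities are hypothetical :
+ *
+ *     size_t const dSize = ZSTD_decompress(dstBuf, dstCapacity, srcBuf, srcSize);
+ *     if (ZSTD_isError(dSize)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(dSize)); }
+ */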
+
+
+/*-**************************************
+*   Advanced Streaming Decompression API
+*   Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
+    switch(dctx->stage)
+    {
+    default:   /* should not happen */
+    case ZSTDds_getFrameHeaderSize:
+    case ZSTDds_decodeFrameHeader:
+        return ZSTDnit_frameHeader;
+    case ZSTDds_decodeBlockHeader:
+        return ZSTDnit_blockHeader;
+    case ZSTDds_decompressBlock:
+        return ZSTDnit_block;
+    case ZSTDds_decompressLastBlock:
+        return ZSTDnit_lastBlock;
+    case ZSTDds_checkChecksum:
+        return ZSTDnit_checksum;
+    case ZSTDds_decodeSkippableHeader:
+    case ZSTDds_skipFrame:
+        return ZSTDnit_skippableFrame;
+    }
+}
+
+int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }   /* for zbuff */
+
+/** ZSTD_decompressContinue() :
+*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+*             or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    /* Sanity check */
+    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
+    if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
+
+    switch (dctx->stage)
+    {
+    case ZSTDds_getFrameHeaderSize :
+        if (srcSize != ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong);      /* impossible */
+        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
+            memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
+            dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix;  /* magic number + skippable frame length */
+            dctx->stage = ZSTDds_decodeSkippableHeader;
+            return 0;
+        }
+        dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
+        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
+        memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
+        if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
+            dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
+            dctx->stage = ZSTDds_decodeFrameHeader;
+            return 0;
+        }
+        dctx->expected = 0;   /* not necessary to copy more */
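+        /* fall-through */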
+
+    case ZSTDds_decodeFrameHeader:
+        memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
+        CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+        dctx->expected = ZSTD_blockHeaderSize;
+        dctx->stage = ZSTDds_decodeBlockHeader;
+        return 0;
+
+    case ZSTDds_decodeBlockHeader:
+        {   blockProperties_t bp;
+            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+            if (ZSTD_isError(cBlockSize)) return cBlockSize;
+            dctx->expected = cBlockSize;
+            dctx->bType = bp.blockType;
+            dctx->rleSize = bp.origSize;
+            if (cBlockSize) {
+                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+                return 0;
+            }
+            /* empty block */
+            if (bp.lastBlock) {
+                if (dctx->fParams.checksumFlag) {
+                    dctx->expected = 4;
+                    dctx->stage = ZSTDds_checkChecksum;
+                } else {
+                    dctx->expected = 0; /* end of frame */
+                    dctx->stage = ZSTDds_getFrameHeaderSize;
+                }
+            } else {
+                dctx->expected = 3;  /* go directly to next header */
+                dctx->stage = ZSTDds_decodeBlockHeader;
+            }
+            return 0;
+        }
+    case ZSTDds_decompressLastBlock:
+    case ZSTDds_decompressBlock:
+        {   size_t rSize;
+            switch(dctx->bType)
+            {
+            case bt_compressed:
+                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+                break;
+            case bt_raw :
+                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
+                break;
+            case bt_rle :
+                rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize);
+                break;
+            case bt_reserved :   /* should never happen */
+            default:
+                return ERROR(corruption_detected);
+            }
+            if (ZSTD_isError(rSize)) return rSize;
+            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
+
+            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
+                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
+                    dctx->expected = 4;
+                    dctx->stage = ZSTDds_checkChecksum;
+                } else {
+                    dctx->expected = 0;   /* ends here */
+                    dctx->stage = ZSTDds_getFrameHeaderSize;
+                }
+            } else {
+                dctx->stage = ZSTDds_decodeBlockHeader;
+                dctx->expected = ZSTD_blockHeaderSize;
+                dctx->previousDstEnd = (char*)dst + rSize;
+            }
+            return rSize;
+        }
+    case ZSTDds_checkChecksum:
+        {   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
+            U32 const check32 = MEM_readLE32(src);   /* srcSize == 4, guaranteed by dctx->expected */
+            if (check32 != h32) return ERROR(checksum_wrong);
+            dctx->expected = 0;
+            dctx->stage = ZSTDds_getFrameHeaderSize;
+            return 0;
+        }
+    case ZSTDds_decodeSkippableHeader:
+        {   memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
+            dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);
+            dctx->stage = ZSTDds_skipFrame;
+            return 0;
+        }
+    case ZSTDds_skipFrame:
+        {   dctx->expected = 0;
+            dctx->stage = ZSTDds_getFrameHeaderSize;
+            return 0;
+        }
+    default:
+        return ERROR(GENERIC);   /* impossible */
+    }
+}
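+
+/* Illustrative note (not part of upstream zstd) : driver loop for the
+ * bufferless API above, after ZSTD_decompressBegin_usingDict(); readExactly()
+ * and the buffers (file, inBuff, op, oend) are hypothetical :
+ *
+ *     size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *     while (toRead != 0) {
+ *         readExactly(file, inBuff, toRead);        // feed exactly `toRead` bytes
+ *         {   size_t const r = ZSTD_decompressContinue(dctx, op, (size_t)(oend-op), inBuff, toRead);
+ *             if (ZSTD_isError(r)) return r;
+ *             op += r;                              // r may be 0 for header inputs
+ *         }
+ *         toRead = ZSTD_nextSrcSizeToDecompress(dctx);   // 0 => frame fully decoded
+ *     }
+ */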
+
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    dctx->dictEnd = dctx->previousDstEnd;
+    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+    dctx->base = dict;
+    dctx->previousDstEnd = (const char*)dict + dictSize;
+    return 0;
+}
+
+static size_t ZSTD_loadEntropy(ZSTD_DCtx* dctx, const void* const dict, size_t const dictSize)
+{
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+
+    {   size_t const hSize = HUF_readDTableX4(dctx->hufTable, dict, dictSize);
+        if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
+        dictPtr += hSize;
+    }
+
+    {   short offcodeNCount[MaxOff+1];
+        U32 offcodeMaxValue=MaxOff, offcodeLog;
+        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        CHECK_E(FSE_buildDTable(dctx->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted);
+        dictPtr += offcodeHeaderSize;
+    }
+
+    {   short matchlengthNCount[MaxML+1];
+        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        CHECK_E(FSE_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted);
+        dictPtr += matchlengthHeaderSize;
+    }
+
+    {   short litlengthNCount[MaxLL+1];
+        unsigned litlengthMaxValue = MaxLL, litlengthLog;
+        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        CHECK_E(FSE_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted);
+        dictPtr += litlengthHeaderSize;
+    }
+
+    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+    dictPtr += 12;
+
+    dctx->litEntropy = dctx->fseEntropy = 1;
+    return dictPtr - (const BYTE*)dict;
+}
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
+    {   U32 const magic = MEM_readLE32(dict);
+        if (magic != ZSTD_DICT_MAGIC) {
+            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
+    }   }
+    dctx->dictID = MEM_readLE32((const char*)dict + 4);
+
+    /* load entropy tables */
+    dict = (const char*)dict + 8;
+    dictSize -= 8;
+    {   size_t const eSize = ZSTD_loadEntropy(dctx, dict, dictSize);
+        if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
+        dict = (const char*)dict + eSize;
+        dictSize -= eSize;
+    }
+
+    /* reference dictionary content */
+    return ZSTD_refDictContent(dctx, dict, dictSize);
+}
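+
+/* Illustrative note (not part of upstream zstd) : as implemented above, a
+ * conformant dictionary is laid out as : 4-byte ZSTD_DICT_MAGIC, 4-byte
+ * dictID, entropy tables (Huffman table for literals, then OF/ML/LL FSE
+ * tables and 3 repeat offsets, read by ZSTD_loadEntropy), followed by raw
+ * content which is simply referenced. A blob without the magic is used as
+ * pure content. */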
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    CHECK_F(ZSTD_decompressBegin(dctx));
+    if (dict && dictSize) CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
+    return 0;
+}
+
+
+/* ======   ZSTD_DDict   ====== */
+
+struct ZSTD_DDict_s {
+    void* dict;
+    size_t dictSize;
+    ZSTD_DCtx* refContext;
+};  /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_customMem customMem)
+{
+    if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+    if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+        void* const dictContent = ZSTD_malloc(dictSize, customMem);
+        ZSTD_DCtx* const dctx = ZSTD_createDCtx_advanced(customMem);
+
+        if (!dictContent || !ddict || !dctx) {
+            ZSTD_free(dictContent, customMem);
+            ZSTD_free(ddict, customMem);
+            ZSTD_free(dctx, customMem);
+            return NULL;
+        }
+
+        if (dictSize) {
+            memcpy(dictContent, dict, dictSize);
+        }
+        {   size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, dictContent, dictSize);
+            if (ZSTD_isError(errorCode)) {
+                ZSTD_free(dictContent, customMem);
+                ZSTD_free(ddict, customMem);
+                ZSTD_free(dctx, customMem);
+                return NULL;
+        }   }
+
+        ddict->dict = dictContent;
+        ddict->dictSize = dictSize;
+        ddict->refContext = dctx;
+        return ddict;
+    }
+}
+
+/*! ZSTD_createDDict() :
+*   Create a digested dictionary, ready to start decompression without startup delay.
+*   `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dict, dictSize, allocator);
+}
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support free on NULL */
+    {   ZSTD_customMem const cMem = ddict->refContext->customMem;
+        ZSTD_freeDCtx(ddict->refContext);
+        ZSTD_free(ddict->dict, cMem);
+        ZSTD_free(ddict, cMem);
+        return 0;
+    }
+}
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*ddict) + sizeof(ddict->refContext) + ddict->dictSize;
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within the dictionary.
+ *  If @return == 0, the dictionary is not conformant with the Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+{
+    if (dictSize < 8) return 0;
+    if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return 0;
+    return MEM_readLE32((const char*)dict + 4);
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;
+    return ZSTD_getDictID_fromDict(ddict->dict, ddict->dictSize);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ *  - This is not a Zstandard frame.
+ *  To identify the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
+{
+    ZSTD_frameParams zfp = { 0 , 0 , 0 , 0 };
+    size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
+    if (ZSTD_isError(hError)) return 0;
+    return zfp.dictID;
+}
+
+
+/*! ZSTD_decompress_usingDDict() :
+*   Decompression using a pre-digested Dictionary
+*   Use dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+                                  void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                            const ZSTD_DDict* ddict)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
+    if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dict, ddict->dictSize);
+#endif
+    ZSTD_refDCtx(dctx, ddict->refContext);
+    ZSTD_checkContinuity(dctx, dst);
+    return ZSTD_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
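+
+/* Illustrative note (not part of upstream zstd) : reuse pattern with
+ * hypothetical names. Digest the dictionary once, then decompress many frames :
+ *
+ *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
+ *     size_t const dSize = ZSTD_decompress_usingDDict(dctx, dstBuf, dstCapacity,
+ *                                                     srcBuf, srcSize, ddict);
+ *     ZSTD_freeDDict(ddict);
+ */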
+
+
+/*=====================================
+*   Streaming decompression
+*====================================*/
+
+typedef enum { zdss_init, zdss_loadHeader,
+               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+/* *** Resource management *** */
+struct ZSTD_DStream_s {
+    ZSTD_DCtx* dctx;
+    ZSTD_DDict* ddictLocal;
+    const ZSTD_DDict* ddict;
+    ZSTD_frameParams fParams;
+    ZSTD_dStreamStage stage;
+    char*  inBuff;
+    size_t inBuffSize;
+    size_t inPos;
+    size_t maxWindowSize;
+    char*  outBuff;
+    size_t outBuffSize;
+    size_t outStart;
+    size_t outEnd;
+    size_t blockSize;
+    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];   /* tmp buffer to store frame header */
+    size_t lhSize;
+    ZSTD_customMem customMem;
+    void* legacyContext;
+    U32 previousLegacyVersion;
+    U32 legacyVersion;
+    U32 hostageByte;
+};   /* typedef'd to ZSTD_DStream within "zstd.h" */
+
+
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+    return ZSTD_createDStream_advanced(defaultCustomMem);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+    ZSTD_DStream* zds;
+
+    if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+    if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+    zds = (ZSTD_DStream*) ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
+    if (zds==NULL) return NULL;
+    memset(zds, 0, sizeof(ZSTD_DStream));
+    memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
+    zds->dctx = ZSTD_createDCtx_advanced(customMem);
+    if (zds->dctx == NULL) { ZSTD_freeDStream(zds); return NULL; }
+    zds->stage = zdss_init;
+    zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+    return zds;
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+    if (zds==NULL) return 0;   /* support free on null */
+    {   ZSTD_customMem const cMem = zds->customMem;
+        ZSTD_freeDCtx(zds->dctx);
+        ZSTD_freeDDict(zds->ddictLocal);
+        ZSTD_free(zds->inBuff, cMem);
+        ZSTD_free(zds->outBuff, cMem);
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+        if (zds->legacyContext)
+            ZSTD_freeLegacyStreamContext(zds->legacyContext, zds->previousLegacyVersion);
+#endif
+        ZSTD_free(zds, cMem);
+        return 0;
+    }
+}
+
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
+
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
+{
+    zds->stage = zdss_loadHeader;
+    zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+    ZSTD_freeDDict(zds->ddictLocal);
+    if (dict) {
+        zds->ddictLocal = ZSTD_createDDict(dict, dictSize);
+        if (zds->ddictLocal == NULL) return ERROR(memory_allocation);
+    } else zds->ddictLocal = NULL;
+    zds->ddict = zds->ddictLocal;
+    zds->legacyVersion = 0;
+    zds->hostageByte = 0;
+    return ZSTD_frameHeaderSize_prefix;
+}
+
+size_t ZSTD_initDStream(ZSTD_DStream* zds)
+{
+    return ZSTD_initDStream_usingDict(zds, NULL, 0);
+}
+
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict)  /**< note : ddict will just be referenced, and must outlive the decompression session */
+{
+    size_t const initResult = ZSTD_initDStream(zds);
+    zds->ddict = ddict;
+    return initResult;
+}
+
+size_t ZSTD_resetDStream(ZSTD_DStream* zds)
+{
+    zds->stage = zdss_loadHeader;
+    zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+    zds->legacyVersion = 0;
+    zds->hostageByte = 0;
+    return ZSTD_frameHeaderSize_prefix;
+}
+
+size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds,
+                                ZSTD_DStreamParameter_e paramType, unsigned paramValue)
+{
+    switch(paramType)
+    {
+        default : return ERROR(parameter_unknown);
+        case ZSTDdsp_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
+    }
+    return 0;
+}
+
+
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds)
+{
+    if (zds==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*zds) + ZSTD_sizeof_DCtx(zds->dctx) + ZSTD_sizeof_DDict(zds->ddictLocal) + zds->inBuffSize + zds->outBuffSize;
+}
+
+
+/* *****   Decompression   ***** */
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    size_t const length = MIN(dstCapacity, srcSize);
+    memcpy(dst, src, length);
+    return length;
+}
+
+
+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    const char* const istart = (const char*)(input->src) + input->pos;
+    const char* const iend = (const char*)(input->src) + input->size;
+    const char* ip = istart;
+    char* const ostart = (char*)(output->dst) + output->pos;
+    char* const oend = (char*)(output->dst) + output->size;
+    char* op = ostart;
+    U32 someMoreWork = 1;
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+    if (zds->legacyVersion)
+        return ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
+#endif
+
+    while (someMoreWork) {
+        switch(zds->stage)
+        {
+        case zdss_init :
+            ZSTD_resetDStream(zds);   /* transparent reset on starting decoding a new frame */
+            /* fall-through */
+
+        case zdss_loadHeader :
+            {   size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
+                if (ZSTD_isError(hSize))
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+                {   U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
+                    if (legacyVersion) {
+                        const void* const dict = zds->ddict ? zds->ddict->dict : NULL;
+                        size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
+                        CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion,
+                                                       dict, dictSize));
+                        zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
+                        return ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
+                    } else {
+                        return hSize; /* error */
+                }   }
+#else
+                return hSize;
+#endif
+                if (hSize != 0) {   /* need more input */
+                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
+                    if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */
+                        memcpy(zds->headerBuffer + zds->lhSize, ip, iend-ip);
+                        zds->lhSize += iend-ip;
+                        input->pos = input->size;
+                        return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
+                    }
+                    memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+                    break;
+            }   }
+
+            /* Consume header */
+            {   const ZSTD_DCtx* refContext = zds->ddict ? zds->ddict->refContext : NULL;
+                ZSTD_refDCtx(zds->dctx, refContext);
+            }
+            {   size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);  /* == ZSTD_frameHeaderSize_prefix */
+                CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
+                {   size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+                    CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer+h1Size, h2Size));
+            }   }
+
+            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+            if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
+
+            /* Adapt buffer sizes to frame header instructions */
+            {   size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
+                size_t const neededOutSize = zds->fParams.windowSize + blockSize;
+                zds->blockSize = blockSize;
+                if (zds->inBuffSize < blockSize) {
+                    ZSTD_free(zds->inBuff, zds->customMem);
+                    zds->inBuffSize = blockSize;
+                    zds->inBuff = (char*)ZSTD_malloc(blockSize, zds->customMem);
+                    if (zds->inBuff == NULL) return ERROR(memory_allocation);
+                }
+                if (zds->outBuffSize < neededOutSize) {
+                    ZSTD_free(zds->outBuff, zds->customMem);
+                    zds->outBuffSize = neededOutSize;
+                    zds->outBuff = (char*)ZSTD_malloc(neededOutSize, zds->customMem);
+                    if (zds->outBuff == NULL) return ERROR(memory_allocation);
+            }   }
+            zds->stage = zdss_read;
+            /* pass-through */
+
+        case zdss_read:
+            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+                if (neededInSize==0) {  /* end of frame */
+                    zds->stage = zdss_init;
+                    someMoreWork = 0;
+                    break;
+                }
+                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
+                    const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
+                    size_t const decodedSize = ZSTD_decompressContinue(zds->dctx,
+                        zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
+                        ip, neededInSize);
+                    if (ZSTD_isError(decodedSize)) return decodedSize;
+                    ip += neededInSize;
+                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */
+                    zds->outEnd = zds->outStart + decodedSize;
+                    zds->stage = zdss_flush;
+                    break;
+                }
+                if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
+                zds->stage = zdss_load;
+                /* pass-through */
+            }
+
+        case zdss_load:
+            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+                size_t const toLoad = neededInSize - zds->inPos;   /* should always be <= remaining space within inBuff */
+                size_t loadedSize;
+                if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected);   /* should never happen */
+                loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
+                ip += loadedSize;
+                zds->inPos += loadedSize;
+                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */
+
+                /* decode loaded input */
+                {  const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
+                   size_t const decodedSize = ZSTD_decompressContinue(zds->dctx,
+                        zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
+                        zds->inBuff, neededInSize);
+                    if (ZSTD_isError(decodedSize)) return decodedSize;
+                    zds->inPos = 0;   /* input is consumed */
+                    if (!decodedSize && !isSkipFrame) { zds->stage = zdss_read; break; }   /* this was just a header */
+                    zds->outEnd = zds->outStart +  decodedSize;
+                    zds->stage = zdss_flush;
+                    /* pass-through */
+            }   }
+
+        case zdss_flush:
+            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
+                size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
+                op += flushedSize;
+                zds->outStart += flushedSize;
+                if (flushedSize == toFlushSize) {  /* flush completed */
+                    zds->stage = zdss_read;
+                    if (zds->outStart + zds->blockSize > zds->outBuffSize)
+                        zds->outStart = zds->outEnd = 0;
+                    break;
+                }
+                /* cannot complete flush */
+                someMoreWork = 0;
+                break;
+            }
+        default: return ERROR(GENERIC);   /* impossible */
+    }   }
+
+    /* result */
+    input->pos += (size_t)(ip-istart);
+    output->pos += (size_t)(op-ostart);
+    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+        if (!nextSrcSizeHint) {   /* frame fully decoded */
+            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
+                if (zds->hostageByte) {
+                    if (input->pos >= input->size) { zds->stage = zdss_read; return 1; }  /* can't release hostage (not present) */
+                    input->pos++;  /* release hostage */
+                }
+                return 0;
+            }
+            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+                input->pos--;   /* note : pos > 0; otherwise it would have been impossible to finish reading the last block */
+                zds->hostageByte=1;
+            }
+            return 1;
+        }
+        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block);   /* preload header of next block */
+        if (zds->inPos > nextSrcSizeHint) return ERROR(GENERIC);   /* should never happen */
+        nextSrcSizeHint -= zds->inPos;   /* already loaded*/
+        return nextSrcSizeHint;
+    }
+}
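+
+/* Illustrative note (not part of upstream zstd) : streaming loop over a
+ * single in-memory frame; buffer names are hypothetical :
+ *
+ *     ZSTD_DStream* const zds = ZSTD_createDStream();
+ *     ZSTD_initDStream(zds);
+ *     {   ZSTD_inBuffer  in  = { srcBuf, srcSize, 0 };
+ *         ZSTD_outBuffer out = { dstBuf, dstCapacity, 0 };
+ *         while (in.pos < in.size) {
+ *             size_t const hint = ZSTD_decompressStream(zds, &out, &in);
+ *             if (ZSTD_isError(hint)) break;   // inspect with ZSTD_getErrorName()
+ *             if (hint == 0) break;            // frame fully decoded and flushed
+ *         }
+ *     }
+ *     ZSTD_freeDStream(zds);
+ */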
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/divsufsort.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,1913 @@
+/*
+ * divsufsort.c for libdivsufsort-lite
+ * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*- Compiler specifics -*/
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wshorten-64-to-32"
+#endif
+
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4244)
+#  pragma warning(disable : 4127)    /* C4127 : Condition expression is constant */
+#endif
+
+
+/*- Dependencies -*/
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "divsufsort.h"
+
+/*- Constants -*/
+#if defined(INLINE)
+# undef INLINE
+#endif
+#if !defined(INLINE)
+# define INLINE __inline
+#endif
+#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
+# undef ALPHABET_SIZE
+#endif
+#if !defined(ALPHABET_SIZE)
+# define ALPHABET_SIZE (256)
+#endif
+#define BUCKET_A_SIZE (ALPHABET_SIZE)
+#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
+#if defined(SS_INSERTIONSORT_THRESHOLD)
+# if SS_INSERTIONSORT_THRESHOLD < 1
+#  undef SS_INSERTIONSORT_THRESHOLD
+#  define SS_INSERTIONSORT_THRESHOLD (1)
+# endif
+#else
+# define SS_INSERTIONSORT_THRESHOLD (8)
+#endif
+#if defined(SS_BLOCKSIZE)
+# if SS_BLOCKSIZE < 0
+#  undef SS_BLOCKSIZE
+#  define SS_BLOCKSIZE (0)
+# elif 32768 <= SS_BLOCKSIZE
+#  undef SS_BLOCKSIZE
+#  define SS_BLOCKSIZE (32767)
+# endif
+#else
+# define SS_BLOCKSIZE (1024)
+#endif
+/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
+#if SS_BLOCKSIZE == 0
+# define SS_MISORT_STACKSIZE (96)
+#elif SS_BLOCKSIZE <= 4096
+# define SS_MISORT_STACKSIZE (16)
+#else
+# define SS_MISORT_STACKSIZE (24)
+#endif
+#define SS_SMERGE_STACKSIZE (32)
+#define TR_INSERTIONSORT_THRESHOLD (8)
+#define TR_STACKSIZE (64)
+
+
+/*- Macros -*/
+#ifndef SWAP
+# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
+#endif /* SWAP */
+#ifndef MIN
+# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
+#endif /* MIN */
+#ifndef MAX
+# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
+#endif /* MAX */
+#define STACK_PUSH(_a, _b, _c, _d)\
+  do {\
+    assert(ssize < STACK_SIZE);\
+    stack[ssize].a = (_a), stack[ssize].b = (_b),\
+    stack[ssize].c = (_c), stack[ssize++].d = (_d);\
+  } while(0)
+#define STACK_PUSH5(_a, _b, _c, _d, _e)\
+  do {\
+    assert(ssize < STACK_SIZE);\
+    stack[ssize].a = (_a), stack[ssize].b = (_b),\
+    stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
+  } while(0)
+#define STACK_POP(_a, _b, _c, _d)\
+  do {\
+    assert(0 <= ssize);\
+    if(ssize == 0) { return; }\
+    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
+    (_c) = stack[ssize].c, (_d) = stack[ssize].d;\
+  } while(0)
+#define STACK_POP5(_a, _b, _c, _d, _e)\
+  do {\
+    assert(0 <= ssize);\
+    if(ssize == 0) { return; }\
+    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
+    (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
+  } while(0)
+#define BUCKET_A(_c0) bucket_A[(_c0)]
+#if ALPHABET_SIZE == 256
+#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
+#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
+#else
+#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
+#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
+#endif
+
+
+/*- Private Functions -*/
+
+static const int lg_table[256]= {
+ -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+};
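+
+/* Illustrative note (not part of upstream libdivsufsort) : lg_table[n] equals
+ * floor(log2(n)) for 1 <= n <= 255 (lg_table[0] == -1); the helpers below
+ * combine byte-wide lookups to cover wider integers. */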
+
+#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
+
+static INLINE
+int
+ss_ilg(int n) {
+#if SS_BLOCKSIZE == 0
+  return (n & 0xffff0000) ?
+          ((n & 0xff000000) ?
+            24 + lg_table[(n >> 24) & 0xff] :
+            16 + lg_table[(n >> 16) & 0xff]) :
+          ((n & 0x0000ff00) ?
+             8 + lg_table[(n >>  8) & 0xff] :
+             0 + lg_table[(n >>  0) & 0xff]);
+#elif SS_BLOCKSIZE < 256
+  return lg_table[n];
+#else
+  return (n & 0xff00) ?
+          8 + lg_table[(n >> 8) & 0xff] :
+          0 + lg_table[(n >> 0) & 0xff];
+#endif
+}
+
+#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
+
+#if SS_BLOCKSIZE != 0
+
+static const int sqq_table[256] = {
+  0,  16,  22,  27,  32,  35,  39,  42,  45,  48,  50,  53,  55,  57,  59,  61,
+ 64,  65,  67,  69,  71,  73,  75,  76,  78,  80,  81,  83,  84,  86,  87,  89,
+ 90,  91,  93,  94,  96,  97,  98,  99, 101, 102, 103, 104, 106, 107, 108, 109,
+110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
+156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
+169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
+181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
+192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
+202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
+212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
+221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
+230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
+239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
+247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
+};
+
+static INLINE
+int
+ss_isqrt(int x) {
+  int y, e;
+
+  if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
+  e = (x & 0xffff0000) ?
+        ((x & 0xff000000) ?
+          24 + lg_table[(x >> 24) & 0xff] :
+          16 + lg_table[(x >> 16) & 0xff]) :
+        ((x & 0x0000ff00) ?
+           8 + lg_table[(x >>  8) & 0xff] :
+           0 + lg_table[(x >>  0) & 0xff]);
+
+  if(e >= 16) {
+    y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
+    if(e >= 24) { y = (y + 1 + x / y) >> 1; }
+    y = (y + 1 + x / y) >> 1;
+  } else if(e >= 8) {
+    y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
+  } else {
+    return sqq_table[x] >> 4;
+  }
+
+  return (x < (y * y)) ? y - 1 : y;
+}
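+
+/* Illustrative note (not from the upstream sources): sqq_table[i] is
+   floor(sqrt(i << 8)), an 8-bit square-root estimate. ss_isqrt() scales
+   its argument into that range and, for larger inputs, refines the
+   estimate with one or two Newton steps y = (y + 1 + x / y) >> 1. The
+   caller only uses the result to size a merge buffer, so values are
+   clamped to SS_BLOCKSIZE. */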
+
+#endif /* SS_BLOCKSIZE != 0 */
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Compares two suffixes. */
+static INLINE
+int
+ss_compare(const unsigned char *T,
+           const int *p1, const int *p2,
+           int depth) {
+  const unsigned char *U1, *U2, *U1n, *U2n;
+
+  for(U1 = T + depth + *p1,
+      U2 = T + depth + *p2,
+      U1n = T + *(p1 + 1) + 2,
+      U2n = T + *(p2 + 1) + 2;
+      (U1 < U1n) && (U2 < U2n) && (*U1 == *U2);
+      ++U1, ++U2) {
+  }
+
+  return U1 < U1n ?
+        (U2 < U2n ? *U1 - *U2 : 1) :
+        (U2 < U2n ? -1 : 0);
+}
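+
+/* Illustrative note (not from the upstream sources): the result follows
+   the memcmp() convention (negative, zero or positive). PA holds substring
+   start positions, and *(p1 + 1) / *(p2 + 1) bound the end of each B*
+   substring, so the scan stops as soon as either substring is exhausted. */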
+
+
+/*---------------------------------------------------------------------------*/
+
+#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
+
+/* Insertion sort for small groups */
+static
+void
+ss_insertionsort(const unsigned char *T, const int *PA,
+                 int *first, int *last, int depth) {
+  int *i, *j;
+  int t;
+  int r;
+
+  for(i = last - 2; first <= i; --i) {
+    for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
+      do { *(j - 1) = *j; } while((++j < last) && (*j < 0));
+      if(last <= j) { break; }
+    }
+    if(r == 0) { *j = ~*j; }
+    *(j - 1) = t;
+  }
+}
+
+#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
+
+
+/*---------------------------------------------------------------------------*/
+
+#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
+
+static INLINE
+void
+ss_fixdown(const unsigned char *Td, const int *PA,
+           int *SA, int i, int size) {
+  int j, k;
+  int v;
+  int c, d, e;
+
+  for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
+    d = Td[PA[SA[k = j++]]];
+    if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; }
+    if(d <= c) { break; }
+  }
+  SA[i] = v;
+}
+
+/* Simple top-down heapsort. */
+static
+void
+ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
+  int i, m;
+  int t;
+
+  m = size;
+  if((size % 2) == 0) {
+    m--;
+    if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
+  }
+
+  for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }
+  if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }
+  for(i = m - 1; 0 < i; --i) {
+    t = SA[0], SA[0] = SA[i];
+    ss_fixdown(Td, PA, SA, 0, i);
+    SA[i] = t;
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Returns the median of three elements. */
+static INLINE
+int *
+ss_median3(const unsigned char *Td, const int *PA,
+           int *v1, int *v2, int *v3) {
+  int *t;
+  if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); }
+  if(Td[PA[*v2]] > Td[PA[*v3]]) {
+    if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; }
+    else { return v3; }
+  }
+  return v2;
+}
+
+/* Returns the median of five elements. */
+static INLINE
+int *
+ss_median5(const unsigned char *Td, const int *PA,
+           int *v1, int *v2, int *v3, int *v4, int *v5) {
+  int *t;
+  if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
+  if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
+  if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }
+  if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
+  if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }
+  if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
+  return v3;
+}
+
+/* Returns the pivot element. */
+static INLINE
+int *
+ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
+  int *middle;
+  int t;
+
+  t = last - first;
+  middle = first + t / 2;
+
+  if(t <= 512) {
+    if(t <= 32) {
+      return ss_median3(Td, PA, first, middle, last - 1);
+    } else {
+      t >>= 2;
+      return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
+    }
+  }
+  t >>= 3;
+  first  = ss_median3(Td, PA, first, first + t, first + (t << 1));
+  middle = ss_median3(Td, PA, middle - t, middle, middle + t);
+  last   = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
+  return ss_median3(Td, PA, first, middle, last);
+}
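+
+/* Illustrative note (not from the upstream sources): classic introsort
+   pivot selection - median of 3 for up to 32 elements, median of 5 up to
+   512, and a pseudo-median of 9 (median of three medians of 3) beyond
+   that. */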
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Binary partition for substrings. */
+static INLINE
+int *
+ss_partition(const int *PA,
+                    int *first, int *last, int depth) {
+  int *a, *b;
+  int t;
+  for(a = first - 1, b = last;;) {
+    for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
+    for(; (a < --b) && ((PA[*b] + depth) <  (PA[*b + 1] + 1));) { }
+    if(b <= a) { break; }
+    t = ~*b;
+    *b = *a;
+    *a = t;
+  }
+  if(first < a) { *first = ~*first; }
+  return a;
+}
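+
+/* Illustrative note (not from the upstream sources): this splits the range
+   by whether a substring still has characters beyond the current depth;
+   exhausted substrings move to the front and are stored complemented,
+   divsufsort's usual flag for entries whose order is already settled. */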
+
+/* Multikey introsort for medium size groups. */
+static
+void
+ss_mintrosort(const unsigned char *T, const int *PA,
+              int *first, int *last,
+              int depth) {
+#define STACK_SIZE SS_MISORT_STACKSIZE
+  struct { int *a, *b, c; int d; } stack[STACK_SIZE];
+  const unsigned char *Td;
+  int *a, *b, *c, *d, *e, *f;
+  int s, t;
+  int ssize;
+  int limit;
+  int v, x = 0;
+
+  for(ssize = 0, limit = ss_ilg(last - first);;) {
+
+    if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
+#if 1 < SS_INSERTIONSORT_THRESHOLD
+      if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
+#endif
+      STACK_POP(first, last, depth, limit);
+      continue;
+    }
+
+    Td = T + depth;
+    if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
+    if(limit < 0) {
+      for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
+        if((x = Td[PA[*a]]) != v) {
+          if(1 < (a - first)) { break; }
+          v = x;
+          first = a;
+        }
+      }
+      if(Td[PA[*first] - 1] < v) {
+        first = ss_partition(PA, first, a, depth);
+      }
+      if((a - first) <= (last - a)) {
+        if(1 < (a - first)) {
+          STACK_PUSH(a, last, depth, -1);
+          last = a, depth += 1, limit = ss_ilg(a - first);
+        } else {
+          first = a, limit = -1;
+        }
+      } else {
+        if(1 < (last - a)) {
+          STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
+          first = a, limit = -1;
+        } else {
+          last = a, depth += 1, limit = ss_ilg(a - first);
+        }
+      }
+      continue;
+    }
+
+    /* choose pivot */
+    a = ss_pivot(Td, PA, first, last);
+    v = Td[PA[*a]];
+    SWAP(*first, *a);
+
+    /* partition */
+    for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }
+    if(((a = b) < last) && (x < v)) {
+      for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
+        if(x == v) { SWAP(*b, *a); ++a; }
+      }
+    }
+    for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }
+    if((b < (d = c)) && (x > v)) {
+      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
+        if(x == v) { SWAP(*c, *d); --d; }
+      }
+    }
+    for(; b < c;) {
+      SWAP(*b, *c);
+      for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
+        if(x == v) { SWAP(*b, *a); ++a; }
+      }
+      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
+        if(x == v) { SWAP(*c, *d); --d; }
+      }
+    }
+
+    if(a <= d) {
+      c = b - 1;
+
+      if((s = a - first) > (t = b - a)) { s = t; }
+      for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+      if((s = d - c) > (t = last - d - 1)) { s = t; }
+      for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+
+      a = first + (b - a), c = last - (d - c);
+      b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
+
+      if((a - first) <= (last - c)) {
+        if((last - c) <= (c - b)) {
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          STACK_PUSH(c, last, depth, limit);
+          last = a;
+        } else if((a - first) <= (c - b)) {
+          STACK_PUSH(c, last, depth, limit);
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          last = a;
+        } else {
+          STACK_PUSH(c, last, depth, limit);
+          STACK_PUSH(first, a, depth, limit);
+          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
+        }
+      } else {
+        if((a - first) <= (c - b)) {
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          STACK_PUSH(first, a, depth, limit);
+          first = c;
+        } else if((last - c) <= (c - b)) {
+          STACK_PUSH(first, a, depth, limit);
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          first = c;
+        } else {
+          STACK_PUSH(first, a, depth, limit);
+          STACK_PUSH(c, last, depth, limit);
+          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
+        }
+      }
+    } else {
+      limit += 1;
+      if(Td[PA[*first] - 1] < v) {
+        first = ss_partition(PA, first, last, depth);
+        limit = ss_ilg(last - first);
+      }
+      depth += 1;
+    }
+  }
+#undef STACK_SIZE
+}
+
+#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
+
+
+/*---------------------------------------------------------------------------*/
+
+#if SS_BLOCKSIZE != 0
+
+static INLINE
+void
+ss_blockswap(int *a, int *b, int n) {
+  int t;
+  for(; 0 < n; --n, ++a, ++b) {
+    t = *a, *a = *b, *b = t;
+  }
+}
+
+static INLINE
+void
+ss_rotate(int *first, int *middle, int *last) {
+  int *a, *b, t;
+  int l, r;
+  l = middle - first, r = last - middle;
+  for(; (0 < l) && (0 < r);) {
+    if(l == r) { ss_blockswap(first, middle, l); break; }
+    if(l < r) {
+      a = last - 1, b = middle - 1;
+      t = *a;
+      do {
+        *a-- = *b, *b-- = *a;
+        if(b < first) {
+          *a = t;
+          last = a;
+          if((r -= l + 1) <= l) { break; }
+          a -= 1, b = middle - 1;
+          t = *a;
+        }
+      } while(1);
+    } else {
+      a = first, b = middle;
+      t = *a;
+      do {
+        *a++ = *b, *b++ = *a;
+        if(last <= b) {
+          *a = t;
+          first = a + 1;
+          if((l -= r + 1) <= r) { break; }
+          a += 1, b = middle;
+          t = *a;
+        }
+      } while(1);
+    }
+  }
+}
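+
+/* Illustrative note (not from the upstream sources): ss_rotate() swaps the
+   adjacent blocks [first, middle) and [middle, last) in place, without an
+   auxiliary buffer; e.g. rotating {1,2,3,4,5} around middle = first + 2
+   yields {3,4,5,1,2}. It is the building block of ss_inplacemerge()
+   below. */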
+
+
+/*---------------------------------------------------------------------------*/
+
+static
+void
+ss_inplacemerge(const unsigned char *T, const int *PA,
+                int *first, int *middle, int *last,
+                int depth) {
+  const int *p;
+  int *a, *b;
+  int len, half;
+  int q, r;
+  int x;
+
+  for(;;) {
+    if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }
+    else                { x = 0; p = PA +  *(last - 1); }
+    for(a = first, len = middle - first, half = len >> 1, r = -1;
+        0 < len;
+        len = half, half >>= 1) {
+      b = a + half;
+      q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
+      if(q < 0) {
+        a = b + 1;
+        half -= (len & 1) ^ 1;
+      } else {
+        r = q;
+      }
+    }
+    if(a < middle) {
+      if(r == 0) { *a = ~*a; }
+      ss_rotate(a, middle, last);
+      last -= middle - a;
+      middle = a;
+      if(first == middle) { break; }
+    }
+    --last;
+    if(x != 0) { while(*--last < 0) { } }
+    if(middle == last) { break; }
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Merge-forward with internal buffer. */
+static
+void
+ss_mergeforward(const unsigned char *T, const int *PA,
+                int *first, int *middle, int *last,
+                int *buf, int depth) {
+  int *a, *b, *c, *bufend;
+  int t;
+  int r;
+
+  bufend = buf + (middle - first) - 1;
+  ss_blockswap(buf, first, middle - first);
+
+  for(t = *(a = first), b = buf, c = middle;;) {
+    r = ss_compare(T, PA + *b, PA + *c, depth);
+    if(r < 0) {
+      do {
+        *a++ = *b;
+        if(bufend <= b) { *bufend = t; return; }
+        *b++ = *a;
+      } while(*b < 0);
+    } else if(r > 0) {
+      do {
+        *a++ = *c, *c++ = *a;
+        if(last <= c) {
+          while(b < bufend) { *a++ = *b, *b++ = *a; }
+          *a = *b, *b = t;
+          return;
+        }
+      } while(*c < 0);
+    } else {
+      *c = ~*c;
+      do {
+        *a++ = *b;
+        if(bufend <= b) { *bufend = t; return; }
+        *b++ = *a;
+      } while(*b < 0);
+
+      do {
+        *a++ = *c, *c++ = *a;
+        if(last <= c) {
+          while(b < bufend) { *a++ = *b, *b++ = *a; }
+          *a = *b, *b = t;
+          return;
+        }
+      } while(*c < 0);
+    }
+  }
+}
+
+/* Merge-backward with internal buffer. */
+static
+void
+ss_mergebackward(const unsigned char *T, const int *PA,
+                 int *first, int *middle, int *last,
+                 int *buf, int depth) {
+  const int *p1, *p2;
+  int *a, *b, *c, *bufend;
+  int t;
+  int r;
+  int x;
+
+  bufend = buf + (last - middle) - 1;
+  ss_blockswap(buf, middle, last - middle);
+
+  x = 0;
+  if(*bufend < 0)       { p1 = PA + ~*bufend; x |= 1; }
+  else                  { p1 = PA +  *bufend; }
+  if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }
+  else                  { p2 = PA +  *(middle - 1); }
+  for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {
+    r = ss_compare(T, p1, p2, depth);
+    if(0 < r) {
+      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
+      *a-- = *b;
+      if(b <= buf) { *buf = t; break; }
+      *b-- = *a;
+      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
+      else       { p1 = PA +  *b; }
+    } else if(r < 0) {
+      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
+      *a-- = *c, *c-- = *a;
+      if(c < first) {
+        while(buf < b) { *a-- = *b, *b-- = *a; }
+        *a = *b, *b = t;
+        break;
+      }
+      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
+      else       { p2 = PA +  *c; }
+    } else {
+      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
+      *a-- = ~*b;
+      if(b <= buf) { *buf = t; break; }
+      *b-- = *a;
+      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
+      *a-- = *c, *c-- = *a;
+      if(c < first) {
+        while(buf < b) { *a-- = *b, *b-- = *a; }
+        *a = *b, *b = t;
+        break;
+      }
+      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
+      else       { p1 = PA +  *b; }
+      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
+      else       { p2 = PA +  *c; }
+    }
+  }
+}
+
+/* D&C based merge. */
+static
+void
+ss_swapmerge(const unsigned char *T, const int *PA,
+             int *first, int *middle, int *last,
+             int *buf, int bufsize, int depth) {
+#define STACK_SIZE SS_SMERGE_STACKSIZE
+#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
+#define MERGE_CHECK(a, b, c)\
+  do {\
+    if(((c) & 1) ||\
+       (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
+      *(a) = ~*(a);\
+    }\
+    if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
+      *(b) = ~*(b);\
+    }\
+  } while(0)
+  struct { int *a, *b, *c; int d; } stack[STACK_SIZE];
+  int *l, *r, *lm, *rm;
+  int m, len, half;
+  int ssize;
+  int check, next;
+
+  for(check = 0, ssize = 0;;) {
+    if((last - middle) <= bufsize) {
+      if((first < middle) && (middle < last)) {
+        ss_mergebackward(T, PA, first, middle, last, buf, depth);
+      }
+      MERGE_CHECK(first, last, check);
+      STACK_POP(first, middle, last, check);
+      continue;
+    }
+
+    if((middle - first) <= bufsize) {
+      if(first < middle) {
+        ss_mergeforward(T, PA, first, middle, last, buf, depth);
+      }
+      MERGE_CHECK(first, last, check);
+      STACK_POP(first, middle, last, check);
+      continue;
+    }
+
+    for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
+        0 < len;
+        len = half, half >>= 1) {
+      if(ss_compare(T, PA + GETIDX(*(middle + m + half)),
+                       PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
+        m += half + 1;
+        half -= (len & 1) ^ 1;
+      }
+    }
+
+    if(0 < m) {
+      lm = middle - m, rm = middle + m;
+      ss_blockswap(lm, middle, m);
+      l = r = middle, next = 0;
+      if(rm < last) {
+        if(*rm < 0) {
+          *rm = ~*rm;
+          if(first < lm) { for(; *--l < 0;) { } next |= 4; }
+          next |= 1;
+        } else if(first < lm) {
+          for(; *r < 0; ++r) { }
+          next |= 2;
+        }
+      }
+
+      if((l - first) <= (last - r)) {
+        STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
+        middle = lm, last = l, check = (check & 3) | (next & 4);
+      } else {
+        if((next & 2) && (r == middle)) { next ^= 6; }
+        STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
+        first = r, middle = rm, check = (next & 3) | (check & 4);
+      }
+    } else {
+      if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
+        *middle = ~*middle;
+      }
+      MERGE_CHECK(first, last, check);
+      STACK_POP(first, middle, last, check);
+    }
+  }
+#undef STACK_SIZE
+}
+
+#endif /* SS_BLOCKSIZE != 0 */
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Substring sort */
+static
+void
+sssort(const unsigned char *T, const int *PA,
+       int *first, int *last,
+       int *buf, int bufsize,
+       int depth, int n, int lastsuffix) {
+  int *a;
+#if SS_BLOCKSIZE != 0
+  int *b, *middle, *curbuf;
+  int j, k, curbufsize, limit;
+#endif
+  int i;
+
+  if(lastsuffix != 0) { ++first; }
+
+#if SS_BLOCKSIZE == 0
+  ss_mintrosort(T, PA, first, last, depth);
+#else
+  if((bufsize < SS_BLOCKSIZE) &&
+      (bufsize < (last - first)) &&
+      (bufsize < (limit = ss_isqrt(last - first)))) {
+    if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
+    buf = middle = last - limit, bufsize = limit;
+  } else {
+    middle = last, limit = 0;
+  }
+  for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+    ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
+#elif 1 < SS_BLOCKSIZE
+    ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
+#endif
+    curbufsize = last - (a + SS_BLOCKSIZE);
+    curbuf = a + SS_BLOCKSIZE;
+    if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
+    for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
+      ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
+    }
+  }
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+  ss_mintrosort(T, PA, a, middle, depth);
+#elif 1 < SS_BLOCKSIZE
+  ss_insertionsort(T, PA, a, middle, depth);
+#endif
+  for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
+    if(i & 1) {
+      ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
+      a -= k;
+    }
+  }
+  if(limit != 0) {
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+    ss_mintrosort(T, PA, middle, last, depth);
+#elif 1 < SS_BLOCKSIZE
+    ss_insertionsort(T, PA, middle, last, depth);
+#endif
+    ss_inplacemerge(T, PA, first, middle, last, depth);
+  }
+#endif
+
+  if(lastsuffix != 0) {
+    /* Insert last type B* suffix. */
+    int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
+    for(a = first, i = *(first - 1);
+        (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
+        ++a) {
+      *(a - 1) = *a;
+    }
+    *(a - 1) = i;
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+int
+tr_ilg(int n) {
+  return (n & 0xffff0000) ?
+          ((n & 0xff000000) ?
+            24 + lg_table[(n >> 24) & 0xff] :
+            16 + lg_table[(n >> 16) & 0xff]) :
+          ((n & 0x0000ff00) ?
+             8 + lg_table[(n >>  8) & 0xff] :
+             0 + lg_table[(n >>  0) & 0xff]);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Simple insertion sort for small groups. */
+static
+void
+tr_insertionsort(const int *ISAd, int *first, int *last) {
+  int *a, *b;
+  int t, r;
+
+  for(a = first + 1; a < last; ++a) {
+    for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {
+      do { *(b + 1) = *b; } while((first <= --b) && (*b < 0));
+      if(b < first) { break; }
+    }
+    if(r == 0) { *b = ~*b; }
+    *(b + 1) = t;
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+void
+tr_fixdown(const int *ISAd, int *SA, int i, int size) {
+  int j, k;
+  int v;
+  int c, d, e;
+
+  for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
+    d = ISAd[SA[k = j++]];
+    if(d < (e = ISAd[SA[j]])) { k = j; d = e; }
+    if(d <= c) { break; }
+  }
+  SA[i] = v;
+}
+
+/* Simple top-down heapsort. */
+static
+void
+tr_heapsort(const int *ISAd, int *SA, int size) {
+  int i, m;
+  int t;
+
+  m = size;
+  if((size % 2) == 0) {
+    m--;
+    if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
+  }
+
+  for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
+  if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
+  for(i = m - 1; 0 < i; --i) {
+    t = SA[0], SA[0] = SA[i];
+    tr_fixdown(ISAd, SA, 0, i);
+    SA[i] = t;
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Returns the median of three elements. */
+static INLINE
+int *
+tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
+  int *t;
+  if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }
+  if(ISAd[*v2] > ISAd[*v3]) {
+    if(ISAd[*v1] > ISAd[*v3]) { return v1; }
+    else { return v3; }
+  }
+  return v2;
+}
+
+/* Returns the median of five elements. */
+static INLINE
+int *
+tr_median5(const int *ISAd,
+           int *v1, int *v2, int *v3, int *v4, int *v5) {
+  int *t;
+  if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
+  if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
+  if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }
+  if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
+  if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }
+  if(ISAd[*v3] > ISAd[*v4]) { return v4; }
+  return v3;
+}
+
+/* Returns the pivot element. */
+static INLINE
+int *
+tr_pivot(const int *ISAd, int *first, int *last) {
+  int *middle;
+  int t;
+
+  t = last - first;
+  middle = first + t / 2;
+
+  if(t <= 512) {
+    if(t <= 32) {
+      return tr_median3(ISAd, first, middle, last - 1);
+    } else {
+      t >>= 2;
+      return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
+    }
+  }
+  t >>= 3;
+  first  = tr_median3(ISAd, first, first + t, first + (t << 1));
+  middle = tr_median3(ISAd, middle - t, middle, middle + t);
+  last   = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
+  return tr_median3(ISAd, first, middle, last);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+typedef struct _trbudget_t trbudget_t;
+struct _trbudget_t {
+  int chance;
+  int remain;
+  int incval;
+  int count;
+};
+
+static INLINE
+void
+trbudget_init(trbudget_t *budget, int chance, int incval) {
+  budget->chance = chance;
+  budget->remain = budget->incval = incval;
+}
+
+static INLINE
+int
+trbudget_check(trbudget_t *budget, int size) {
+  if(size <= budget->remain) { budget->remain -= size; return 1; }
+  if(budget->chance == 0) { budget->count += size; return 0; }
+  budget->remain += budget->incval - size;
+  budget->chance -= 1;
+  return 1;
+}
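+
+/* Illustrative note (not from the upstream sources): the budget bounds the
+   work tr_introsort() may spend on a group. Each check either pays `size`
+   out of `remain` or, while `chance` refills are left, tops `remain` back
+   up by `incval`; once the refills are gone the check fails, `count`
+   accumulates the unsorted size, and trsort() retries that group at a
+   doubled depth. */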
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+void
+tr_partition(const int *ISAd,
+             int *first, int *middle, int *last,
+             int **pa, int **pb, int v) {
+  int *a, *b, *c, *d, *e, *f;
+  int t, s;
+  int x = 0;
+
+  for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
+  if(((a = b) < last) && (x < v)) {
+    for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
+      if(x == v) { SWAP(*b, *a); ++a; }
+    }
+  }
+  for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
+  if((b < (d = c)) && (x > v)) {
+    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
+      if(x == v) { SWAP(*c, *d); --d; }
+    }
+  }
+  for(; b < c;) {
+    SWAP(*b, *c);
+    for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
+      if(x == v) { SWAP(*b, *a); ++a; }
+    }
+    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
+      if(x == v) { SWAP(*c, *d); --d; }
+    }
+  }
+
+  if(a <= d) {
+    c = b - 1;
+    if((s = a - first) > (t = b - a)) { s = t; }
+    for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+    if((s = d - c) > (t = last - d - 1)) { s = t; }
+    for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+    first += (b - a), last -= (d - c);
+  }
+  *pa = first, *pb = last;
+}
+
+static
+void
+tr_copy(int *ISA, const int *SA,
+        int *first, int *a, int *b, int *last,
+        int depth) {
+  /* sort the suffixes of the middle partition
+     using the sorted order of the suffixes in the left and right partitions. */
+  int *c, *d, *e;
+  int s, v;
+
+  v = b - SA - 1;
+  for(c = first, d = a - 1; c <= d; ++c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *++d = s;
+      ISA[s] = d - SA;
+    }
+  }
+  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *--d = s;
+      ISA[s] = d - SA;
+    }
+  }
+}
+
+static
+void
+tr_partialcopy(int *ISA, const int *SA,
+               int *first, int *a, int *b, int *last,
+               int depth) {
+  int *c, *d, *e;
+  int s, v;
+  int rank, lastrank, newrank = -1;
+
+  v = b - SA - 1;
+  lastrank = -1;
+  for(c = first, d = a - 1; c <= d; ++c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *++d = s;
+      rank = ISA[s + depth];
+      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
+      ISA[s] = newrank;
+    }
+  }
+
+  lastrank = -1;
+  for(e = d; first <= e; --e) {
+    rank = ISA[*e];
+    if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
+    if(newrank != rank) { ISA[*e] = newrank; }
+  }
+
+  lastrank = -1;
+  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *--d = s;
+      rank = ISA[s + depth];
+      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
+      ISA[s] = newrank;
+    }
+  }
+}
+
+static
+void
+tr_introsort(int *ISA, const int *ISAd,
+             int *SA, int *first, int *last,
+             trbudget_t *budget) {
+#define STACK_SIZE TR_STACKSIZE
+  struct { const int *a; int *b, *c; int d, e; } stack[STACK_SIZE];
+  int *a, *b, *c;
+  int t;
+  int v, x = 0;
+  int incr = ISAd - ISA;
+  int limit, next;
+  int ssize, trlink = -1;
+
+  for(ssize = 0, limit = tr_ilg(last - first);;) {
+
+    if(limit < 0) {
+      if(limit == -1) {
+        /* tandem repeat partition */
+        tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
+
+        /* update ranks */
+        if(a < last) {
+          for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
+        }
+        if(b < last) {
+          for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
+        }
+
+        /* push */
+        if(1 < (b - a)) {
+          STACK_PUSH5(NULL, a, b, 0, 0);
+          STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
+          trlink = ssize - 2;
+        }
+        if((a - first) <= (last - b)) {
+          if(1 < (a - first)) {
+            STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
+            last = a, limit = tr_ilg(a - first);
+          } else if(1 < (last - b)) {
+            first = b, limit = tr_ilg(last - b);
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        } else {
+          if(1 < (last - b)) {
+            STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
+            first = b, limit = tr_ilg(last - b);
+          } else if(1 < (a - first)) {
+            last = a, limit = tr_ilg(a - first);
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        }
+      } else if(limit == -2) {
+        /* tandem repeat copy */
+        a = stack[--ssize].b, b = stack[ssize].c;
+        if(stack[ssize].d == 0) {
+          tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
+        } else {
+          if(0 <= trlink) { stack[trlink].d = -1; }
+          tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
+        }
+        STACK_POP5(ISAd, first, last, limit, trlink);
+      } else {
+        /* sorted partition */
+        if(0 <= *first) {
+          a = first;
+          do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
+          first = a;
+        }
+        if(first < last) {
+          a = first; do { *a = ~*a; } while(*++a < 0);
+          next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
+          if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
+
+          /* push */
+          if(trbudget_check(budget, a - first)) {
+            if((a - first) <= (last - a)) {
+              STACK_PUSH5(ISAd, a, last, -3, trlink);
+              ISAd += incr, last = a, limit = next;
+            } else {
+              if(1 < (last - a)) {
+                STACK_PUSH5(ISAd + incr, first, a, next, trlink);
+                first = a, limit = -3;
+              } else {
+                ISAd += incr, last = a, limit = next;
+              }
+            }
+          } else {
+            if(0 <= trlink) { stack[trlink].d = -1; }
+            if(1 < (last - a)) {
+              first = a, limit = -3;
+            } else {
+              STACK_POP5(ISAd, first, last, limit, trlink);
+            }
+          }
+        } else {
+          STACK_POP5(ISAd, first, last, limit, trlink);
+        }
+      }
+      continue;
+    }
+
+    if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
+      tr_insertionsort(ISAd, first, last);
+      limit = -3;
+      continue;
+    }
+
+    if(limit-- == 0) {
+      tr_heapsort(ISAd, first, last - first);
+      for(a = last - 1; first < a; a = b) {
+        for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
+      }
+      limit = -3;
+      continue;
+    }
+
+    /* choose pivot */
+    a = tr_pivot(ISAd, first, last);
+    SWAP(*first, *a);
+    v = ISAd[*first];
+
+    /* partition */
+    tr_partition(ISAd, first, first + 1, last, &a, &b, v);
+    if((last - first) != (b - a)) {
+      next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
+
+      /* update ranks */
+      for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
+      if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
+
+      /* push */
+      if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
+        if((a - first) <= (last - b)) {
+          if((last - b) <= (b - a)) {
+            if(1 < (a - first)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              STACK_PUSH5(ISAd, b, last, limit, trlink);
+              last = a;
+            } else if(1 < (last - b)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              first = b;
+            } else {
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else if((a - first) <= (b - a)) {
+            if(1 < (a - first)) {
+              STACK_PUSH5(ISAd, b, last, limit, trlink);
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              last = a;
+            } else {
+              STACK_PUSH5(ISAd, b, last, limit, trlink);
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else {
+            STACK_PUSH5(ISAd, b, last, limit, trlink);
+            STACK_PUSH5(ISAd, first, a, limit, trlink);
+            ISAd += incr, first = a, last = b, limit = next;
+          }
+        } else {
+          if((a - first) <= (b - a)) {
+            if(1 < (last - b)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              STACK_PUSH5(ISAd, first, a, limit, trlink);
+              first = b;
+            } else if(1 < (a - first)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              last = a;
+            } else {
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else if((last - b) <= (b - a)) {
+            if(1 < (last - b)) {
+              STACK_PUSH5(ISAd, first, a, limit, trlink);
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              first = b;
+            } else {
+              STACK_PUSH5(ISAd, first, a, limit, trlink);
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else {
+            STACK_PUSH5(ISAd, first, a, limit, trlink);
+            STACK_PUSH5(ISAd, b, last, limit, trlink);
+            ISAd += incr, first = a, last = b, limit = next;
+          }
+        }
+      } else {
+        if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
+        if((a - first) <= (last - b)) {
+          if(1 < (a - first)) {
+            STACK_PUSH5(ISAd, b, last, limit, trlink);
+            last = a;
+          } else if(1 < (last - b)) {
+            first = b;
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        } else {
+          if(1 < (last - b)) {
+            STACK_PUSH5(ISAd, first, a, limit, trlink);
+            first = b;
+          } else if(1 < (a - first)) {
+            last = a;
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        }
+      }
+    } else {
+      if(trbudget_check(budget, last - first)) {
+        limit = tr_ilg(last - first), ISAd += incr;
+      } else {
+        if(0 <= trlink) { stack[trlink].d = -1; }
+        STACK_POP5(ISAd, first, last, limit, trlink);
+      }
+    }
+  }
+#undef STACK_SIZE
+}
+
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Tandem repeat sort */
+static
+void
+trsort(int *ISA, int *SA, int n, int depth) {
+  int *ISAd;
+  int *first, *last;
+  trbudget_t budget;
+  int t, skip, unsorted;
+
+  trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
+/*  trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
+  for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
+    first = SA;
+    skip = 0;
+    unsorted = 0;
+    do {
+      if((t = *first) < 0) { first -= t; skip += t; }
+      else {
+        if(skip != 0) { *(first + skip) = skip; skip = 0; }
+        last = SA + ISA[t] + 1;
+        if(1 < (last - first)) {
+          budget.count = 0;
+          tr_introsort(ISA, ISAd, SA, first, last, &budget);
+          if(budget.count != 0) { unsorted += budget.count; }
+          else { skip = first - last; }
+        } else if((last - first) == 1) {
+          skip = -1;
+        }
+        first = last;
+      }
+    } while(first < (SA + n));
+    if(skip != 0) { *(first + skip) = skip; }
+    if(unsorted == 0) { break; }
+  }
+}
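+
+/* Illustrative note (not from the upstream sources): trsort() is a
+   Larsson-Sadakane style doubling sort over the inverse suffix array -
+   each pass refines groups using ranks at distance `depth`, then doubles
+   that distance (ISAd += ISAd - ISA). Negative SA entries encode the
+   lengths of already sorted runs so later passes can skip over them. */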
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Sorts suffixes of type B*. */
+static
+int
+sort_typeBstar(const unsigned char *T, int *SA,
+               int *bucket_A, int *bucket_B,
+               int n, int openMP) {
+  int *PAb, *ISAb, *buf;
+#ifdef LIBBSC_OPENMP
+  int *curbuf;
+  int l;
+#endif
+  int i, j, k, t, m, bufsize;
+  int c0, c1;
+#ifdef LIBBSC_OPENMP
+  int d0, d1;
+#endif
+  (void)openMP;
+
+  /* Initialize bucket arrays. */
+  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
+  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
+
+  /* Count the number of occurrences of the first one or two characters of each
+     type A, B and B* suffix. Moreover, store the beginning position of all
+     type B* suffixes into the array SA. */
+  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
+    /* type A suffix. */
+    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
+    if(0 <= i) {
+      /* type B* suffix. */
+      ++BUCKET_BSTAR(c0, c1);
+      SA[--m] = i;
+      /* type B suffix. */
+      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
+        ++BUCKET_B(c0, c1);
+      }
+    }
+  }
+  m = n - m;
+/*
+note:
+  A type B* suffix is lexicographically smaller than a type B suffix that
+  begins with the same first two characters.
+*/
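+
+/*
+Illustrative note (not from the upstream sources): per the scan above, a
+suffix is type A when T[i] > T[i + 1], or T[i] == T[i + 1] with suffix
+i + 1 also type A; it is type B otherwise, and type B* when it is type B
+and suffix i + 1 is type A. No two B* suffixes are adjacent, so at most
+about half of the suffixes are sorted directly; the rest are induced.
+*/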
+
+  /* Calculate the index of start/end point of each bucket. */
+  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
+    t = i + BUCKET_A(c0);
+    BUCKET_A(c0) = i + j; /* start point */
+    i = t + BUCKET_B(c0, c0);
+    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
+      j += BUCKET_BSTAR(c0, c1);
+      BUCKET_BSTAR(c0, c1) = j; /* end point */
+      i += BUCKET_B(c0, c1);
+    }
+  }
+
+  if(0 < m) {
+    /* Sort the type B* suffixes by their first two characters. */
+    PAb = SA + n - m; ISAb = SA + m;
+    for(i = m - 2; 0 <= i; --i) {
+      t = PAb[i], c0 = T[t], c1 = T[t + 1];
+      SA[--BUCKET_BSTAR(c0, c1)] = i;
+    }
+    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
+    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
+
+    /* Sort the type B* substrings using sssort. */
+#ifdef LIBBSC_OPENMP
+    if (openMP)
+    {
+        buf = SA + m;
+        c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
+#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)
+        {
+          bufsize = (n - (2 * m)) / omp_get_num_threads();
+          curbuf = buf + omp_get_thread_num() * bufsize;
+          k = 0;
+          for(;;) {
+            #pragma omp critical(sssort_lock)
+            {
+              if(0 < (l = j)) {
+                d0 = c0, d1 = c1;
+                do {
+                  k = BUCKET_BSTAR(d0, d1);
+                  if(--d1 <= d0) {
+                    d1 = ALPHABET_SIZE - 1;
+                    if(--d0 < 0) { break; }
+                  }
+                } while(((l - k) <= 1) && (0 < (l = k)));
+                c0 = d0, c1 = d1, j = k;
+              }
+            }
+            if(l == 0) { break; }
+            sssort(T, PAb, SA + k, SA + l,
+                   curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
+          }
+        }
+    }
+    else
+    {
+        buf = SA + m, bufsize = n - (2 * m);
+        for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
+          for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
+            i = BUCKET_BSTAR(c0, c1);
+            if(1 < (j - i)) {
+              sssort(T, PAb, SA + i, SA + j,
+                     buf, bufsize, 2, n, *(SA + i) == (m - 1));
+            }
+          }
+        }
+    }
+#else
+    buf = SA + m, bufsize = n - (2 * m);
+    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
+      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
+        i = BUCKET_BSTAR(c0, c1);
+        if(1 < (j - i)) {
+          sssort(T, PAb, SA + i, SA + j,
+                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
+        }
+      }
+    }
+#endif
+
+    /* Compute ranks of type B* substrings. */
+    for(i = m - 1; 0 <= i; --i) {
+      if(0 <= SA[i]) {
+        j = i;
+        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
+        SA[i + 1] = i - j;
+        if(i <= 0) { break; }
+      }
+      j = i;
+      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
+      ISAb[SA[i]] = j;
+    }
+
+    /* Construct the inverse suffix array of type B* suffixes using trsort. */
+    trsort(ISAb, SA, m, 1);
+
+    /* Set the sorted order of type B* suffixes. */
+    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
+      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
+      if(0 <= i) {
+        t = i;
+        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
+        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
+      }
+    }
+
+    /* Calculate the index of start/end point of each bucket. */
+    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
+    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
+      i = BUCKET_A(c0 + 1) - 1;
+      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
+        t = i - BUCKET_B(c0, c1);
+        BUCKET_B(c0, c1) = i; /* end point */
+
+        /* Move all type B* suffixes to the correct position. */
+        for(i = t, j = BUCKET_BSTAR(c0, c1);
+            j <= k;
+            --i, --k) { SA[i] = SA[k]; }
+      }
+      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
+      BUCKET_B(c0, c0) = i; /* end point */
+    }
+  }
+
+  return m;
+}
+
+/* Constructs the suffix array by using the sorted order of type B* suffixes. */
+static
+void
+construct_SA(const unsigned char *T, int *SA,
+             int *bucket_A, int *bucket_B,
+             int n, int m) {
+  int *i, *j, *k;
+  int s;
+  int c0, c1, c2;
+
+  if(0 < m) {
+    /* Construct the sorted order of type B suffixes by using
+       the sorted order of type B* suffixes. */
+    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+      /* Scan the suffix array from right to left. */
+      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+          i <= j;
+          --j) {
+        if(0 < (s = *j)) {
+          assert(T[s] == c1);
+          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+          assert(T[s - 1] <= T[s]);
+          *j = ~s;
+          c0 = T[--s];
+          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+          if(c0 != c2) {
+            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+            k = SA + BUCKET_B(c2 = c0, c1);
+          }
+          assert(k < j);
+          *k-- = s;
+        } else {
+          assert(((s == 0) && (T[s] == c1)) || (s < 0));
+          *j = ~s;
+        }
+      }
+    }
+  }
+
+  /* Construct the suffix array by using
+     the sorted order of type B suffixes. */
+  k = SA + BUCKET_A(c2 = T[n - 1]);
+  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
+  /* Scan the suffix array from left to right. */
+  for(i = SA, j = SA + n; i < j; ++i) {
+    if(0 < (s = *i)) {
+      assert(T[s - 1] >= T[s]);
+      c0 = T[--s];
+      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
+      if(c0 != c2) {
+        BUCKET_A(c2) = k - SA;
+        k = SA + BUCKET_A(c2 = c0);
+      }
+      assert(i < k);
+      *k++ = s;
+    } else {
+      assert(s < 0);
+      *i = ~s;
+    }
+  }
+}
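+
+/* Illustrative note (not from the upstream sources): this is the induced
+   sorting step. The right-to-left scans seed each type B bucket from the
+   already sorted B* suffixes, and the final left-to-right scan fills the
+   type A buckets; in both cases suffix s - 1 is placed using the final
+   position of suffix s. */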
+
+/* Constructs the Burrows-Wheeler transformed string directly
+   by using the sorted order of type B* suffixes. */
+static
+int
+construct_BWT(const unsigned char *T, int *SA,
+              int *bucket_A, int *bucket_B,
+              int n, int m) {
+  int *i, *j, *k, *orig;
+  int s;
+  int c0, c1, c2;
+
+  if(0 < m) {
+    /* Construct the sorted order of type B suffixes by using
+       the sorted order of type B* suffixes. */
+    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+      /* Scan the suffix array from right to left. */
+      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+          i <= j;
+          --j) {
+        if(0 < (s = *j)) {
+          assert(T[s] == c1);
+          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+          assert(T[s - 1] <= T[s]);
+          c0 = T[--s];
+          *j = ~((int)c0);
+          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+          if(c0 != c2) {
+            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+            k = SA + BUCKET_B(c2 = c0, c1);
+          }
+          assert(k < j);
+          *k-- = s;
+        } else if(s != 0) {
+          *j = ~s;
+#ifndef NDEBUG
+        } else {
+          assert(T[s] == c1);
+#endif
+        }
+      }
+    }
+  }
+
+  /* Construct the BWTed string by using
+     the sorted order of type B suffixes. */
+  k = SA + BUCKET_A(c2 = T[n - 1]);
+  *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);
+  /* Scan the suffix array from left to right. */
+  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
+    if(0 < (s = *i)) {
+      assert(T[s - 1] >= T[s]);
+      c0 = T[--s];
+      *i = c0;
+      if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }
+      if(c0 != c2) {
+        BUCKET_A(c2) = k - SA;
+        k = SA + BUCKET_A(c2 = c0);
+      }
+      assert(i < k);
+      *k++ = s;
+    } else if(s != 0) {
+      *i = ~s;
+    } else {
+      orig = i;
+    }
+  }
+
+  return orig - SA;
+}
+
+/* Constructs the Burrows-Wheeler transformed string directly
+   by using the sorted order of type B* suffixes. */
+static
+int
+construct_BWT_indexes(const unsigned char *T, int *SA,
+                      int *bucket_A, int *bucket_B,
+                      int n, int m,
+                      unsigned char * num_indexes, int * indexes) {
+  int *i, *j, *k, *orig;
+  int s;
+  int c0, c1, c2;
+
+  int mod = n / 8;
+  {
+      mod |= mod >> 1;  mod |= mod >> 2;
+      mod |= mod >> 4;  mod |= mod >> 8;
+      mod |= mod >> 16; mod >>= 1;
+
+      *num_indexes = (unsigned char)((n - 1) / (mod + 1));
+  }
+
+  if(0 < m) {
+    /* Construct the sorted order of type B suffixes by using
+       the sorted order of type B* suffixes. */
+    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+      /* Scan the suffix array from right to left. */
+      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+          i <= j;
+          --j) {
+        if(0 < (s = *j)) {
+          assert(T[s] == c1);
+          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+          assert(T[s - 1] <= T[s]);
+
+          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;
+
+          c0 = T[--s];
+          *j = ~((int)c0);
+          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+          if(c0 != c2) {
+            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+            k = SA + BUCKET_B(c2 = c0, c1);
+          }
+          assert(k < j);
+          *k-- = s;
+        } else if(s != 0) {
+          *j = ~s;
+#ifndef NDEBUG
+        } else {
+          assert(T[s] == c1);
+#endif
+        }
+      }
+    }
+  }
+
+  /* Construct the BWTed string by using
+     the sorted order of type B suffixes. */
+  k = SA + BUCKET_A(c2 = T[n - 1]);
+  if (T[n - 2] < c2) {
+    if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;
+    *k++ = ~((int)T[n - 2]);
+  }
+  else {
+    *k++ = n - 1;
+  }
+
+  /* Scan the suffix array from left to right. */
+  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
+    if(0 < (s = *i)) {
+      assert(T[s - 1] >= T[s]);
+
+      if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;
+
+      c0 = T[--s];
+      *i = c0;
+      if(c0 != c2) {
+        BUCKET_A(c2) = k - SA;
+        k = SA + BUCKET_A(c2 = c0);
+      }
+      assert(i < k);
+      if((0 < s) && (T[s - 1] < c0)) {
+          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;
+          *k++ = ~((int)T[s - 1]);
+      } else
+        *k++ = s;
+    } else if(s != 0) {
+      *i = ~s;
+    } else {
+      orig = i;
+    }
+  }
+
+  return orig - SA;
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/*- Function -*/
+
+int
+divsufsort(const unsigned char *T, int *SA, int n, int openMP) {
+  int *bucket_A, *bucket_B;
+  int m;
+  int err = 0;
+
+  /* Check arguments. */
+  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
+  else if(n == 0) { return 0; }
+  else if(n == 1) { SA[0] = 0; return 0; }
+  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }
+
+  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
+  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
+
+  /* Suffixsort. */
+  if((bucket_A != NULL) && (bucket_B != NULL)) {
+    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);
+    construct_SA(T, SA, bucket_A, bucket_B, n, m);
+  } else {
+    err = -2;
+  }
+
+  free(bucket_B);
+  free(bucket_A);
+
+  return err;
+}
+
+int
+divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) {
+  int *B;
+  int *bucket_A, *bucket_B;
+  int m, pidx, i;
+
+  /* Check arguments. */
+  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
+  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
+
+  if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); }
+  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
+  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
+
+  /* Burrows-Wheeler Transform. */
+  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
+    m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);
+
+    if (num_indexes == NULL || indexes == NULL) {
+        pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
+    } else {
+        pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);
+    }
+
+    /* Copy to output string. */
+    U[0] = T[n - 1];
+    for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; }
+    for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; }
+    pidx += 1;
+  } else {
+    pidx = -2;
+  }
+
+  free(bucket_B);
+  free(bucket_A);
+  if(A == NULL) { free(B); }
+
+  return pidx;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/divsufsort.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,67 @@
+/*
+ * divsufsort.h for libdivsufsort-lite
+ * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DIVSUFSORT_H
+#define _DIVSUFSORT_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/*- Prototypes -*/
+
+/**
+ * Constructs the suffix array of a given string.
+ * @param T [0..n-1] The input string.
+ * @param SA [0..n-1] The output array of suffixes.
+ * @param n The length of the given string.
+ * @param openMP enables OpenMP optimization.
+ * @return 0 if no error occurred, -1 or -2 otherwise.
+ */
+int
+divsufsort(const unsigned char *T, int *SA, int n, int openMP);
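+
+/*
+ * A minimal usage sketch (illustrative, not part of the upstream header):
+ *
+ *   const unsigned char T[] = "abracadabra";
+ *   int SA[11];
+ *   if(divsufsort(T, SA, 11, 0) == 0) {
+ *     // SA[0..10] now holds the 11 suffix start positions,
+ *     // ordered lexicographically by suffix.
+ *   }
+ */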
+
+/**
+ * Constructs the Burrows-Wheeler transformed string of a given string.
+ * @param T [0..n-1] The input string.
+ * @param U [0..n-1] The output string. (can be T)
+ * @param A [0..n-1] The temporary array. (can be NULL)
+ * @param n The length of the given string.
+ * @param num_indexes The length of secondary indexes array. (can be NULL)
+ * @param indexes The secondary indexes array. (can be NULL)
+ * @param openMP enables OpenMP optimization.
+ * @return The primary index if no error occurred, -1 or -2 otherwise.
+ */
+int
+divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);
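+
+/*
+ * A minimal usage sketch (illustrative, not part of the upstream header):
+ *
+ *   const unsigned char T[] = "abracadabra";
+ *   unsigned char U[11];
+ *   int pidx = divbwt(T, U, NULL, 11, NULL, NULL, 0);
+ *   // On success U holds the BWT of T and pidx is the primary index;
+ *   // with A == NULL, divbwt allocates its own temporary array.
+ */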
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+#endif /* _DIVSUFSORT_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,1012 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/*-**************************************
+*  Tuning parameters
+****************************************/
+#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
+#define ZDICT_MIN_SAMPLES_SIZE 512
+
+
+/*-**************************************
+*  Compiler Options
+****************************************/
+/* Unix Large Files support (>4GB) */
+#define _FILE_OFFSET_BITS 64
+#if (defined(__sun__) && (!defined(__LP64__)))   /* 32-bit Sun Solaris requires specific definitions */
+#  define _LARGEFILE_SOURCE
+#elif ! defined(__LP64__)                        /* no need to define large-file support on 64-bit targets */
+#  define _LARGEFILE64_SOURCE
+#endif
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdlib.h>        /* malloc, free */
+#include <string.h>        /* memset */
+#include <stdio.h>         /* fprintf, fopen, ftello64 */
+#include <time.h>          /* clock */
+
+#include "mem.h"           /* read */
+#include "error_private.h"
+#include "fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#include "xxhash.h"
+#include "divsufsort.h"
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#  define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define DICTLISTSIZE_DEFAULT 10000
+
+#define NOISELENGTH 32
+
+#define MINRATIO 4
+static const int g_compressionLevel_default = 5;
+static const U32 g_selectivity_default = 9;
+static const size_t g_provision_entropySize = 200;
+static const size_t g_min_fast_dictContent = 192;
+
+
+/*-*************************************
+*  Console display
+***************************************/
+#define DISPLAY(...)         { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
+#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+
+static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
+
+static void ZDICT_printHex(const void* ptr, size_t length)
+{
+    const BYTE* const b = (const BYTE*)ptr;
+    size_t u;
+    for (u=0; u<length; u++) {
+        BYTE c = b[u];
+        if (c<32 || c>126) c = '.';   /* non-printable char */
+        DISPLAY("%c", c);
+    }
+}
+
+
+/*-********************************************************
+*  Helper functions
+**********************************************************/
+unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
+{
+    if (dictSize < 8) return 0;
+    if (MEM_readLE32(dictBuffer) != ZSTD_DICT_MAGIC) return 0;
+    return MEM_readLE32((const char*)dictBuffer + 4);
+}
+
+
+/*-********************************************************
+*  Dictionary training functions
+**********************************************************/
+static unsigned ZDICT_NbCommonBytes (register size_t val)
+{
+    if (MEM_isLittleEndian()) {
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanForward64( &r, (U64)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctzll((U64)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r=0;
+            _BitScanForward( &r, (U32)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctz((U32)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#       endif
+        }
+    } else {  /* Big Endian CPU */
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanReverse64( &r, val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_clzll(val) >> 3);
+#       else
+            unsigned r;
+            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
+            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r = 0;
+            _BitScanReverse( &r, (unsigned long)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_clz((U32)val) >> 3);
+#       else
+            unsigned r;
+            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+    }   }
+}
+
+
+/*! ZDICT_count() :
+    Count the nb of common bytes between 2 pointers.
+    Note : this function presumes the buffer is followed by a noisy guard band.
+*/
+static size_t ZDICT_count(const void* pIn, const void* pMatch)
+{
+    const char* const pStart = (const char*)pIn;
+    for (;;) {
+        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+        if (!diff) {
+            pIn = (const char*)pIn+sizeof(size_t);
+            pMatch = (const char*)pMatch+sizeof(size_t);
+            continue;
+        }
+        pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);
+        return (size_t)((const char*)pIn - pStart);
+    }
+}
+
+
+typedef struct {
+    U32 pos;
+    U32 length;
+    U32 savings;
+} dictItem;
+
+static void ZDICT_initDictItem(dictItem* d)
+{
+    d->pos = 1;
+    d->length = 0;
+    d->savings = (U32)(-1);   /* sentinel : max value, so sorted inserts stop at element 0 */
+}
+
+
+#define LLIMIT 64          /* heuristic determined experimentally */
+#define MINMATCHLENGTH 7   /* heuristic determined experimentally */
+static dictItem ZDICT_analyzePos(
+                       BYTE* doneMarks,
+                       const int* suffix, U32 start,
+                       const void* buffer, U32 minRatio, U32 notificationLevel)
+{
+    U32 lengthList[LLIMIT] = {0};
+    U32 cumulLength[LLIMIT] = {0};
+    U32 savings[LLIMIT] = {0};
+    const BYTE* b = (const BYTE*)buffer;
+    size_t length;
+    size_t maxLength = LLIMIT;
+    size_t pos = suffix[start];
+    U32 end = start;
+    dictItem solution;
+
+    /* init */
+    memset(&solution, 0, sizeof(solution));
+    doneMarks[pos] = 1;
+
+    /* trivial repetition cases */
+    if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))
+       ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
+       ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
+        /* skip and mark segment */
+        U16 u16 = MEM_read16(b+pos+4);
+        U32 u, e = 6;
+        while (MEM_read16(b+pos+e) == u16) e+=2 ;
+        if (b[pos+e] == b[pos+e-1]) e++;
+        for (u=1; u<e; u++)
+            doneMarks[pos+u] = 1;
+        return solution;
+    }
+
+    /* look forward */
+    do {
+        end++;
+        length = ZDICT_count(b + pos, b + suffix[end]);
+    } while (length >=MINMATCHLENGTH);
+
+    /* look backward */
+    do {
+        length = ZDICT_count(b + pos, b + *(suffix+start-1));
+        if (length >=MINMATCHLENGTH) start--;
+    } while(length >= MINMATCHLENGTH);
+
+    /* exit if the minimum nb of repetitions was not found */
+    if (end-start < minRatio) {
+        U32 idx;
+        for(idx=start; idx<end; idx++)
+            doneMarks[suffix[idx]] = 1;
+        return solution;
+    }
+
+    {   int i;
+        U32 searchLength;
+        U32 refinedStart = start;
+        U32 refinedEnd = end;
+
+        DISPLAYLEVEL(4, "\n");
+        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u  ", (U32)(end-start), MINMATCHLENGTH, (U32)pos);
+        DISPLAYLEVEL(4, "\n");
+
+        for (searchLength = MINMATCHLENGTH ; ; searchLength++) {
+            BYTE currentChar = 0;
+            U32 currentCount = 0;
+            U32 currentID = refinedStart;
+            U32 id;
+            U32 selectedCount = 0;
+            U32 selectedID = currentID;
+            for (id =refinedStart; id < refinedEnd; id++) {
+                if (b[ suffix[id] + searchLength] != currentChar) {
+                    if (currentCount > selectedCount) {
+                        selectedCount = currentCount;
+                        selectedID = currentID;
+                    }
+                    currentID = id;
+                    currentChar = b[ suffix[id] + searchLength];
+                    currentCount = 0;
+                }
+                currentCount ++;
+            }
+            if (currentCount > selectedCount) {  /* for last */
+                selectedCount = currentCount;
+                selectedID = currentID;
+            }
+
+            if (selectedCount < minRatio)
+                break;
+            refinedStart = selectedID;
+            refinedEnd = refinedStart + selectedCount;
+        }
+
+        /* evaluate gain based on new ref */
+        start = refinedStart;
+        pos = suffix[refinedStart];
+        end = start;
+        memset(lengthList, 0, sizeof(lengthList));
+
+        /* look forward */
+        do {
+            end++;
+            length = ZDICT_count(b + pos, b + suffix[end]);
+            if (length >= LLIMIT) length = LLIMIT-1;
+            lengthList[length]++;
+        } while (length >=MINMATCHLENGTH);
+
+        /* look backward */
+        length = MINMATCHLENGTH;
+        while ((length >= MINMATCHLENGTH) & (start > 0)) {
+            length = ZDICT_count(b + pos, b + suffix[start - 1]);
+            if (length >= LLIMIT) length = LLIMIT - 1;
+            lengthList[length]++;
+            if (length >= MINMATCHLENGTH) start--;
+        }
+
+        /* largest useful length */
+        memset(cumulLength, 0, sizeof(cumulLength));
+        cumulLength[maxLength-1] = lengthList[maxLength-1];
+        for (i=(int)(maxLength-2); i>=0; i--)
+            cumulLength[i] = cumulLength[i+1] + lengthList[i];
+
+        for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;
+        maxLength = i;
+
+        /* reduce maxLength when the tail of the selection runs into repetitive data */
+        {   U32 l = (U32)maxLength;
+            BYTE const c = b[pos + maxLength-1];
+            while (b[pos+l-2]==c) l--;
+            maxLength = l;
+        }
+        if (maxLength < MINMATCHLENGTH) return solution;   /* skip : no long-enough solution */
+
+        /* calculate savings */
+        savings[5] = 0;
+        for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
+            savings[i] = savings[i-1] + (lengthList[i] * (i-3));
+
+        DISPLAYLEVEL(4, "Selected ref at position %u, of length %u : saves %u (ratio: %.2f)  \n",
+                     (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength);
+
+        solution.pos = (U32)pos;
+        solution.length = (U32)maxLength;
+        solution.savings = savings[maxLength];
+
+        /* mark positions done */
+        {   U32 id;
+            for (id=start; id<end; id++) {
+                U32 p, pEnd;
+                U32 const testedPos = suffix[id];
+                if (testedPos == pos)
+                    length = solution.length;
+                else {
+                    length = ZDICT_count(b+pos, b+testedPos);
+                    if (length > solution.length) length = solution.length;
+                }
+                pEnd = (U32)(testedPos + length);
+                for (p=testedPos; p<pEnd; p++)
+                    doneMarks[p] = 1;
+    }   }   }
+
+    return solution;
+}
+
+
+/*! ZDICT_checkMerge
+    check if dictItem can be merged, do it if possible
+    @return : id of destination elt, 0 if not merged
+*/
+static U32 ZDICT_checkMerge(dictItem* table, dictItem elt, U32 eltNbToSkip)
+{
+    const U32 tableSize = table->pos;
+    const U32 eltEnd = elt.pos + elt.length;
+
+    /* tail overlap */
+    U32 u; for (u=1; u<tableSize; u++) {
+        if (u==eltNbToSkip) continue;
+        if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) {  /* overlap, existing > new */
+            /* append */
+            U32 addedLength = table[u].pos - elt.pos;
+            table[u].length += addedLength;
+            table[u].pos = elt.pos;
+            table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
+            table[u].savings += elt.length / 8;    /* rough approx bonus */
+            elt = table[u];
+            /* sort : improve rank */
+            while ((u>1) && (table[u-1].savings < elt.savings))
+            table[u] = table[u-1], u--;
+            table[u] = elt;
+            return u;
+    }   }
+
+    /* front overlap */
+    for (u=1; u<tableSize; u++) {
+        if (u==eltNbToSkip) continue;
+        if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */
+            /* append */
+            int addedLength = (int)eltEnd - (table[u].pos + table[u].length);
+            table[u].savings += elt.length / 8;    /* rough approx bonus */
+            if (addedLength > 0) {   /* otherwise, elt fully included into existing */
+                table[u].length += addedLength;
+                table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
+            }
+            /* sort : improve rank */
+            elt = table[u];
+            while ((u>1) && (table[u-1].savings < elt.savings))
+                table[u] = table[u-1], u--;
+            table[u] = elt;
+            return u;
+    }   }
+
+    return 0;
+}
+
+
+static void ZDICT_removeDictItem(dictItem* table, U32 id)
+{
+    /* convention : first element is nb of elts */
+    U32 const max = table->pos;
+    U32 u;
+    if (!id) return;   /* protection, should never happen */
+    for (u=id; u<max-1; u++)
+        table[u] = table[u+1];
+    table->pos--;
+}
+
+
+static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt)
+{
+    /* merge if possible */
+    U32 mergeId = ZDICT_checkMerge(table, elt, 0);
+    if (mergeId) {
+        U32 newMerge = 1;
+        while (newMerge) {
+            newMerge = ZDICT_checkMerge(table, table[mergeId], mergeId);
+            if (newMerge) ZDICT_removeDictItem(table, mergeId);
+            mergeId = newMerge;
+        }
+        return;
+    }
+
+    /* insert */
+    {   U32 current;
+        U32 nextElt = table->pos;
+        if (nextElt >= maxSize) nextElt = maxSize-1;
+        current = nextElt-1;
+        while (table[current].savings < elt.savings) {
+            table[current+1] = table[current];
+            current--;
+        }
+        table[current+1] = elt;
+        table->pos = nextElt+1;
+    }
+}
+
+
+static U32 ZDICT_dictSize(const dictItem* dictList)
+{
+    U32 u, dictSize = 0;
+    for (u=1; u<dictList[0].pos; u++)
+        dictSize += dictList[u].length;
+    return dictSize;
+}
+
+
+static size_t ZDICT_trainBuffer(dictItem* dictList, U32 dictListSize,
+                            const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */
+                            const size_t* fileSizes, unsigned nbFiles,
+                            U32 minRatio, U32 notificationLevel)
+{
+    int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
+    int* const suffix = suffix0+1;
+    U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));
+    BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks));   /* +16 for overflow security */
+    U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));
+    size_t result = 0;
+    clock_t displayClock = 0;
+    clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;
+
+#   define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
+            if (ZDICT_clockSpan(displayClock) > refreshRate)  \
+            { displayClock = clock(); DISPLAY(__VA_ARGS__); \
+            if (notificationLevel>=4) fflush(stdout); } }
+
+    /* init */
+    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
+    if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {
+        result = ERROR(memory_allocation);
+        goto _cleanup;
+    }
+    if (minRatio < MINRATIO) minRatio = MINRATIO;
+    memset(doneMarks, 0, bufferSize+16);
+
+    /* limit sample set size (divsufsort limitation) */
+    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20));
+    while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
+
+    /* sort */
+    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20));
+    {   int const divSufSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
+        if (divSufSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
+    }
+    suffix[bufferSize] = (int)bufferSize;   /* leads into noise */
+    suffix0[0] = (int)bufferSize;           /* leads into noise */
+    /* build reverse suffix sort */
+    {   size_t pos;
+        for (pos=0; pos < bufferSize; pos++)
+            reverseSuffix[suffix[pos]] = (U32)pos;
+        /* note filePos tracks borders between samples.
+           It's not used at this stage, but planned to become useful in a later update */
+        filePos[0] = 0;
+        for (pos=1; pos<nbFiles; pos++)
+            filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);
+    }
+
+    DISPLAYLEVEL(2, "finding patterns ... \n");
+    DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);
+
+    {   U32 cursor; for (cursor=0; cursor < bufferSize; ) {
+            dictItem solution;
+            if (doneMarks[cursor]) { cursor++; continue; }
+            solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
+            if (solution.length==0) { cursor++; continue; }
+            ZDICT_insertDictItem(dictList, dictListSize, solution);
+            cursor += solution.length;
+            DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
+    }   }
+
+_cleanup:
+    free(suffix0);
+    free(reverseSuffix);
+    free(doneMarks);
+    free(filePos);
+    return result;
+}
+
+
+static void ZDICT_fillNoise(void* buffer, size_t length)
+{
+    unsigned const prime1 = 2654435761U;
+    unsigned const prime2 = 2246822519U;
+    unsigned acc = prime1;
+    size_t p;
+    for (p=0; p<length; p++) {
+        acc *= prime2;
+        ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
+    }
+}
+
+
+typedef struct
+{
+    ZSTD_CCtx* ref;
+    ZSTD_CCtx* zc;
+    void* workPlace;   /* must be ZSTD_BLOCKSIZE_ABSOLUTEMAX allocated */
+} EStats_ress_t;
+
+#define MAXREPOFFSET 1024
+
+static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
+                            U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets,
+                            const void* src, size_t srcSize, U32 notificationLevel)
+{
+    size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << params.cParams.windowLog);
+    size_t cSize;
+
+    if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */
+    {  size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0);
+            if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
+    }
+    cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_ABSOLUTEMAX, src, srcSize);
+    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(1, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
+
+    if (cSize) {  /* if cSize == 0, block is not compressible */
+        const seqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc);
+
+        /* literals stats */
+        {   const BYTE* bytePtr;
+            for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
+                countLit[*bytePtr]++;
+        }
+
+        /* seqStats */
+        {   U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+            ZSTD_seqToCodes(seqStorePtr);
+
+            {   const BYTE* codePtr = seqStorePtr->ofCode;
+                U32 u;
+                for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
+            }
+
+            {   const BYTE* codePtr = seqStorePtr->mlCode;
+                U32 u;
+                for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
+            }
+
+            {   const BYTE* codePtr = seqStorePtr->llCode;
+                U32 u;
+                for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
+            }
+
+            if (nbSeq >= 2) { /* rep offsets */
+                const seqDef* const seq = seqStorePtr->sequencesStart;
+                U32 offset1 = seq[0].offset - 3;
+                U32 offset2 = seq[1].offset - 3;
+                if (offset1 >= MAXREPOFFSET) offset1 = 0;
+                if (offset2 >= MAXREPOFFSET) offset2 = 0;
+                repOffsets[offset1] += 3;
+                repOffsets[offset2] += 1;
+    }   }   }
+}
+
+/*
+static size_t ZDICT_maxSampleSize(const size_t* fileSizes, unsigned nbFiles)
+{
+    unsigned u;
+    size_t max=0;
+    for (u=0; u<nbFiles; u++)
+        if (max < fileSizes[u]) max = fileSizes[u];
+    return max;
+}
+*/
+
+static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
+{
+    size_t total=0;
+    unsigned u;
+    for (u=0; u<nbFiles; u++) total += fileSizes[u];
+    return total;
+}
+
+typedef struct { U32 offset; U32 count; } offsetCount_t;
+
+static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)
+{
+    U32 u;
+    table[ZSTD_REP_NUM].offset = val;
+    table[ZSTD_REP_NUM].count = count;
+    for (u=ZSTD_REP_NUM; u>0; u--) {
+        offsetCount_t tmp;
+        if (table[u-1].count >= table[u].count) break;
+        tmp = table[u-1];
+        table[u-1] = table[u];
+        table[u] = tmp;
+    }
+}
+
+
+#define OFFCODE_MAX 30  /* only applicable to first block */
+static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
+                                   unsigned compressionLevel,
+                             const void*  srcBuffer, const size_t* fileSizes, unsigned nbFiles,
+                             const void* dictBuffer, size_t  dictBufferSize,
+                                   unsigned notificationLevel)
+{
+    U32 countLit[256];
+    HUF_CREATE_STATIC_CTABLE(hufTable, 255);
+    U32 offcodeCount[OFFCODE_MAX+1];
+    short offcodeNCount[OFFCODE_MAX+1];
+    U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
+    U32 matchLengthCount[MaxML+1];
+    short matchLengthNCount[MaxML+1];
+    U32 litLengthCount[MaxLL+1];
+    short litLengthNCount[MaxLL+1];
+    U32 repOffset[MAXREPOFFSET];
+    offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
+    EStats_ress_t esr;
+    ZSTD_parameters params;
+    U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
+    size_t pos = 0, errorCode;
+    size_t eSize = 0;
+    size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
+    size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);   /* +!nbFiles avoids division by zero */
+    BYTE* dstPtr = (BYTE*)dstBuffer;
+
+    /* init */
+    esr.ref = ZSTD_createCCtx();
+    esr.zc = ZSTD_createCCtx();
+    esr.workPlace = malloc(ZSTD_BLOCKSIZE_ABSOLUTEMAX);
+    if (!esr.ref || !esr.zc || !esr.workPlace) {
+        eSize = ERROR(memory_allocation);
+        DISPLAYLEVEL(1, "Not enough memory \n");
+        goto _cleanup;
+    }
+    if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionary_wrong); goto _cleanup; }   /* too large dictionary */
+    for (u=0; u<256; u++) countLit[u]=1;   /* any character must be described */
+    for (u=0; u<=offcodeMax; u++) offcodeCount[u]=1;
+    for (u=0; u<=MaxML; u++) matchLengthCount[u]=1;
+    for (u=0; u<=MaxLL; u++) litLengthCount[u]=1;
+    memset(repOffset, 0, sizeof(repOffset));
+    repOffset[1] = repOffset[4] = repOffset[8] = 1;
+    memset(bestRepOffset, 0, sizeof(bestRepOffset));
+    if (compressionLevel==0) compressionLevel=g_compressionLevel_default;
+    params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
+    {   size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0);
+        if (ZSTD_isError(beginResult)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced failed \n");
+            goto _cleanup;
+    }   }
+
+    /* collect stats on all files */
+    for (u=0; u<nbFiles; u++) {
+        ZDICT_countEStats(esr, params,
+                          countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
+                         (const char*)srcBuffer + pos, fileSizes[u],
+                          notificationLevel);
+        pos += fileSizes[u];
+    }
+
+    /* analyze */
+    errorCode = HUF_buildCTable (hufTable, countLit, 255, huffLog);
+    if (HUF_isError(errorCode)) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "HUF_buildCTable error \n");
+        goto _cleanup;
+    }
+    huffLog = (U32)errorCode;
+
+    /* looking for most common first offsets */
+    {   U32 offset;
+        for (offset=1; offset<MAXREPOFFSET; offset++)
+            ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
+    }
+    /* note : the result of this phase should be used to better appreciate the impact on statistics */
+
+    total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
+    errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
+    if (FSE_isError(errorCode)) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
+        goto _cleanup;
+    }
+    Offlog = (U32)errorCode;
+
+    total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
+    errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
+    if (FSE_isError(errorCode)) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
+        goto _cleanup;
+    }
+    mlLog = (U32)errorCode;
+
+    total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
+    errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
+    if (FSE_isError(errorCode)) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
+        goto _cleanup;
+    }
+    llLog = (U32)errorCode;
+
+    /* write result to buffer */
+    {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
+        if (HUF_isError(hhSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "HUF_writeCTable error \n");
+            goto _cleanup;
+        }
+        dstPtr += hhSize;
+        maxDstSize -= hhSize;
+        eSize += hhSize;
+    }
+
+    {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
+        if (FSE_isError(ohSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
+            goto _cleanup;
+        }
+        dstPtr += ohSize;
+        maxDstSize -= ohSize;
+        eSize += ohSize;
+    }
+
+    {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
+        if (FSE_isError(mhSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
+            goto _cleanup;
+        }
+        dstPtr += mhSize;
+        maxDstSize -= mhSize;
+        eSize += mhSize;
+    }
+
+    {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
+        if (FSE_isError(lhSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
+            goto _cleanup;
+        }
+        dstPtr += lhSize;
+        maxDstSize -= lhSize;
+        eSize += lhSize;
+    }
+
+    if (maxDstSize<12) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
+        goto _cleanup;
+    }
+#if 0
+    MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);
+    MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);
+    MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
+#else
+    /* at this stage, we don't use the result of "most common first offset",
+       as the impact of statistics is not properly evaluated */
+    MEM_writeLE32(dstPtr+0, repStartValue[0]);
+    MEM_writeLE32(dstPtr+4, repStartValue[1]);
+    MEM_writeLE32(dstPtr+8, repStartValue[2]);
+#endif
+    //dstPtr += 12;
+    eSize += 12;
+
+_cleanup:
+    ZSTD_freeCCtx(esr.ref);
+    ZSTD_freeCCtx(esr.zc);
+    free(esr.workPlace);
+
+    return eSize;
+}
+
+
+size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+                                                 const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                                 ZDICT_params_t params)
+{
+    size_t hSize;
+    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    U32 const notificationLevel = params.notificationLevel;
+
+    /* dictionary header */
+    MEM_writeLE32(dictBuffer, ZSTD_DICT_MAGIC);
+    {   U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
+        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+        U32 const dictID = params.dictID ? params.dictID : compliantID;
+        MEM_writeLE32((char*)dictBuffer+4, dictID);
+    }
+    hSize = 8;
+
+    /* entropy tables */
+    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
+    DISPLAYLEVEL(2, "statistics ... \n");
+    {   size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
+                                  compressionLevel,
+                                  samplesBuffer, samplesSizes, nbSamples,
+                                  (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,
+                                  notificationLevel);
+        if (ZDICT_isError(eSize)) return eSize;
+        hSize += eSize;
+    }
+
+
+    if (hSize + dictContentSize < dictBufferCapacity)
+        memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
+    return MIN(dictBufferCapacity, hSize+dictContentSize);
+}
+
+
+/*! ZDICT_trainFromBuffer_unsafe() :
+*   Warning : `samplesBuffer` must be followed by noisy guard band.
+*   @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
+*/
+size_t ZDICT_trainFromBuffer_unsafe(
+                            void* dictBuffer, size_t maxDictSize,
+                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                            ZDICT_params_t params)
+{
+    U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
+    dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
+    unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;
+    unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity;
+    size_t const targetDictSize = maxDictSize;
+    size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
+    size_t dictSize = 0;
+    U32 const notificationLevel = params.notificationLevel;
+
+    /* checks */
+    if (!dictList) return ERROR(memory_allocation);
+    if (maxDictSize <= g_provision_entropySize + g_min_fast_dictContent) { free(dictList); return ERROR(dstSize_tooSmall); }
+    if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return 0; }   /* not enough source to create dictionary */
+
+    /* init */
+    ZDICT_initDictItem(dictList);
+
+    /* build dictionary */
+    ZDICT_trainBuffer(dictList, dictListSize,
+                    samplesBuffer, samplesBuffSize,
+                    samplesSizes, nbSamples,
+                    minRep, notificationLevel);
+
+    /* display best matches */
+    if (params.notificationLevel>= 3) {
+        U32 const nb = MIN(25, dictList[0].pos);
+        U32 const dictContentSize = ZDICT_dictSize(dictList);
+        U32 u;
+        DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", dictList[0].pos-1, dictContentSize);
+        DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
+        for (u=1; u<nb; u++) {
+            U32 const pos = dictList[u].pos;
+            U32 const length = dictList[u].length;
+            U32 const printedLength = MIN(40, length);
+            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize))
+                return ERROR(GENERIC);   /* should never happen */
+            DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
+                         u, length, pos, dictList[u].savings);
+            ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
+            DISPLAYLEVEL(3, "| \n");
+    }   }
+
+
+    /* create dictionary */
+    {   U32 dictContentSize = ZDICT_dictSize(dictList);
+        if (dictContentSize < targetDictSize/3) {
+            DISPLAYLEVEL(2, "!  warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize);
+            if (minRep > MINRATIO) {
+                DISPLAYLEVEL(2, "!  consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
+                DISPLAYLEVEL(2, "!  note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
+            }
+            if (samplesBuffSize < 10 * targetDictSize)
+                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20));
+        }
+
+        if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
+            U32 proposedSelectivity = selectivity-1;
+            while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
+            DISPLAYLEVEL(2, "!  note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize);
+            DISPLAYLEVEL(2, "!  consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
+            DISPLAYLEVEL(2, "!  always test dictionary efficiency on samples \n");
+        }
+
+        /* limit dictionary size */
+        {   U32 const max = dictList->pos;   /* convention : nb of useful elts within dictList */
+            U32 currentSize = 0;
+            U32 n; for (n=1; n<max; n++) {
+                currentSize += dictList[n].length;
+                if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }
+            }
+            dictList->pos = n;
+            dictContentSize = currentSize;
+        }
+
+        /* build dict content */
+        {   U32 u;
+            BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;
+            for (u=1; u<dictList->pos; u++) {
+                U32 l = dictList[u].length;
+                ptr -= l;
+                if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); }   /* should not happen */
+                memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);
+        }   }
+
+        dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
+                                                             samplesBuffer, samplesSizes, nbSamples,
+                                                             params);
+    }
+
+    /* clean up */
+    free(dictList);
+    return dictSize;
+}
+
+
+/* issue : samplesBuffer needs to be followed by a noisy guard band.
+*  workaround : duplicate the buffer, and append the noise */
+size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+                                      const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                      ZDICT_params_t params)
+{
+    size_t result;
+    void* newBuff;
+    size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
+    if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0;   /* not enough content => no dictionary */
+
+    newBuff = malloc(sBuffSize + NOISELENGTH);
+    if (!newBuff) return ERROR(memory_allocation);
+
+    memcpy(newBuff, samplesBuffer, sBuffSize);
+    ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH);   /* guard band, for end of buffer condition */
+
+    result = ZDICT_trainFromBuffer_unsafe(
+                                        dictBuffer, dictBufferCapacity,
+                                        newBuff, samplesSizes, nbSamples,
+                                        params);
+    free(newBuff);
+    return result;
+}
+
+
+size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+    ZDICT_params_t params;
+    memset(&params, 0, sizeof(params));
+    return ZDICT_trainFromBuffer_advanced(dictBuffer, dictBufferCapacity,
+                                          samplesBuffer, samplesSizes, nbSamples,
+                                          params);
+}
+
+size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+    ZDICT_params_t params;
+    memset(&params, 0, sizeof(params));
+    return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,
+                                                     samplesBuffer, samplesSizes, nbSamples,
+                                                     params);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,111 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#ifndef DICTBUILDER_H_001
+#define DICTBUILDER_H_001
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*======  Dependencies  ======*/
+#include <stddef.h>  /* size_t */
+
+
+/*======  Export for Windows  ======*/
+/*!
+*  ZSTD_DLL_EXPORT :
+*  Enable exporting of functions when building a Windows DLL
+*/
+#if defined(_WIN32) && defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZDICTLIB_API __declspec(dllexport)
+#else
+#  define ZDICTLIB_API
+#endif
+
+
+/*! ZDICT_trainFromBuffer() :
+    Train a dictionary from an array of samples.
+    Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+    The resulting dictionary will be saved into `dictBuffer`.
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+              or an error code, which can be tested with ZDICT_isError().
+    Tips : In general, a reasonable dictionary has a size of ~ 100 KB.
+           It's possible to target smaller or larger dictionaries, just by specifying a different `dictBufferCapacity`.
+           In general, it's recommended to provide a few thousand samples, but this can vary a lot.
+           It's recommended that the total size of all samples be about 100x the target size of the dictionary.
+*/
+ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                       const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
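+
+/*  Brief usage sketch (illustration only, guarded out of compilation) :
+    it assumes `samplesBuffer`, `samplesSizes` and `nbSamples` already hold
+    the training set described above. */
+#if 0   /* usage sketch */
+#include <stdio.h>    /* fprintf */
+#include <stdlib.h>   /* malloc, free */
+
+static void trainExample(const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+    size_t const dictCapacity = 100 * 1024;   /* ~100 KB target, per the tips above */
+    void* const dictBuffer = malloc(dictCapacity);
+    if (dictBuffer != NULL) {
+        size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
+                                                      samplesBuffer, samplesSizes, nbSamples);
+        if (ZDICT_isError(dictSize))
+            fprintf(stderr, "training failed : %s \n", ZDICT_getErrorName(dictSize));
+        else
+            fprintf(stderr, "dictionary of %u bytes, dictID = %u \n",
+                    (unsigned)dictSize, ZDICT_getDictID(dictBuffer, dictSize));
+        free(dictBuffer);
+    }
+}
+#endif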
+
+
+/*======   Helper functions   ======*/
+ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize);  /**< extracts dictID; @return zero if error (not a valid dictionary) */
+ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
+ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
+
+
+
+#ifdef ZDICT_STATIC_LINKING_ONLY
+
+/* ====================================================================================
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ==================================================================================== */
+
+typedef struct {
+    unsigned selectivityLevel;   /* 0 means default; larger => select more => larger dictionary */
+    int      compressionLevel;   /* 0 means default; target a specific zstd compression level */
+    unsigned notificationLevel;  /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
+    unsigned dictID;             /* 0 means auto mode (32-bits random value); other : force dictID value */
+    unsigned reserved[2];        /* reserved space for future parameters */
+} ZDICT_params_t;
+
+
+/*! ZDICT_trainFromBuffer_advanced() :
+    Same as ZDICT_trainFromBuffer() with control over more parameters.
+    `parameters` is optional and can be provided with values set to 0 to mean "default".
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferSize`),
+              or an error code, which can be tested by ZDICT_isError().
+    note : ZDICT_trainFromBuffer_advanced() will send notifications to stderr if instructed to, using notificationLevel>0.
+*/
+size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                ZDICT_params_t parameters);
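+
+/*  Minimal parameter sketch (illustration only, guarded out of compilation) :
+    zero-initialize ZDICT_params_t so every field takes its default, then
+    override only the fields of interest. */
+#if 0   /* usage sketch */
+#include <string.h>   /* memset */
+
+static size_t trainWithProgress(void* dictBuffer, size_t dictBufferCapacity,
+                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+    ZDICT_params_t params;
+    memset(&params, 0, sizeof(params));   /* all-zero == all defaults */
+    params.notificationLevel = 2;         /* report progression on stderr */
+    return ZDICT_trainFromBuffer_advanced(dictBuffer, dictBufferCapacity,
+                                          samplesBuffer, samplesSizes, nbSamples, params);
+}
+#endif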
+
+
+/*! ZDICT_addEntropyTablesFromBuffer() :
+
+    Given a content-only dictionary (built using any 3rd party algorithm),
+    add entropy tables computed from an array of samples.
+    Samples must be stored concatenated in a flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
+
+    The input dictionary content must be stored *at the end* of `dictBuffer`.
+    Its size is `dictContentSize`.
+    The resulting dictionary with added entropy tables will be *written back to `dictBuffer`*,
+    starting from its beginning.
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`).
+*/
+size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
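+
+/*  Buffer-layout sketch (illustration only, guarded out of compilation) :
+    the externally-built content is first copied to the *end* of `dictBuffer`,
+    then the entropy tables are written at its beginning, as described above. */
+#if 0   /* usage sketch */
+#include <string.h>   /* memcpy */
+
+static size_t finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                           const void* rawContent, size_t rawContentSize,
+                           const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+    if (rawContentSize > dictBufferCapacity) return 0;   /* content cannot fit */
+    memcpy((char*)dictBuffer + dictBufferCapacity - rawContentSize, rawContent, rawContentSize);
+    return ZDICT_addEntropyTablesFromBuffer(dictBuffer, rawContentSize, dictBufferCapacity,
+                                            samplesBuffer, samplesSizes, nbSamples);
+}
+#endif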
+
+
+
+#endif   /* ZDICT_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* DICTBUILDER_H_001 */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/zstd.h	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ======   Dependency   ======*/
+#include <stddef.h>   /* size_t */
+
+
+/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#  define ZSTDLIB_API __attribute__ ((visibility ("default")))
+#elif defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZSTDLIB_API __declspec(dllexport)
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDLIB_API __declspec(dllimport) /* Not required, but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+#  define ZSTDLIB_API
+#endif
+
+
+/*******************************************************************************************************
+  Introduction
+
+  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting real-time compression scenarios
+  at zlib-level and better compression ratios. The zstd compression library provides in-memory compression and
+  decompression functions. The library supports compression levels from 1 up to ZSTD_maxCLevel() which is 22.
+  Levels >= 20, labelled `--ultra`, should be used with caution, as they require more memory.
+  Compression can be done in:
+    - a single step (described as Simple API)
+    - a single step, reusing a context (described as Explicit memory management)
+    - unbounded multiple steps (described as Streaming compression)
+  The compression ratio achievable on small data can be greatly improved by using compression with a dictionary in:
+    - a single step (described as Simple dictionary API)
+    - a single step, reusing a dictionary (described as Fast dictionary API)
+
+  Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
+  These APIs shall never be used with a dynamic library.
+  They are not "stable", their definition may change in the future. Only static linking is allowed.
+*********************************************************************************************************/
+
+/*------   Version   ------*/
+#define ZSTD_VERSION_MAJOR    1
+#define ZSTD_VERSION_MINOR    1
+#define ZSTD_VERSION_RELEASE  2
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+
+#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< library version number; to be used when checking dll version */
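+
+/*  Short sketch (illustration only, guarded out of compilation) : check at
+    runtime that the loaded library is at least as recent as these headers. */
+#if 0   /* usage sketch */
+#include <assert.h>
+static void checkLibraryVersion(void)
+{
+    assert(ZSTD_versionNumber() >= ZSTD_VERSION_NUMBER);
+}
+#endif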
+
+
+/***************************************
+*  Simple API
+***************************************/
+/*! ZSTD_compress() :
+    Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+    Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
+    @return : compressed size written into `dst` (<= `dstCapacity`),
+              or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                                  int compressionLevel);
+
+/*! ZSTD_decompress() :
+    `compressedSize` : must be the _exact_ size of a single compressed frame.
+    `dstCapacity` is an upper bound of originalSize.
+    If the user cannot determine an upper bound, it's better to use streaming mode to decompress the data.
+    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+              or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+                              const void* src, size_t compressedSize);
+
+/*! ZSTD_getDecompressedSize() :
+*   'src' is the start of a zstd compressed frame.
+*   @return : content size to be decompressed, as a 64-bit value _if known_, 0 otherwise.
+*    note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+*             When `return==0`, data to decompress could be any size.
+*             In which case, it's necessary to use streaming mode to decompress data.
+*             Optionally, application can still use ZSTD_decompress() while relying on implied limits.
+*             (For example, data may necessarily be cut into blocks <= 16 KB).
+*    note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+*    note 3 : decompressed size can be very large (64-bit value),
+*             potentially larger than what local system can handle as a single memory segment.
+*             In which case, it's necessary to use streaming mode to decompress data.
+*    note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+*             Always ensure result fits within application's authorized limits.
+*             Each application can set its own limits.
+*    note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
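+
+/*  Round-trip sketch for the simple API (illustration only, guarded out of
+    compilation). Error handling is reduced to the essentials. */
+#if 0   /* usage sketch */
+#include <stdlib.h>   /* malloc, free */
+
+static int simpleRoundTrip(const void* src, size_t srcSize)
+{
+    int result = -1;
+    size_t const cBound = ZSTD_compressBound(srcSize);
+    void* const cBuff = malloc(cBound);
+    void* const rBuff = malloc(srcSize);
+    if (cBuff && rBuff) {
+        size_t const cSize = ZSTD_compress(cBuff, cBound, src, srcSize, 1 /* level */);
+        if ( !ZSTD_isError(cSize)
+          && ZSTD_getDecompressedSize(cBuff, cSize) == srcSize ) {   /* always present : note 2 */
+            size_t const dSize = ZSTD_decompress(rBuff, srcSize, cBuff, cSize);
+            if (!ZSTD_isError(dSize) && dSize == srcSize) result = 0;
+        }
+    }
+    free(cBuff); free(rBuff);
+    return result;
+}
+#endif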
+
+
+/*======  Helper functions  ======*/
+ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
+ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case scenario */
+ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
+
+
+/***************************************
+*  Explicit memory management
+***************************************/
+/*= Compression context
+*   When compressing many times,
+*   it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+*   This makes the workload friendlier to the system's memory.
+*   Use one context per thread for parallel execution in multi-threaded environments. */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
+
+/*! ZSTD_compressCCtx() :
+    Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel);
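+
+/*  Context-reuse sketch (illustration only, guarded out of compilation) :
+    a single ZSTD_CCtx, allocated once, serves many compression calls. */
+#if 0   /* usage sketch */
+static void compressMany(void* dst, size_t dstCapacity,
+                   const void* const* srcs, const size_t* srcSizes, unsigned nbSrcs)
+{
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+    if (cctx != NULL) {
+        unsigned u;
+        for (u = 0; u < nbSrcs; u++) {
+            size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity,
+                                                   srcs[u], srcSizes[u], 3 /* level */);
+            (void)cSize;   /* store or transmit compressed data here */
+        }
+        ZSTD_freeCCtx(cctx);
+    }
+}
+#endif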
+
+/*= Decompression context */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
+
+/*! ZSTD_decompressDCtx() :
+*   Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()). */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/**************************
+*  Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+*   Compression using a predefined Dictionary (see dictBuilder/zdict.h).
+*   Note : This function loads the dictionary, resulting in significant startup delay.
+*   Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     const void* dict,size_t dictSize,
+                                           int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+*   Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
+*   Dictionary must be identical to the one used during compression.
+*   Note : This function loads the dictionary, resulting in significant startup delay.
+*   Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+                                             void* dst, size_t dstCapacity,
+                                       const void* src, size_t srcSize,
+                                       const void* dict,size_t dictSize);
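+
+/*  Dictionary round-trip sketch (illustration only, guarded out of
+    compilation) : the exact same `dict` bytes feed both sides. */
+#if 0   /* usage sketch */
+static size_t dictRoundTrip(void* dst, size_t dstCapacity,
+                            void* decoded, size_t decodedCapacity,
+                      const void* src, size_t srcSize,
+                      const void* dict, size_t dictSize)
+{
+    size_t dSize = 0;
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    if (cctx && dctx) {
+        size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
+                                                     src, srcSize, dict, dictSize, 3 /* level */);
+        if (!ZSTD_isError(cSize))
+            dSize = ZSTD_decompress_usingDict(dctx, decoded, decodedCapacity,
+                                              dst, cSize, dict, dictSize);
+    }
+    if (cctx) ZSTD_freeCCtx(cctx);
+    if (dctx) ZSTD_freeDCtx(dctx);
+    return dSize;
+}
+#endif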
+
+
+/****************************
+*  Fast dictionary API
+****************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+*   When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+*   ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+*   ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only.
+*   `dict` can be released after ZSTD_CDict creation. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+*   Function frees memory allocated by ZSTD_createCDict(). */
+ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+*   Compression using a digested Dictionary.
+*   Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
+*   Note that compression level is decided during dictionary creation. */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+                                            void* dst, size_t dstCapacity,
+                                      const void* src, size_t srcSize,
+                                      const ZSTD_CDict* cdict);
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+*   Create a digested dictionary, ready to start decompression operation without startup delay.
+*   `dict` can be released after creation. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+*   Function frees memory allocated with ZSTD_createDDict() */
+ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+*   Decompression using a digested Dictionary.
+*   Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+                                              void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize,
+                                        const ZSTD_DDict* ddict);
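+
+/*  Digested-dictionary sketch (illustration only, guarded out of compilation) :
+    create the CDict/DDict once, then reuse them for many small messages. */
+#if 0   /* usage sketch */
+static void digestedDictExample(const void* dict, size_t dictSize)
+{
+    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3 /* level fixed at creation */);
+    ZSTD_DDict* const ddict = ZSTD_createDDict(dict, dictSize);
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    if (cdict && ddict && cctx && dctx) {
+        char const msg[] = "a small message";
+        char cBuff[128]; char rBuff[128];
+        size_t const cSize = ZSTD_compress_usingCDict(cctx, cBuff, sizeof(cBuff),
+                                                      msg, sizeof(msg), cdict);
+        if (!ZSTD_isError(cSize))
+            (void)ZSTD_decompress_usingDDict(dctx, rBuff, sizeof(rBuff), cBuff, cSize, ddict);
+    }
+    if (cctx) ZSTD_freeCCtx(cctx);
+    if (dctx) ZSTD_freeDCtx(dctx);
+    if (cdict) ZSTD_freeCDict(cdict);
+    if (ddict) ZSTD_freeDDict(ddict);
+}
+#endif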
+
+
+/****************************
+*  Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+  const void* src;    /**< start of input buffer */
+  size_t size;        /**< size of input buffer */
+  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+  void*  dst;         /**< start of output buffer */
+  size_t size;        /**< size of output buffer */
+  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+*  Streaming compression - HowTo
+*
+*  A ZSTD_CStream object is required to track streaming operation.
+*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+*  It is recommended to re-use a ZSTD_CStream when many streaming operations will be performed consecutively,
+*  since this re-uses already allocated memory and is friendlier to the system's memory.
+*  Use one separate ZSTD_CStream per thread for parallel execution.
+*
+*  Start a new compression by initializing ZSTD_CStream.
+*  Use ZSTD_initCStream() to start a new compression operation.
+*  Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section)
+*
+*  Use ZSTD_compressStream() repetitively to consume input stream.
+*  The function will automatically update both `pos` fields.
+*  Note that it may not consume the entire input, in which case `pos < size`,
+*  and it's up to the caller to present the remaining data again.
+*  @return : a size hint, preferred nb of bytes to use as input for next function call
+*            or an error code, which can be tested using ZSTD_isError().
+*            Note 1 : it's just a hint, to help latency a little; any other value will work fine.
+*            Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
+*
+*  At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream().
+*  `output->pos` will be updated.
+*  Note that some content might still be left within internal buffer if `output->size` is too small.
+*  @return : nb of bytes still present within internal buffer (0 if it's empty)
+*            or an error code, which can be tested using ZSTD_isError().
+*
+*  ZSTD_endStream() instructs to finish a frame.
+*  It will perform a flush and write frame epilogue.
+*  The epilogue is required for decoders to consider a frame completed.
+*  Similar to ZSTD_flushStream(), it may not be able to flush the full content if `output->size` is too small.
+*  In that case, call ZSTD_endStream() again to complete the flush.
+*  @return : nb of bytes still present within internal buffer (0 if it's empty, hence compression completed)
+*            or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef struct ZSTD_CStream_s ZSTD_CStream;
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to be large enough to flush at least one complete compressed block in all circumstances. */
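+
+/*  Streaming compression sketch (illustration only, guarded out of
+    compilation) : FILE-to-FILE compression following the HowTo above. */
+#if 0   /* usage sketch */
+#include <stdio.h>    /* FILE, fread, fwrite */
+#include <stdlib.h>   /* malloc, free */
+
+static int compressFile(FILE* fin, FILE* fout)
+{
+    int result = -1;
+    size_t const inSize = ZSTD_CStreamInSize();
+    size_t const outSize = ZSTD_CStreamOutSize();
+    void* const inBuff = malloc(inSize);
+    void* const outBuff = malloc(outSize);
+    ZSTD_CStream* const zcs = ZSTD_createCStream();
+    if (inBuff && outBuff && zcs && !ZSTD_isError(ZSTD_initCStream(zcs, 3 /* level */))) {
+        size_t read, remaining;
+        while ((read = fread(inBuff, 1, inSize, fin)) > 0) {
+            ZSTD_inBuffer input = { inBuff, read, 0 };
+            while (input.pos < input.size) {   /* present remaining input again if not fully consumed */
+                ZSTD_outBuffer output = { outBuff, outSize, 0 };
+                if (ZSTD_isError(ZSTD_compressStream(zcs, &output, &input))) goto _end;
+                fwrite(outBuff, 1, output.pos, fout);
+            }
+        }
+        do {   /* finish the frame; retry while internal buffers still hold data */
+            ZSTD_outBuffer output = { outBuff, outSize, 0 };
+            remaining = ZSTD_endStream(zcs, &output);
+            if (ZSTD_isError(remaining)) goto _end;
+            fwrite(outBuff, 1, output.pos, fout);
+        } while (remaining > 0);
+        result = 0;
+    }
+_end:
+    if (zcs) ZSTD_freeCStream(zcs);
+    free(inBuff); free(outBuff);
+    return result;
+}
+#endif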
+
+
+
+/*-***************************************************************************
+*  Streaming decompression - HowTo
+*
+*  A ZSTD_DStream object is required to track streaming operations.
+*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+*  ZSTD_DStream objects can be re-used multiple times.
+*
+*  Use ZSTD_initDStream() to start a new decompression operation,
+*   or ZSTD_initDStream_usingDict() if decompression requires a dictionary.
+*   @return : recommended first input size
+*
+*  Use ZSTD_decompressStream() repetitively to consume your input.
+*  The function will update both `pos` fields.
+*  If `input.pos < input.size`, some input has not been consumed.
+*  It's up to the caller to present the remaining data again.
+*  If `output.pos < output.size`, decoder has flushed everything it could.
+*  @return : 0 when a frame is completely decoded and fully flushed,
+*            an error code, which can be tested using ZSTD_isError(),
+*            any other value > 0, which means there is still some decoding to do to complete current frame.
+*            The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame.
+* *******************************************************************************/
+
+typedef struct ZSTD_DStream_s ZSTD_DStream;
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to be large enough to flush at least one complete block in all circumstances. */
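+
+/*  Streaming decompression sketch (illustration only, guarded out of
+    compilation) : mirrors the compression loop above. */
+#if 0   /* usage sketch */
+#include <stdio.h>    /* FILE, fread, fwrite */
+#include <stdlib.h>   /* malloc, free */
+
+static int decompressFile(FILE* fin, FILE* fout)
+{
+    int result = -1;
+    size_t const inSize = ZSTD_DStreamInSize();
+    size_t const outSize = ZSTD_DStreamOutSize();
+    void* const inBuff = malloc(inSize);
+    void* const outBuff = malloc(outSize);
+    ZSTD_DStream* const zds = ZSTD_createDStream();
+    if (inBuff && outBuff && zds && !ZSTD_isError(ZSTD_initDStream(zds))) {
+        size_t read, ret = 1;
+        while ((read = fread(inBuff, 1, inSize, fin)) > 0) {
+            ZSTD_inBuffer input = { inBuff, read, 0 };
+            while (input.pos < input.size) {
+                ZSTD_outBuffer output = { outBuff, outSize, 0 };
+                ret = ZSTD_decompressStream(zds, &output, &input);
+                if (ZSTD_isError(ret)) goto _end;
+                fwrite(outBuff, 1, output.pos, fout);
+            }
+        }
+        if (ret == 0) result = 0;   /* 0 == frame completely decoded and fully flushed */
+    }
+_end:
+    if (zds) ZSTD_freeDStream(zds);
+    free(inBuff); free(outBuff);
+    return result;
+}
+#endif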
+
+#endif  /* ZSTD_H_235446 */
+
+
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/****************************************************************************************
+ * START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+/* --- Constants ---*/
+#define ZSTD_MAGICNUMBER            0xFD2FB528   /* v0.8 */
+#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
+
+#define ZSTD_WINDOWLOG_MAX_32  25
+#define ZSTD_WINDOWLOG_MAX_64  27
+#define ZSTD_WINDOWLOG_MAX    ((U32)(MEM_32bits() ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN     10
+#define ZSTD_HASHLOG_MAX       ZSTD_WINDOWLOG_MAX
+#define ZSTD_HASHLOG_MIN        6
+#define ZSTD_CHAINLOG_MAX     (ZSTD_WINDOWLOG_MAX+1)
+#define ZSTD_CHAINLOG_MIN      ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX      17
+#define ZSTD_SEARCHLOG_MAX    (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN      1
+#define ZSTD_SEARCHLENGTH_MAX   7   /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MIN   3   /* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MIN   4
+#define ZSTD_TARGETLENGTH_MAX 999
+
+#define ZSTD_FRAMEHEADERSIZE_MAX 18    /* for static allocation */
+static const size_t ZSTD_frameHeaderSize_prefix = 5;
+static const size_t ZSTD_frameHeaderSize_min = 6;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
+
+
+/*--- Advanced types ---*/
+typedef enum { ZSTD_fast, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2, ZSTD_btopt, ZSTD_btopt2 } ZSTD_strategy;   /* from faster to stronger */
+
+typedef struct {
+    unsigned windowLog;      /**< largest match distance : larger == more compression, more memory needed during decompression */
+    unsigned chainLog;       /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+    unsigned hashLog;        /**< dispatch table : larger == faster, more memory */
+    unsigned searchLog;      /**< nb of searches : larger == more compression, slower */
+    unsigned searchLength;   /**< match length searched : larger == faster decompression, sometimes less compression */
+    unsigned targetLength;   /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
+    ZSTD_strategy strategy;
+} ZSTD_compressionParameters;
+
+typedef struct {
+    unsigned contentSizeFlag; /**< 1: content size will be in frame header (if known). */
+    unsigned checksumFlag;    /**< 1: will generate a 32-bit checksum at end of frame, to be used for error detection by decompressor */
+    unsigned noDictIDFlag;    /**< 1: no dict ID will be saved into frame header (if dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+    ZSTD_compressionParameters cParams;
+    ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+/*= Custom memory allocation functions */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+
+
+/***************************************
+*  Advanced compression functions
+***************************************/
+/*! ZSTD_estimateCCtxSize() :
+ *  Gives the amount of memory allocated for a ZSTD_CCtx given a set of compression parameters. */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
+
+/*! ZSTD_createCCtx_advanced() :
+ *  Create a ZSTD compression context using external alloc and free functions */
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
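+
+/* Editorial sketch, not part of the original header : routing all context
+*  allocations through malloc/free via ZSTD_customMem (the opaque handle is
+*  unused here). Assumes <stdlib.h>. */
+#if 0
+static void* example_alloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
+static void  example_free (void* opaque, void* address) { (void)opaque; free(address); }
+
+static ZSTD_CCtx* create_custom_cctx(void)
+{
+    ZSTD_customMem const cmem = { example_alloc, example_free, NULL };
+    return ZSTD_createCCtx_advanced(cmem);
+}
+#endif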
+
+/*! ZSTD_sizeof_CCtx() :
+ *  Gives the amount of memory used by a given ZSTD_CCtx */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+
+/*! ZSTD_createCDict_advanced() :
+ *  Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+                                                  ZSTD_parameters params, ZSTD_customMem customMem);
+
+/*! ZSTD_sizeof_CDict() :
+ *  Gives the amount of memory used by a given ZSTD_CDict */
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getCParams() :
+*   @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+*   `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+*   same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+*   All fields of `ZSTD_frameParameters` are set to default (0) */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+*   Ensure param values remain within authorized range */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+*   optimize params for a given `srcSize` and `dictSize`.
+*   both values are optional, select `0` if unknown. */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_compress_advanced() :
+*   Same as ZSTD_compress_usingDict(), with fine-tuned control over each compression parameter */
+ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     const void* dict, size_t dictSize,
+                                           ZSTD_parameters params);
+
+
+/*--- Advanced decompression functions ---*/
+
+/*! ZSTD_isFrame() :
+ *  Tells if the content of `buffer` starts with a valid Frame Identifier.
+ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ *  Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_estimateDCtxSize() :
+ *  Gives the potential amount of memory allocated to create a ZSTD_DCtx */
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_createDCtx_advanced() :
+ *  Create a ZSTD decompression context using external alloc and free functions */
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+
+/*! ZSTD_sizeof_DCtx() :
+ *  Gives the amount of memory used by a given ZSTD_DCtx */
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+
+/*! ZSTD_sizeof_DDict() :
+ *  Gives the amount of memory used by a given ZSTD_DDict */
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within dictionary.
+ *  if @return == 0, the dictionary is not conformant with Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ *  - This is not a Zstandard frame.
+ *  To identify the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+
+
+/********************************************************************
+*  Advanced streaming functions
+********************************************************************/
+
+/*=====   Advanced Streaming compression functions  =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
+                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be zero == unknown */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);  /**< re-use compression parameters from previous init; skip dictionary loading stage; zcs must be init at least once before */
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+
+
+/*=====   Advanced Streaming decompression functions  =====*/
+typedef enum { ZSTDdsp_maxWindowSize } ZSTD_DStreamParameter_e;
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict will just be referenced, and must outlive decompression session */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+
+
+/*********************************************************************
+*  Buffer-less and synchronous inner streaming functions
+*
+*  This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+*  But it's also a complex one, with many restrictions (documented below).
+*  Prefer the normal streaming API for an easier experience.
+********************************************************************* */
+
+/**
+  Buffer-less streaming compression (synchronous mode)
+
+  A ZSTD_CCtx object is required to track streaming operations.
+  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resources.
+  A ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+  Start by initializing a context.
+  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+  or ZSTD_compressBegin_advanced(), for finer parameter control.
+  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx().
+
+  Then, consume your input using ZSTD_compressContinue().
+  There are some important considerations to keep in mind when using this advanced function :
+  - ZSTD_compressContinue() has no internal buffer. It uses only externally provided buffers.
+  - Interface is synchronous : input is consumed entirely and produces one or more compressed blocks.
+  - Caller must ensure there is enough space in `dst` to store compressed data in the worst-case scenario.
+    Worst-case evaluation is provided by ZSTD_compressBound().
+    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks).
+  - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps.
+    In that case, it will "discard" the relevant memory section from its history.
+
+  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+  It's possible to use a NULL,0 src content, in which case it will write a final empty block to end the frame.
+  Without the last block marker, frames are considered unfinished (broken) by decoders.
+
+  You can then reuse the `ZSTD_CCtx` (via ZSTD_compressBegin()) to compress a new frame.
+*/
+
+/*=====   Buffer-less streaming compression functions  =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
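+
+/* Editorial sketch, not part of the original header : one-shot use of the
+*  buffer-less interface on a fully in-memory source. `dst` must be sized
+*  with ZSTD_compressBound(srcSize); a real caller would usually feed several
+*  chunks through ZSTD_compressContinue() before ZSTD_compressEnd(). */
+#if 0
+static size_t bufferless_compress(void* dst, size_t dstCapacity,
+                                  const void* src, size_t srcSize, int level)
+{
+    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+    size_t ret = ZSTD_compressBegin(cctx, level);
+    if (!ZSTD_isError(ret))   /* last block + optional checksum written here */
+        ret = ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+    ZSTD_freeCCtx(cctx);
+    return ret;   /* compressed frame size, or an error code */
+}
+#endif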
+
+
+
+/*-
+  Buffer-less streaming decompression (synchronous mode)
+
+  A ZSTD_DCtx object is required to track streaming operations.
+  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+  A ZSTD_DCtx object can be re-used multiple times.
+
+  The first typical operation is to retrieve frame parameters, using ZSTD_getFrameParams().
+  It fills a ZSTD_frameParams structure which provides important information to correctly decode the frame,
+  such as the minimum rolling buffer size to allocate to decompress data (`windowSize`),
+  and the dictionary ID used.
+  (Note : content size is optional, it may not be present. 0 means : content size unknown).
+  Note that these values could be wrong, either because of data malformation, or because an attacker is deliberately spoofing false information.
+  As a consequence, check that values remain within valid application range, especially `windowSize`, before allocation.
+  Each application can set its own limit, depending on local restrictions. For extended interoperability, it is recommended to support at least 8 MB.
+  Frame parameters are extracted from the beginning of the compressed frame.
+  Data fragment must be large enough to ensure successful decoding, typically `ZSTD_frameHeaderSize_max` bytes.
+  @result : 0 : successful decoding, the `ZSTD_frameParams` structure is correctly filled.
+           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt,
+           or an error code, which can be tested using ZSTD_isError().
+
+  Start decompression, with ZSTD_decompressBegin() or ZSTD_decompressBegin_usingDict().
+  Alternatively, you can copy a prepared context, using ZSTD_copyDCtx().
+
+  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+  @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some metadata item.
+  It can also be an error code, which can be tested with ZSTD_isError().
+
+  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
+  They should preferably be located contiguously, prior to current block.
+  Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters.
+  ZSTD_decompressContinue() is very sensitive to contiguity :
+  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+  or that the previous contiguous segment is large enough to properly handle the maximum back-reference.
+
+  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+  Context can then be reset to start a new decompression.
+
+  Note : it's possible to know whether the next input to present is a header or a block, using ZSTD_nextInputType().
+  This information is not required to properly decode a frame.
+
+  == Special case : skippable frames ==
+
+  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+  Skippable frames will be ignored (skipped) by a decompressor. The format of skippable frames is as follows :
+  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+  c) Frame Content - any content (User Data) of length equal to Frame Size
+  For skippable frames ZSTD_decompressContinue() always returns 0.
+  For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0, which means that the frame is skippable.
+  It also returns Frame Size as fparamsPtr->frameContentSize.
+*/
+
+typedef struct {
+    unsigned long long frameContentSize;
+    unsigned windowSize;
+    unsigned dictID;
+    unsigned checksumFlag;
+} ZSTD_frameParams;
+
+/*=====   Buffer-less streaming decompression functions  =====*/
+ZSTDLIB_API size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input, see details above */
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
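+
+/* Editorial sketch, not part of the original header : buffer-less decoding of
+*  one frame held contiguously in memory, following the steps above. Window
+*  validation via ZSTD_getFrameParams() and bounds checks are abbreviated. */
+#if 0
+static size_t bufferless_decompress(void* dst, size_t dstCapacity,
+                                    const void* src, size_t srcSize)
+{
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    const char* ip = (const char*)src;
+    char* op = (char*)dst;
+    size_t ret = ZSTD_decompressBegin(dctx);
+    (void)srcSize;   /* `src` is assumed to hold exactly one frame */
+    while (!ZSTD_isError(ret)) {
+        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+        if (toRead == 0) break;   /* frame fully decoded */
+        /* ZSTD_decompressContinue() requires this exact amount of bytes */
+        ret = ZSTD_decompressContinue(dctx, op,
+                                      dstCapacity - (size_t)(op - (char*)dst),
+                                      ip, toRead);
+        if (!ZSTD_isError(ret)) { op += ret; ip += toRead; }
+    }
+    ZSTD_freeDCtx(dctx);
+    return ZSTD_isError(ret) ? ret : (size_t)(op - (char*)dst);
+}
+#endif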
+
+/**
+    Block functions
+
+    Block functions produce and decode raw zstd blocks, without frame metadata.
+    Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+    The user is responsible for keeping the information required to regenerate the data, such as compressed and content sizes.
+
+    A few rules to respect :
+    - Compressing and decompressing require a context structure
+      + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+    - It is necessary to init context before starting
+      + compression : ZSTD_compressBegin()
+      + decompression : ZSTD_decompressBegin()
+      + variants _usingDict() are also allowed
+      + copyCCtx() and copyDCtx() work too
+    - Block size is limited : it must be <= ZSTD_getBlockSizeMax()
+      + If you need to compress more, cut data into multiple blocks
+      + Consider using the regular ZSTD_compress() instead, as frame metadata costs become negligible when source size is large.
+    - When a block is considered not compressible enough, the result of ZSTD_compressBlock() will be zero.
+      In that case, nothing is produced into `dst`.
+      + User must test for such outcome and deal directly with uncompressed data
+      + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+      + In case of multiple successive blocks, the decoder must be informed of the existence of uncompressed blocks to maintain proper history.
+        Use ZSTD_insertBlock() in such a case.
+*/
+
+#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)   /* define, for static allocation */
+/*=====   Raw zstd block functions  =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert block into `dctx` history. Useful for uncompressed blocks */
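+
+/* Editorial sketch, not part of the original header : round-tripping a single
+*  raw block, handling the "not compressible" zero return described above.
+*  Both contexts are assumed already initialized (ZSTD_compressBegin() /
+*  ZSTD_decompressBegin()); assumes <string.h> for memcpy. */
+#if 0
+static size_t block_roundtrip(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
+                              void* scratch, size_t scratchCapacity,
+                              const void* src, size_t srcSize,
+                              void* out, size_t outCapacity)
+{
+    size_t const csize = ZSTD_compressBlock(cctx, scratch, scratchCapacity, src, srcSize);
+    if (ZSTD_isError(csize)) return csize;
+    if (csize == 0) {
+        /* not compressible : keep the raw data, and tell the decoder about it
+           so that it can maintain proper history */
+        memcpy(out, src, srcSize);
+        return ZSTD_insertBlock(dctx, out, srcSize);
+    }
+    return ZSTD_decompressBlock(dctx, out, outCapacity, scratch, csize);
+}
+#endif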
+
+
+#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd_cffi.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,152 @@
+# Copyright (c) 2016-present, Gregory Szorc
+# All rights reserved.
+#
+# This software may be modified and distributed under the terms
+# of the BSD license. See the LICENSE file for details.
+
+"""Python interface to the Zstandard (zstd) compression library."""
+
+from __future__ import absolute_import, unicode_literals
+
+import io
+
+from _zstd_cffi import (
+    ffi,
+    lib,
+)
+
+
+_CSTREAM_IN_SIZE = lib.ZSTD_CStreamInSize()
+_CSTREAM_OUT_SIZE = lib.ZSTD_CStreamOutSize()
+
+
+class _ZstdCompressionWriter(object):
+    def __init__(self, cstream, writer):
+        self._cstream = cstream
+        self._writer = writer
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        if not exc_type and not exc_value and not exc_tb:
+            out_buffer = ffi.new('ZSTD_outBuffer *')
+            dst_buffer = ffi.new('char[]', _CSTREAM_OUT_SIZE)
+            out_buffer.dst = dst_buffer  # keep a reference; cffi frees the
+            out_buffer.size = _CSTREAM_OUT_SIZE  # array once it is collected
+            out_buffer.pos = 0
+
+            while True:
+                res = lib.ZSTD_endStream(self._cstream, out_buffer)
+                if lib.ZSTD_isError(res):
+                    raise Exception('error ending compression stream: %s' % lib.ZSTD_getErrorName(res))
+
+                if out_buffer.pos:
+                    self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                    out_buffer.pos = 0
+
+                if res == 0:
+                    break
+
+        return False
+
+    def write(self, data):
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+        dst_buffer = ffi.new('char[]', _CSTREAM_OUT_SIZE)
+        out_buffer.dst = dst_buffer  # keep a reference; cffi frees the
+        out_buffer.size = _CSTREAM_OUT_SIZE  # array once it is collected
+        out_buffer.pos = 0
+
+        # TODO can we reuse existing memory?
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        src_buffer = ffi.new('char[]', data)
+        in_buffer.src = src_buffer  # keep a reference for the same reason
+        in_buffer.size = len(data)
+        in_buffer.pos = 0
+        while in_buffer.pos < in_buffer.size:
+            res = lib.ZSTD_compressStream(self._cstream, out_buffer, in_buffer)
+            if lib.ZSTD_isError(res):
+                raise Exception('zstd compress error: %s' % lib.ZSTD_getErrorName(res))
+
+            if out_buffer.pos:
+                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                out_buffer.pos = 0
+
+
+class ZstdCompressor(object):
+    def __init__(self, level=3, dict_data=None, compression_params=None):
+        if dict_data:
+            raise Exception('dict_data not yet supported')
+        if compression_params:
+            raise Exception('compression_params not yet supported')
+
+        self._compression_level = level
+
+    def compress(self, data):
+        # Just use the stream API for now.
+        output = io.BytesIO()
+        with self.write_to(output) as compressor:
+            compressor.write(data)
+        return output.getvalue()
+
+    def copy_stream(self, ifh, ofh):
+        cstream = self._get_cstream()
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        dst_buffer = ffi.new('char[]', _CSTREAM_OUT_SIZE)
+        out_buffer.dst = dst_buffer  # keep a reference; cffi frees the
+        out_buffer.size = _CSTREAM_OUT_SIZE  # array once it is collected
+        out_buffer.pos = 0
+
+        total_read, total_write = 0, 0
+
+        while True:
+            data = ifh.read(_CSTREAM_IN_SIZE)
+            if not data:
+                break
+
+            total_read += len(data)
+
+            src_buffer = ffi.new('char[]', data)
+            in_buffer.src = src_buffer  # keep a reference so cffi doesn't free it
+            in_buffer.size = len(data)
+            in_buffer.pos = 0
+
+            while in_buffer.pos < in_buffer.size:
+                res = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(res):
+                    raise Exception('zstd compress error: %s' %
+                                    lib.ZSTD_getErrorName(res))
+
+                if out_buffer.pos:
+                    ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                    total_write += out_buffer.pos
+                    out_buffer.pos = 0
+
+        # We've finished reading. Flush the compressor.
+        while True:
+            res = lib.ZSTD_endStream(cstream, out_buffer)
+            if lib.ZSTD_isError(res):
+                raise Exception('error ending compression stream: %s' %
+                                lib.ZSTD_getErrorName(res))
+
+            if out_buffer.pos:
+                ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                total_write += out_buffer.pos
+                out_buffer.pos = 0
+
+            if res == 0:
+                break
+
+        return total_read, total_write
+
+    def write_to(self, writer):
+        return _ZstdCompressionWriter(self._get_cstream(), writer)
+
+    def _get_cstream(self):
+        cstream = lib.ZSTD_createCStream()
+        cstream = ffi.gc(cstream, lib.ZSTD_freeCStream)
+
+        res = lib.ZSTD_initCStream(cstream, self._compression_level)
+        if lib.ZSTD_isError(res):
+            raise Exception('cannot init CStream: %s' %
+                            lib.ZSTD_getErrorName(res))
+
+        return cstream
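+
+
+# Editorial usage sketch (not part of the module): exercising the entry
+# points defined above on in-memory data.
+#
+#   import io
+#   compressor = ZstdCompressor(level=5)
+#   frame = compressor.compress(b'data to compress')
+#   with compressor.write_to(io.BytesIO()) as writer:
+#       writer.write(b'streamed data')
+#   read, written = compressor.copy_stream(io.BytesIO(b'input'), io.BytesIO())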
--- a/contrib/simplemerge	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/simplemerge	Wed Jan 18 11:43:36 2017 -0500
@@ -3,6 +3,7 @@
 from mercurial import demandimport
 demandimport.enable()
 
+import getopt
 import sys
 from mercurial.i18n import _
 from mercurial import error, simplemerge, fancyopts, util, ui
@@ -47,14 +48,14 @@
     opts = {}
     try:
         args = fancyopts.fancyopts(sys.argv[1:], options, opts)
-    except fancyopts.getopt.GetoptError as e:
+    except getopt.GetoptError as e:
         raise ParseError(e)
     if opts['help']:
         showhelp()
         sys.exit(0)
     if len(args) != 3:
             raise ParseError(_('wrong number of arguments'))
-    sys.exit(simplemerge.simplemerge(ui.ui(), *args, **opts))
+    sys.exit(simplemerge.simplemerge(ui.ui.load(), *args, **opts))
 except ParseError as e:
     sys.stdout.write("%s: %s\n" % (sys.argv[0], e))
     showhelp()
--- a/contrib/vim/patchreview.vim	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/vim/patchreview.vim	Wed Jan 18 11:43:36 2017 -0500
@@ -720,7 +720,7 @@
     let s:origtabpagenr = tabpagenr()
     silent! exe 'tabedit ' . StrippedRelativeFilePath
     if exists('patchcmd')
-      " modelines in loaded files mess with diff comparision
+      " modelines in loaded files mess with diff comparison
       let s:keep_modeline=&modeline
       let &modeline=0
       silent! exe 'vert diffsplit ' . tmpname . '.file'
--- a/contrib/wix/help.wxs	Wed Jan 04 10:51:37 2017 -0600
+++ b/contrib/wix/help.wxs	Wed Jan 18 11:43:36 2017 -0500
@@ -25,11 +25,9 @@
           <File Name="hgignore.txt" />
           <File Name="hgweb.txt" />
           <File Name="merge-tools.txt" />
-          <File Name="multirevs.txt" />
           <File Name="patterns.txt" />
           <File Name="phases.txt" />
           <File Name="revisions.txt" />
-          <File Name="revsets.txt" />
           <File Name="scripting.txt" />
           <File Name="subrepos.txt" />
           <File Name="templates.txt" />
--- a/doc/check-seclevel.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/doc/check-seclevel.py	Wed Jan 18 11:43:36 2017 -0500
@@ -158,7 +158,7 @@
 
     (options, args) = optparser.parse_args()
 
-    ui = uimod.ui()
+    ui = uimod.ui.load()
     ui.setconfig('ui', 'verbose', options.verbose, '--verbose')
     ui.setconfig('ui', 'debug', options.debug, '--debug')
 
--- a/doc/gendoc.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/doc/gendoc.py	Wed Jan 18 11:43:36 2017 -0500
@@ -217,7 +217,7 @@
     if len(sys.argv) > 1:
         doc = sys.argv[1]
 
-    ui = uimod.ui()
+    ui = uimod.ui.load()
     if doc == 'hg.1.gendoc':
         showdoc(ui)
     else:
--- a/hgext/bugzilla.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/bugzilla.py	Wed Jan 18 11:43:36 2017 -0500
@@ -571,9 +571,9 @@
         self.send_user_agent(h)
         self.send_content(h, request_body)
 
-        # Deal with differences between Python 2.4-2.6 and 2.7.
+        # Deal with differences between Python 2.6 and 2.7.
         # In the former h is a HTTP(S). In the latter it's a
-        # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
+        # HTTP(S)Connection. Luckily, the 2.6 implementation of
         # HTTP(S) has an underlying HTTP(S)Connection, so extract
         # that and use it.
         try:
--- a/hgext/chgserver.py	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,643 +0,0 @@
-# chgserver.py - command server extension for cHg
-#
-# Copyright 2011 Yuya Nishihara <yuya@tcha.org>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-"""command server extension for cHg (EXPERIMENTAL)
-
-'S' channel (read/write)
-    propagate ui.system() request to client
-
-'attachio' command
-    attach client's stdio passed by sendmsg()
-
-'chdir' command
-    change current directory
-
-'getpager' command
-    checks if pager is enabled and which pager should be executed
-
-'setenv' command
-    replace os.environ completely
-
-'setumask' command
-    set umask
-
-'validate' command
-    reload the config and check if the server is up to date
-
-Config
-------
-
-::
-
-  [chgserver]
-  idletimeout = 3600 # seconds, after which an idle server will exit
-  skiphash = False   # whether to skip config or env change checks
-"""
-
-from __future__ import absolute_import
-
-import errno
-import hashlib
-import inspect
-import os
-import re
-import signal
-import struct
-import sys
-import time
-
-from mercurial.i18n import _
-
-from mercurial import (
-    cmdutil,
-    commands,
-    commandserver,
-    dispatch,
-    error,
-    extensions,
-    osutil,
-    util,
-)
-
-# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
-# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
-# be specifying the version(s) of Mercurial they are tested with, or
-# leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
-
-_log = commandserver.log
-
-def _hashlist(items):
-    """return sha1 hexdigest for a list"""
-    return hashlib.sha1(str(items)).hexdigest()
-
-# sensitive config sections affecting confighash
-_configsections = [
-    'alias',  # affects global state commands.table
-    'extdiff',  # uisetup will register new commands
-    'extensions',
-]
-
-# sensitive environment variables affecting confighash
-_envre = re.compile(r'''\A(?:
-                    CHGHG
-                    |HG.*
-                    |LANG(?:UAGE)?
-                    |LC_.*
-                    |LD_.*
-                    |PATH
-                    |PYTHON.*
-                    |TERM(?:INFO)?
-                    |TZ
-                    )\Z''', re.X)
-
-def _confighash(ui):
-    """return a quick hash for detecting config/env changes
-
-    confighash is the hash of sensitive config items and environment variables.
-
-    for chgserver, it is designed that once confighash changes, the server is
-    not qualified to serve its client and should redirect the client to a new
-    server. different from mtimehash, confighash change will not mark the
-    server outdated and exit since the user can have different configs at the
-    same time.
-    """
-    sectionitems = []
-    for section in _configsections:
-        sectionitems.append(ui.configitems(section))
-    sectionhash = _hashlist(sectionitems)
-    envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)]
-    envhash = _hashlist(sorted(envitems))
-    return sectionhash[:6] + envhash[:6]
-
-def _getmtimepaths(ui):
-    """get a list of paths that should be checked to detect change
-
-    The list will include:
-    - extensions (will not cover all files for complex extensions)
-    - mercurial/__version__.py
-    - python binary
-    """
-    modules = [m for n, m in extensions.extensions(ui)]
-    try:
-        from mercurial import __version__
-        modules.append(__version__)
-    except ImportError:
-        pass
-    files = [sys.executable]
-    for m in modules:
-        try:
-            files.append(inspect.getabsfile(m))
-        except TypeError:
-            pass
-    return sorted(set(files))
-
-def _mtimehash(paths):
-    """return a quick hash for detecting file changes
-
-    mtimehash calls stat on given paths and calculate a hash based on size and
-    mtime of each file. mtimehash does not read file content because reading is
-    expensive. therefore it's not 100% reliable for detecting content changes.
-    it's possible to return different hashes for same file contents.
-    it's also possible to return a same hash for different file contents for
-    some carefully crafted situation.
-
-    for chgserver, it is designed that once mtimehash changes, the server is
-    considered outdated immediately and should no longer provide service.
-
-    mtimehash is not included in confighash because we only know the paths of
-    extensions after importing them (there is imp.find_module but that faces
-    race conditions). We need to calculate confighash without importing.
-    """
-    def trystat(path):
-        try:
-            st = os.stat(path)
-            return (st.st_mtime, st.st_size)
-        except OSError:
-            # could be ENOENT, EPERM etc. not fatal in any case
-            pass
-    return _hashlist(map(trystat, paths))[:12]
-
-class hashstate(object):
-    """a structure storing confighash, mtimehash, paths used for mtimehash"""
-    def __init__(self, confighash, mtimehash, mtimepaths):
-        self.confighash = confighash
-        self.mtimehash = mtimehash
-        self.mtimepaths = mtimepaths
-
-    @staticmethod
-    def fromui(ui, mtimepaths=None):
-        if mtimepaths is None:
-            mtimepaths = _getmtimepaths(ui)
-        confighash = _confighash(ui)
-        mtimehash = _mtimehash(mtimepaths)
-        _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
-        return hashstate(confighash, mtimehash, mtimepaths)
-
-# copied from hgext/pager.py:uisetup()
-def _setuppagercmd(ui, options, cmd):
-    if not ui.formatted():
-        return
-
-    p = ui.config("pager", "pager", os.environ.get("PAGER"))
-    usepager = False
-    always = util.parsebool(options['pager'])
-    auto = options['pager'] == 'auto'
-
-    if not p:
-        pass
-    elif always:
-        usepager = True
-    elif not auto:
-        usepager = False
-    else:
-        attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
-        attend = ui.configlist('pager', 'attend', attended)
-        ignore = ui.configlist('pager', 'ignore')
-        cmds, _ = cmdutil.findcmd(cmd, commands.table)
-
-        for cmd in cmds:
-            var = 'attend-%s' % cmd
-            if ui.config('pager', var):
-                usepager = ui.configbool('pager', var)
-                break
-            if (cmd in attend or
-                (cmd not in ignore and not attend)):
-                usepager = True
-                break
-
-    if usepager:
-        ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
-        ui.setconfig('ui', 'interactive', False, 'pager')
-        return p
-
-def _newchgui(srcui, csystem):
-    class chgui(srcui.__class__):
-        def __init__(self, src=None):
-            super(chgui, self).__init__(src)
-            if src:
-                self._csystem = getattr(src, '_csystem', csystem)
-            else:
-                self._csystem = csystem
-
-        def system(self, cmd, environ=None, cwd=None, onerr=None,
-                   errprefix=None):
-            # fallback to the original system method if the output needs to be
-            # captured (to self._buffers), or the output stream is not stdout
-            # (e.g. stderr, cStringIO), because the chg client is not aware of
-            # these situations and will behave differently (write to stdout).
-            if (any(s[1] for s in self._bufferstates)
-                or not util.safehasattr(self.fout, 'fileno')
-                or self.fout.fileno() != sys.stdout.fileno()):
-                return super(chgui, self).system(cmd, environ, cwd, onerr,
-                                                 errprefix)
-            # copied from mercurial/util.py:system()
-            self.flush()
-            def py2shell(val):
-                if val is None or val is False:
-                    return '0'
-                if val is True:
-                    return '1'
-                return str(val)
-            env = os.environ.copy()
-            if environ:
-                env.update((k, py2shell(v)) for k, v in environ.iteritems())
-            env['HG'] = util.hgexecutable()
-            rc = self._csystem(cmd, env, cwd)
-            if rc and onerr:
-                errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
-                                    util.explainexit(rc)[0])
-                if errprefix:
-                    errmsg = '%s: %s' % (errprefix, errmsg)
-                raise onerr(errmsg)
-            return rc
-
-    return chgui(srcui)
-
-def _loadnewui(srcui, args):
-    newui = srcui.__class__()
-    for a in ['fin', 'fout', 'ferr', 'environ']:
-        setattr(newui, a, getattr(srcui, a))
-    if util.safehasattr(srcui, '_csystem'):
-        newui._csystem = srcui._csystem
-
-    # internal config: extensions.chgserver
-    newui.setconfig('extensions', 'chgserver',
-                    srcui.config('extensions', 'chgserver'), '--config')
-
-    # command line args
-    args = args[:]
-    dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))
-
-    # stolen from tortoisehg.util.copydynamicconfig()
-    for section, name, value in srcui.walkconfig():
-        source = srcui.configsource(section, name)
-        if ':' in source or source == '--config':
-            # path:line or command line
-            continue
-        if source == 'none':
-            # ui.configsource returns 'none' by default
-            source = ''
-        newui.setconfig(section, name, value, source)
-
-    # load wd and repo config, copied from dispatch.py
-    cwds = dispatch._earlygetopt(['--cwd'], args)
-    cwd = cwds and os.path.realpath(cwds[-1]) or None
-    rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
-    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
-
-    return (newui, newlui)
-
-class channeledsystem(object):
-    """Propagate ui.system() request in the following format:
-
-    payload length (unsigned int),
-    cmd, '\0',
-    cwd, '\0',
-    envkey, '=', val, '\0',
-    ...
-    envkey, '=', val
-
-    and waits:
-
-    exitcode length (unsigned int),
-    exitcode (int)
-    """
-    def __init__(self, in_, out, channel):
-        self.in_ = in_
-        self.out = out
-        self.channel = channel
-
-    def __call__(self, cmd, environ, cwd):
-        args = [util.quotecommand(cmd), os.path.abspath(cwd or '.')]
-        args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
-        data = '\0'.join(args)
-        self.out.write(struct.pack('>cI', self.channel, len(data)))
-        self.out.write(data)
-        self.out.flush()
-
-        length = self.in_.read(4)
-        length, = struct.unpack('>I', length)
-        if length != 4:
-            raise error.Abort(_('invalid response'))
-        rc, = struct.unpack('>i', self.in_.read(4))
-        return rc
-
-_iochannels = [
-    # server.ch, ui.fp, mode
-    ('cin', 'fin', 'rb'),
-    ('cout', 'fout', 'wb'),
-    ('cerr', 'ferr', 'wb'),
-]
-
-class chgcmdserver(commandserver.server):
-    def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
-        super(chgcmdserver, self).__init__(
-            _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout)
-        self.clientsock = sock
-        self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
-        self.hashstate = hashstate
-        self.baseaddress = baseaddress
-        if hashstate is not None:
-            self.capabilities = self.capabilities.copy()
-            self.capabilities['validate'] = chgcmdserver.validate
-
-    def cleanup(self):
-        super(chgcmdserver, self).cleanup()
-        # dispatch._runcatch() does not flush outputs if exception is not
-        # handled by dispatch._dispatch()
-        self.ui.flush()
-        self._restoreio()
-
-    def attachio(self):
-        """Attach to client's stdio passed via unix domain socket; all
-        channels except cresult will no longer be used
-        """
-        # tell client to sendmsg() with 1-byte payload, which makes it
-        # distinctive from "attachio\n" command consumed by client.read()
-        self.clientsock.sendall(struct.pack('>cI', 'I', 1))
-        clientfds = osutil.recvfds(self.clientsock.fileno())
-        _log('received fds: %r\n' % clientfds)
-
-        ui = self.ui
-        ui.flush()
-        first = self._saveio()
-        for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
-            assert fd > 0
-            fp = getattr(ui, fn)
-            os.dup2(fd, fp.fileno())
-            os.close(fd)
-            if not first:
-                continue
-            # reset buffering mode when client is first attached. as we want
-            # to see output immediately on pager, the mode stays unchanged
-            # when client re-attached. ferr is unchanged because it should
-            # be unbuffered no matter if it is a tty or not.
-            if fn == 'ferr':
-                newfp = fp
-            else:
-                # make it line buffered explicitly because the default is
-                # decided on first write(), where fout could be a pager.
-                if fp.isatty():
-                    bufsize = 1  # line buffered
-                else:
-                    bufsize = -1  # system default
-                newfp = os.fdopen(fp.fileno(), mode, bufsize)
-                setattr(ui, fn, newfp)
-            setattr(self, cn, newfp)
-
-        self.cresult.write(struct.pack('>i', len(clientfds)))
-
-    def _saveio(self):
-        if self._oldios:
-            return False
-        ui = self.ui
-        for cn, fn, _mode in _iochannels:
-            ch = getattr(self, cn)
-            fp = getattr(ui, fn)
-            fd = os.dup(fp.fileno())
-            self._oldios.append((ch, fp, fd))
-        return True
-
-    def _restoreio(self):
-        ui = self.ui
-        for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
-            newfp = getattr(ui, fn)
-            # close newfp while it's associated with client; otherwise it
-            # would be closed when newfp is deleted
-            if newfp is not fp:
-                newfp.close()
-            # restore original fd: fp is open again
-            os.dup2(fd, fp.fileno())
-            os.close(fd)
-            setattr(self, cn, ch)
-            setattr(ui, fn, fp)
-        del self._oldios[:]
-
-    def validate(self):
-        """Reload the config and check if the server is up to date
-
-        Read a list of '\0' separated arguments.
-        Write a non-empty list of '\0' separated instruction strings or '\0'
-        if the list is empty.
-        An instruction string could be either:
-            - "unlink $path", the client should unlink the path to stop the
-              outdated server.
-            - "redirect $path", the client should attempt to connect to $path
-              first. If it does not work, start a new server. It implies
-              "reconnect".
-            - "exit $n", the client should exit directly with code n.
-              This may happen if we cannot parse the config.
-            - "reconnect", the client should close the connection and
-              reconnect.
-        If neither "reconnect" nor "redirect" is included in the instruction
-        list, the client can continue with this server after completing all
-        the instructions.
-        """
-        args = self._readlist()
-        try:
-            self.ui, lui = _loadnewui(self.ui, args)
-        except error.ParseError as inst:
-            dispatch._formatparse(self.ui.warn, inst)
-            self.ui.flush()
-            self.cresult.write('exit 255')
-            return
-        newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
-        insts = []
-        if newhash.mtimehash != self.hashstate.mtimehash:
-            addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
-            insts.append('unlink %s' % addr)
-            # mtimehash is empty if one or more extensions fail to load.
-            # to be compatible with hg, still serve the client this time.
-            if self.hashstate.mtimehash:
-                insts.append('reconnect')
-        if newhash.confighash != self.hashstate.confighash:
-            addr = _hashaddress(self.baseaddress, newhash.confighash)
-            insts.append('redirect %s' % addr)
-        _log('validate: %s\n' % insts)
-        self.cresult.write('\0'.join(insts) or '\0')
-
-    def chdir(self):
-        """Change current directory
-
-        Note that the behavior of --cwd option is bit different from this.
-        It does not affect --config parameter.
-        """
-        path = self._readstr()
-        if not path:
-            return
-        _log('chdir to %r\n' % path)
-        os.chdir(path)
-
-    def setumask(self):
-        """Change umask"""
-        mask = struct.unpack('>I', self._read(4))[0]
-        _log('setumask %r\n' % mask)
-        os.umask(mask)
-
-    def getpager(self):
-        """Read cmdargs and write pager command to r-channel if enabled
-
-        If pager isn't enabled, this writes '\0' because channeledoutput
-        does not allow to write empty data.
-        """
-        args = self._readlist()
-        try:
-            cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
-                                                                     args)
-        except (error.Abort, error.AmbiguousCommand, error.CommandError,
-                error.UnknownCommand):
-            cmd = None
-            options = {}
-        if not cmd or 'pager' not in options:
-            self.cresult.write('\0')
-            return
-
-        pagercmd = _setuppagercmd(self.ui, options, cmd)
-        if pagercmd:
-            # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
-            # we can exit if the pipe to the pager is closed
-            if util.safehasattr(signal, 'SIGPIPE') and \
-                    signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
-                signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-            self.cresult.write(pagercmd)
-        else:
-            self.cresult.write('\0')
-
-    def setenv(self):
-        """Clear and update os.environ
-
-        Note that not all variables can make an effect on the running process.
-        """
-        l = self._readlist()
-        try:
-            newenv = dict(s.split('=', 1) for s in l)
-        except ValueError:
-            raise ValueError('unexpected value in setenv request')
-        _log('setenv: %r\n' % sorted(newenv.keys()))
-        os.environ.clear()
-        os.environ.update(newenv)
-
-    capabilities = commandserver.server.capabilities.copy()
-    capabilities.update({'attachio': attachio,
-                         'chdir': chdir,
-                         'getpager': getpager,
-                         'setenv': setenv,
-                         'setumask': setumask})
-
-def _tempaddress(address):
-    return '%s.%d.tmp' % (address, os.getpid())
-
-def _hashaddress(address, hashstr):
-    return '%s-%s' % (address, hashstr)
-
-class chgunixservicehandler(object):
-    """Set of operations for chg services"""
-
-    pollinterval = 1  # [sec]
-
-    def __init__(self, ui):
-        self.ui = ui
-        self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
-        self._lastactive = time.time()
-
-    def bindsocket(self, sock, address):
-        self._inithashstate(address)
-        self._checkextensions()
-        self._bind(sock)
-        self._createsymlink()
-
-    def _inithashstate(self, address):
-        self._baseaddress = address
-        if self.ui.configbool('chgserver', 'skiphash', False):
-            self._hashstate = None
-            self._realaddress = address
-            return
-        self._hashstate = hashstate.fromui(self.ui)
-        self._realaddress = _hashaddress(address, self._hashstate.confighash)
-
-    def _checkextensions(self):
-        if not self._hashstate:
-            return
-        if extensions.notloaded():
-            # one or more extensions failed to load. mtimehash becomes
-            # meaningless because we do not know the paths of those extensions.
-            # set mtimehash to an illegal hash value to invalidate the server.
-            self._hashstate.mtimehash = ''
-
-    def _bind(self, sock):
-        # use a unique temp address so we can stat the file and do ownership
-        # check later
-        tempaddress = _tempaddress(self._realaddress)
-        util.bindunixsocket(sock, tempaddress)
-        self._socketstat = os.stat(tempaddress)
-        # rename will replace the old socket file if exists atomically. the
-        # old server will detect ownership change and exit.
-        util.rename(tempaddress, self._realaddress)
-
-    def _createsymlink(self):
-        if self._baseaddress == self._realaddress:
-            return
-        tempaddress = _tempaddress(self._baseaddress)
-        os.symlink(os.path.basename(self._realaddress), tempaddress)
-        util.rename(tempaddress, self._baseaddress)
-
-    def _issocketowner(self):
-        try:
-            stat = os.stat(self._realaddress)
-            return (stat.st_ino == self._socketstat.st_ino and
-                    stat.st_mtime == self._socketstat.st_mtime)
-        except OSError:
-            return False
-
-    def unlinksocket(self, address):
-        if not self._issocketowner():
-            return
-        # it is possible to have a race condition here that we may
-        # remove another server's socket file. but that's okay
-        # since that server will detect and exit automatically and
-        # the client will start a new server on demand.
-        try:
-            os.unlink(self._realaddress)
-        except OSError as exc:
-            if exc.errno != errno.ENOENT:
-                raise
-
-    def printbanner(self, address):
-        # no "listening at" message should be printed to simulate hg behavior
-        pass
-
-    def shouldexit(self):
-        if not self._issocketowner():
-            self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
-            return True
-        if time.time() - self._lastactive > self._idletimeout:
-            self.ui.debug('being idle too long. exiting.\n')
-            return True
-        return False
-
-    def newconnection(self):
-        self._lastactive = time.time()
-
-    def createcmdserver(self, repo, conn, fin, fout):
-        return chgcmdserver(self.ui, repo, fin, fout, conn,
-                            self._hashstate, self._baseaddress)
-
-def chgunixservice(ui, repo, opts):
-    if repo:
-        # one chgserver can serve multiple repos. drop repo infomation
-        ui.setconfig('bundle', 'mainreporoot', '', 'repo')
-    h = chgunixservicehandler(ui)
-    return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
-
-def uisetup(ui):
-    commandserver._servicemap['chgunix'] = chgunixservice
-
-    # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
-    # start another chg. drop it to avoid possible side effects.
-    if 'CHGINTERNALMARK' in os.environ:
-        del os.environ['CHGINTERNALMARK']
--- a/hgext/color.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/color.py	Wed Jan 18 11:43:36 2017 -0500
@@ -164,14 +164,15 @@
 
 from __future__ import absolute_import
 
-import os
-
 from mercurial.i18n import _
 from mercurial import (
     cmdutil,
+    color,
     commands,
     dispatch,
+    encoding,
     extensions,
+    pycompat,
     subrepo,
     ui as uimod,
     util,
@@ -197,7 +198,6 @@
 def _terminfosetup(ui, mode):
     '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
 
-    global _terminfo_params
     # If we failed to load curses, we go ahead and return.
     if not _terminfo_params:
         return
@@ -215,7 +215,7 @@
     try:
         curses.setupterm()
     except curses.error as e:
-        _terminfo_params = {}
+        _terminfo_params.clear()
         return
 
     for key, (b, e, c) in _terminfo_params.items():
@@ -232,11 +232,9 @@
         if mode == "terminfo":
             ui.warn(_("no terminfo entry for setab/setaf: reverting to "
               "ECMA-48 color\n"))
-        _terminfo_params = {}
+        _terminfo_params.clear()
 
 def _modesetup(ui, coloropt):
-    global _terminfo_params
-
     if coloropt == 'debug':
         return 'debug'
 
@@ -245,7 +243,8 @@
     if not always and not auto:
         return None
 
-    formatted = always or (os.environ.get('TERM') != 'dumb' and ui.formatted())
+    formatted = (always or (encoding.environ.get('TERM') != 'dumb'
+                 and ui.formatted()))
 
     mode = ui.config('color', 'mode', 'auto')
 
@@ -255,8 +254,8 @@
 
     realmode = mode
     if mode == 'auto':
-        if os.name == 'nt':
-            term = os.environ.get('TERM')
+        if pycompat.osname == 'nt':
+            term = encoding.environ.get('TERM')
             # TERM won't be defined in a vanilla cmd.exe environment.
 
             # UNIX-like environments on Windows such as Cygwin and MSYS will
@@ -275,18 +274,18 @@
 
     def modewarn():
         # only warn if color.mode was explicitly set and we're in
-        # an interactive terminal
-        if mode == realmode and ui.interactive():
+        # a formatted terminal
+        if mode == realmode and ui.formatted():
             ui.warn(_('warning: failed to set color mode to %s\n') % mode)
 
     if realmode == 'win32':
-        _terminfo_params = {}
+        _terminfo_params.clear()
         if not w32effects:
             modewarn()
             return None
         _effects.update(w32effects)
     elif realmode == 'ansi':
-        _terminfo_params = {}
+        _terminfo_params.clear()
     elif realmode == 'terminfo':
         _terminfosetup(ui, mode)
         if not _terminfo_params:
@@ -325,61 +324,6 @@
 except ImportError:
     _terminfo_params = {}
 
-_styles = {'grep.match': 'red bold',
-           'grep.linenumber': 'green',
-           'grep.rev': 'green',
-           'grep.change': 'green',
-           'grep.sep': 'cyan',
-           'grep.filename': 'magenta',
-           'grep.user': 'magenta',
-           'grep.date': 'magenta',
-           'bookmarks.active': 'green',
-           'branches.active': 'none',
-           'branches.closed': 'black bold',
-           'branches.current': 'green',
-           'branches.inactive': 'none',
-           'diff.changed': 'white',
-           'diff.deleted': 'red',
-           'diff.diffline': 'bold',
-           'diff.extended': 'cyan bold',
-           'diff.file_a': 'red bold',
-           'diff.file_b': 'green bold',
-           'diff.hunk': 'magenta',
-           'diff.inserted': 'green',
-           'diff.tab': '',
-           'diff.trailingwhitespace': 'bold red_background',
-           'changeset.public' : '',
-           'changeset.draft' : '',
-           'changeset.secret' : '',
-           'diffstat.deleted': 'red',
-           'diffstat.inserted': 'green',
-           'histedit.remaining': 'red bold',
-           'ui.prompt': 'yellow',
-           'log.changeset': 'yellow',
-           'patchbomb.finalsummary': '',
-           'patchbomb.from': 'magenta',
-           'patchbomb.to': 'cyan',
-           'patchbomb.subject': 'green',
-           'patchbomb.diffstats': '',
-           'rebase.rebased': 'blue',
-           'rebase.remaining': 'red bold',
-           'resolve.resolved': 'green bold',
-           'resolve.unresolved': 'red bold',
-           'shelve.age': 'cyan',
-           'shelve.newest': 'green bold',
-           'shelve.name': 'blue bold',
-           'status.added': 'green bold',
-           'status.clean': 'none',
-           'status.copied': 'none',
-           'status.deleted': 'cyan bold underline',
-           'status.ignored': 'black bold',
-           'status.modified': 'blue bold',
-           'status.removed': 'red bold',
-           'status.unknown': 'magenta bold underline',
-           'tags.normal': 'green',
-           'tags.local': 'black bold'}
-
-
 def _effect_str(effect):
     '''Helper function for render_effects().'''
 
@@ -415,10 +359,6 @@
         stop = _effect_str('none')
     return ''.join([start, text, stop])
 
-def extstyles():
-    for name, ext in extensions.extensions():
-        _styles.update(getattr(ext, 'colortable', {}))
-
 def valideffect(effect):
     'Determine if the effect is valid or not.'
     good = False
@@ -442,7 +382,7 @@
                     ui.warn(_("ignoring unknown color/effect %r "
                               "(configured in color.%s)\n")
                             % (e, status))
-            _styles[status] = ' '.join(good)
+            color._styles[status] = ' '.join(good)
 
 class colorui(uimod.ui):
     _colormode = 'ansi'
@@ -495,15 +435,15 @@
 
         effects = []
         for l in label.split():
-            s = _styles.get(l, '')
+            s = color._styles.get(l, '')
             if s:
                 effects.append(s)
             elif valideffect(l):
                 effects.append(l)
         effects = ' '.join(effects)
         if effects:
-            return '\n'.join([render_effects(s, effects)
-                              for s in msg.split('\n')])
+            return '\n'.join([render_effects(line, effects)
+                              for line in msg.split('\n')])
         return msg
 
 def uisetup(ui):
@@ -516,7 +456,6 @@
         mode = _modesetup(ui_, opts['color'])
         colorui._colormode = mode
         if mode and mode != 'debug':
-            extstyles()
             configstyles(ui_)
         return orig(ui_, opts, cmd, cmdfunc)
     def colorgit(orig, gitsub, commands, env=None, stream=False, cwd=None):
@@ -536,24 +475,52 @@
          _("when to colorize (boolean, always, auto, never, or debug)"),
          _('TYPE')))
 
-@command('debugcolor', [], 'hg debugcolor')
+@command('debugcolor',
+        [('', 'style', None, _('show all configured styles'))],
+        'hg debugcolor')
 def debugcolor(ui, repo, **opts):
-    global _styles
-    _styles = {}
-    for effect in _effects.keys():
-        _styles[effect] = effect
-    if _terminfo_params:
-        for k, v in ui.configitems('color'):
-            if k.startswith('color.'):
-                _styles[k] = k[6:]
-            elif k.startswith('terminfo.'):
-                _styles[k] = k[9:]
+    """show available color, effects or style"""
     ui.write(('color mode: %s\n') % ui._colormode)
-    ui.write(_('available colors:\n'))
-    for colorname, label in _styles.items():
-        ui.write(('%s\n') % colorname, label=label)
+    if opts.get('style'):
+        return _debugdisplaystyle(ui)
+    else:
+        return _debugdisplaycolor(ui)
 
-if os.name != 'nt':
+def _debugdisplaycolor(ui):
+    oldstyle = color._styles.copy()
+    try:
+        color._styles.clear()
+        for effect in _effects.keys():
+            color._styles[effect] = effect
+        if _terminfo_params:
+            for k, v in ui.configitems('color'):
+                if k.startswith('color.'):
+                    color._styles[k] = k[6:]
+                elif k.startswith('terminfo.'):
+                    color._styles[k] = k[9:]
+        ui.write(_('available colors:\n'))
+        # sort labels with a '_' after the others to group '_background' entries.
+        items = sorted(color._styles.items(),
+                       key=lambda i: ('_' in i[0], i[0], i[1]))
+        for colorname, label in items:
+            ui.write(('%s\n') % colorname, label=label)
+    finally:
+        color._styles.clear()
+        color._styles.update(oldstyle)
+
+def _debugdisplaystyle(ui):
+    ui.write(_('available styles:\n'))
+    width = max(len(s) for s in color._styles)
+    for label, effects in sorted(color._styles.items()):
+        ui.write('%s' % label, label=label)
+        if effects:
+            ui.write(': ')
+            ui.write(' ' * (max(0, width - len(label))))
+            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
+        ui.write('\n')
+
+if pycompat.osname != 'nt':
     w32effects = None
 else:
     import ctypes
@@ -661,7 +628,7 @@
 
         # determine console attributes based on labels
         for l in label.split():
-            style = _styles.get(l, '')
+            style = color._styles.get(l, '')
             for effect in style.split():
                 try:
                     attr = mapcolor(w32effects[effect], attr)
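
Both the ANSI path (``colorui.label``) and the win32 path above resolve each space-separated word of a label through the shared style table, falling back to bare effect names. A minimal sketch of that lookup, using plain dicts in place of ``color._styles`` and ``_effects``:

    # stand-ins for _effects and color._styles
    effects = {'none': 0, 'red': 31, 'green': 32, 'bold': 1}
    styles = {'status.added': 'green bold'}

    def resolve(label):
        out = []
        for word in label.split():
            s = styles.get(word, '')
            if s:
                out.append(s)      # known style: substitute its effects
            elif word in effects:
                out.append(word)   # bare effect name, used directly
        return ' '.join(out)

    print(resolve('status.added'))       # 'green bold'
    print(resolve('bold unknownlabel'))  # 'bold' (unknown words are dropped)
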
--- a/hgext/convert/__init__.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/__init__.py	Wed Jan 18 11:43:36 2017 -0500
@@ -320,10 +320,56 @@
         is very expensive for large projects, and is only effective when
         ``convert.git.similarity`` is greater than 0. The default is False.
 
+    :convert.git.renamelimit: perform rename and copy detection up to this
+        many changed files in a commit. Increasing this will make rename
+        and copy detection more accurate but will significantly slow down
+        computation on large projects. The option is only relevant if
+        ``convert.git.similarity`` is greater than 0. The default is
+        ``400``.
+
+    :convert.git.committeractions: list of actions to take when processing
+        author and committer values.
+
+        Git commits have separate author (who wrote the commit) and committer
+        (who applied the commit) fields. Not all destinations support separate
+        author and committer fields (including Mercurial). This config option
+        controls what to do with these author and committer fields during
+        conversion.
+
+        A value of ``messagedifferent`` will append a ``committer: ...``
+        line to the commit message if the Git committer is different from the
+        author. The prefix of that line can be specified using the syntax
+        ``messagedifferent=<prefix>``, e.g. ``messagedifferent=git-committer:``.
+        When a prefix is specified, a space will always be inserted between the
+        prefix and the value.
+
+        ``messagealways`` behaves like ``messagedifferent`` except it will
+        always result in a ``committer: ...`` line being appended to the commit
+        message. This value is mutually exclusive with ``messagedifferent``.
+
+        ``dropcommitter`` will remove references to the committer. Only
+        references to the author will remain. Actions that add references
+        to the committer will have no effect when this is set.
+
+        ``replaceauthor`` will replace the value of the author field with
+        the committer. Other actions that add references to the committer
+        will still take effect when this is set.
+
+        The default is ``messagedifferent``.
+
+    :convert.git.extrakeys: list of extra keys from commit metadata to copy to
+        the destination. Some Git repositories store extra metadata in commits.
+        By default, this extra metadata will be lost during conversion.
+        Setting this config option can retain that metadata. Some built-in
+        keys such as ``parent`` and ``branch`` are not allowed to be copied.
+
     :convert.git.remoteprefix: remote refs are converted as bookmarks with
         ``convert.git.remoteprefix`` as a prefix followed by a /. The default
         is 'remote'.
 
+    :convert.git.saverev: whether to store the original Git commit ID in the
+        metadata of the destination commit. The default is True.
+
     :convert.git.skipsubmodules: does not convert root level .gitmodules files
         or files with 160000 mode indicating a submodule. Default is False.
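
Putting the new options together, a sketch of a conversion config (``svn-rev`` is a hypothetical extra key; substitute whatever metadata your repository actually carries)::

    [convert]
    git.similarity = 50
    git.renamelimit = 1000
    git.committeractions = messagedifferent=git-committer:
    git.extrakeys = svn-rev
    git.saverev = True
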
 
--- a/hgext/convert/common.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/common.py	Wed Jan 18 11:43:36 2017 -0500
@@ -55,7 +55,7 @@
 
 class commit(object):
     def __init__(self, author, date, desc, parents, branch=None, rev=None,
-                 extra={}, sortkey=None, saverev=True, phase=phases.draft,
+                 extra=None, sortkey=None, saverev=True, phase=phases.draft,
                  optparents=None):
         self.author = author or 'unknown'
         self.date = date or '0 0'
@@ -64,7 +64,7 @@
         self.optparents = optparents or [] # will be used if already converted
         self.branch = branch
         self.rev = rev
-        self.extra = extra
+        self.extra = extra or {}
         self.sortkey = sortkey
         self.saverev = saverev
         self.phase = phase
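
The switch from ``extra={}`` to ``extra=None`` avoids Python's shared-mutable-default pitfall: a default dict is created once, at function definition time, and reused by every call that omits the argument. A minimal illustration:

    def bad(extra={}):
        extra['n'] = extra.get('n', 0) + 1
        return extra

    print(bad())   # {'n': 1}
    print(bad())   # {'n': 2} -- the same dict object, reused across calls

    def good(extra=None):
        extra = extra or {}
        extra['n'] = extra.get('n', 0) + 1
        return extra

    print(good())  # {'n': 1}
    print(good())  # {'n': 1} -- a fresh dict on every call
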
@@ -454,7 +454,7 @@
             if err.errno != errno.ENOENT:
                 raise
             return
-        for i, line in enumerate(fp):
+        for i, line in enumerate(util.iterfile(fp)):
             line = line.splitlines()[0].rstrip()
             if not line:
                 # Ignore blank lines
--- a/hgext/convert/convcmd.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/convcmd.py	Wed Jan 18 11:43:36 2017 -0500
@@ -201,7 +201,7 @@
         m = {}
         try:
             fp = open(path, 'r')
-            for i, line in enumerate(fp):
+            for i, line in enumerate(util.iterfile(fp)):
                 line = line.splitlines()[0].rstrip()
                 if not line:
                     # Ignore blank lines
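
Both enumerate() loops now go through util.iterfile instead of iterating the file object directly, sidestepping Python 2's file iterator, whose internal read-ahead buffer can misbehave around interrupted reads. A readline-based sketch in that spirit (not the exact util.iterfile implementation):

    def iterfile_sketch(fp):
        # readline() returns '' at EOF, so the two-argument iter()
        # stops there; no read-ahead buffer is involved
        return iter(fp.readline, '')

    # usage:
    #   for i, line in enumerate(iterfile_sketch(open('authors.map'))):
    #       ...
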
--- a/hgext/convert/cvs.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/cvs.py	Wed Jan 18 11:43:36 2017 -0500
@@ -15,6 +15,7 @@
 from mercurial import (
     encoding,
     error,
+    pycompat,
     util,
 )
 
@@ -69,7 +70,7 @@
                 raise error.Abort(_('revision %s is not a patchset number')
                                  % self.revs[0])
 
-        d = os.getcwd()
+        d = pycompat.getcwd()
         try:
             os.chdir(self.path)
             id = None
@@ -188,7 +189,7 @@
 
         if conntype != "pserver":
             if conntype == "rsh":
-                rsh = os.environ.get("CVS_RSH") or "ssh"
+                rsh = encoding.environ.get("CVS_RSH") or "ssh"
                 if user:
                     cmd = [rsh, '-l', user, host] + cmd
                 else:
--- a/hgext/convert/cvsps.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/cvsps.py	Wed Jan 18 11:43:36 2017 -0500
@@ -11,7 +11,9 @@
 
 from mercurial.i18n import _
 from mercurial import (
+    encoding,
     hook,
+    pycompat,
     util,
 )
 
@@ -136,8 +138,8 @@
         except IOError:
             raise logerror(_('not a CVS sandbox'))
 
-        if prefix and not prefix.endswith(os.sep):
-            prefix += os.sep
+        if prefix and not prefix.endswith(pycompat.ossep):
+            prefix += pycompat.ossep
 
         # Use the Root file in the sandbox, if it exists
         try:
@@ -146,7 +148,7 @@
             pass
 
     if not root:
-        root = os.environ.get('CVSROOT', '')
+        root = encoding.environ.get('CVSROOT', '')
 
     # read log cache if one exists
     oldlog = []
--- a/hgext/convert/git.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/git.py	Wed Jan 18 11:43:36 2017 -0500
@@ -31,6 +31,18 @@
     def hgsubstate(self):
         return "%s %s" % (self.node, self.path)
 
+# Keys in extra fields that should not be copied if the user requests.
+bannedextrakeys = set([
+    # Git commit object built-ins.
+    'tree',
+    'parent',
+    'author',
+    'committer',
+    # Mercurial built-ins.
+    'branch',
+    'close',
+])
+
 class convert_git(common.converter_source, common.commandline):
     # Windows does not support GIT_DIR= construct while other systems
     # cannot remove environment variable. Just assume none have
@@ -78,6 +90,10 @@
                                              False)
             if findcopiesharder:
                 self.simopt.append('--find-copies-harder')
+
+            renamelimit = ui.configint('convert', 'git.renamelimit',
+                                       default=400)
+            self.simopt.append('-l%d' % renamelimit)
         else:
             self.simopt = []
 
@@ -88,6 +104,54 @@
 
         self.catfilepipe = self.gitpipe('cat-file', '--batch')
 
+        self.copyextrakeys = self.ui.configlist('convert', 'git.extrakeys')
+        banned = set(self.copyextrakeys) & bannedextrakeys
+        if banned:
+            raise error.Abort(_('copying of extra keys is forbidden: %s') %
+                              _(', ').join(sorted(banned)))
+
+        committeractions = self.ui.configlist('convert', 'git.committeractions',
+                                              'messagedifferent')
+
+        messagedifferent = None
+        messagealways = None
+        for a in committeractions:
+            if a.startswith(('messagedifferent', 'messagealways')):
+                k = a
+                v = None
+                if '=' in a:
+                    k, v = a.split('=', 1)
+
+                if k == 'messagedifferent':
+                    messagedifferent = v or 'committer:'
+                elif k == 'messagealways':
+                    messagealways = v or 'committer:'
+
+        if messagedifferent and messagealways:
+            raise error.Abort(_('committeractions cannot define both '
+                                'messagedifferent and messagealways'))
+
+        dropcommitter = 'dropcommitter' in committeractions
+        replaceauthor = 'replaceauthor' in committeractions
+
+        if dropcommitter and replaceauthor:
+            raise error.Abort(_('committeractions cannot define both '
+                                'dropcommitter and replaceauthor'))
+
+        if dropcommitter and messagealways:
+            raise error.Abort(_('committeractions cannot define both '
+                                'dropcommitter and messagealways'))
+
+        if not messagedifferent and not messagealways:
+            messagedifferent = 'committer:'
+
+        self.committeractions = {
+            'dropcommitter': dropcommitter,
+            'replaceauthor': replaceauthor,
+            'messagedifferent': messagedifferent,
+            'messagealways': messagealways,
+        }
+
     def after(self):
         for f in self.catfilepipe:
             f.close()
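
To make the parsing above concrete: a hypothetical config of ``git.committeractions = messagealways=git-committer:, replaceauthor`` passes validation (the conflicting pairs are messagedifferent/messagealways, dropcommitter/replaceauthor, and dropcommitter/messagealways) and yields:

    committeractions = {
        'dropcommitter': False,
        'replaceauthor': True,
        'messagedifferent': None,
        'messagealways': 'git-committer:',
    }
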
@@ -275,6 +339,7 @@
         l = c[:end].splitlines()
         parents = []
         author = committer = None
+        extra = {}
         for e in l[1:]:
             n, v = e.split(" ", 1)
             if n == "author":
@@ -291,16 +356,32 @@
                 committer = self.recode(committer)
             if n == "parent":
                 parents.append(v)
+            if n in self.copyextrakeys:
+                extra[n] = v
 
-        if committer and committer != author:
-            message += "\ncommitter: %s\n" % committer
+        if self.committeractions['dropcommitter']:
+            committer = None
+        elif self.committeractions['replaceauthor']:
+            author = committer
+
+        if committer:
+            messagealways = self.committeractions['messagealways']
+            messagedifferent = self.committeractions['messagedifferent']
+            if messagealways:
+                message += '\n%s %s\n' % (messagealways, committer)
+            elif messagedifferent and author != committer:
+                message += '\n%s %s\n' % (messagedifferent, committer)
+
         tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
         tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
         date = tm + " " + str(tz)
+        saverev = self.ui.configbool('convert', 'git.saverev', True)
 
         c = common.commit(parents=parents, date=date, author=author,
                           desc=message,
-                          rev=version)
+                          rev=version,
+                          extra=extra,
+                          saverev=saverev)
         return c
 
     def numcommits(self):
--- a/hgext/convert/hg.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/hg.py	Wed Jan 18 11:43:36 2017 -0500
@@ -352,7 +352,7 @@
             p2 = node
 
         if self.filemapmode and nparents == 1:
-            man = self.repo.manifest
+            man = self.repo.manifestlog._revlog
             mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
             closed = 'close' in commit.extra
             if not closed and not man.cmp(m1node, man.revision(mnode)):
--- a/hgext/convert/p4.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/p4.py	Wed Jan 18 11:43:36 2017 -0500
@@ -55,18 +55,9 @@
 
         common.checktool('p4', abort=False)
 
-        self.p4changes = {}
-        self.heads = {}
-        self.changeset = {}
-        self.files = {}
-        self.copies = {}
-        self.tags = {}
-        self.lastbranch = {}
-        self.parent = {}
+        self.revmap = {}
         self.encoding = self.ui.config('convert', 'p4.encoding',
                                        default=convcmd.orig_encoding)
-        self.depotname = {}           # mapping from local name to depot name
-        self.localname = {} # mapping from depot name to local name
         self.re_type = re.compile(
             "([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
             "(\+\w+)?$")
@@ -78,24 +69,46 @@
         if revs and len(revs) > 1:
             raise error.Abort(_("p4 source does not support specifying "
                                "multiple revisions"))
-        self._parse(ui, path)
+
+    def setrevmap(self, revmap):
+        """Sets the parsed revmap dictionary.
+
+        Revmap stores mappings from a source revision to a target revision.
+        It is set in convcmd.convert and provided by the user as a file
+        on the command line.
+
+        Revisions in the map are considered to be present in the
+        repository and are ignored during _parse(). This allows for incremental
+        imports if a revmap is provided.
+        """
+        self.revmap = revmap
 
     def _parse_view(self, path):
         "Read changes affecting the path"
         cmd = 'p4 -G changes -s submitted %s' % util.shellquote(path)
         stdout = util.popen(cmd, mode='rb')
+        p4changes = {}
         for d in loaditer(stdout):
             c = d.get("change", None)
             if c:
-                self.p4changes[c] = True
+                p4changes[c] = True
+        return p4changes
 
     def _parse(self, ui, path):
         "Prepare list of P4 filenames and revisions to import"
+        p4changes = {}
+        changeset = {}
+        files_map = {}
+        copies_map = {}
+        localname = {}
+        depotname = {}
+        heads = []
+
         ui.status(_('reading p4 views\n'))
 
         # read client spec or view
         if "/" in path:
-            self._parse_view(path)
+            p4changes.update(self._parse_view(path))
             if path.startswith("//") and path.endswith("/..."):
                 views = {path[:-3]:""}
             else:
@@ -108,7 +121,7 @@
             for client in clientspec:
                 if client.startswith("View"):
                     sview, cview = clientspec[client].split()
-                    self._parse_view(sview)
+                    p4changes.update(self._parse_view(sview))
                     if sview.endswith("...") and cview.endswith("..."):
                         sview = sview[:-3]
                         cview = cview[:-3]
@@ -117,8 +130,8 @@
                     views[sview] = cview
 
         # list of changes that affect our source files
-        self.p4changes = self.p4changes.keys()
-        self.p4changes.sort(key=int)
+        p4changes = p4changes.keys()
+        p4changes.sort(key=int)
 
         # list with depot pathnames, longest first
         vieworder = views.keys()
@@ -126,32 +139,31 @@
 
         # handle revision limiting
         startrev = self.ui.config('convert', 'p4.startrev', default=0)
-        self.p4changes = [x for x in self.p4changes
-                          if ((not startrev or int(x) >= int(startrev)) and
-                              (not self.revs or int(x) <= int(self.revs[0])))]
 
         # now read the full changelists to get the list of file revisions
         ui.status(_('collecting p4 changelists\n'))
         lastid = None
-        for change in self.p4changes:
-            cmd = "p4 -G describe -s %s" % change
-            stdout = util.popen(cmd, mode='rb')
-            d = marshal.load(stdout)
-            desc = self.recode(d.get("desc", ""))
-            shortdesc = desc.split("\n", 1)[0]
-            t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
-            ui.status(util.ellipsis(t, 80) + '\n')
+        for change in p4changes:
+            if startrev and int(change) < int(startrev):
+                continue
+            if self.revs and int(change) > int(self.revs[0]):
+                continue
+            if change in self.revmap:
+                # Ignore already present revisions, but set the parent pointer.
+                lastid = change
+                continue
 
             if lastid:
                 parents = [lastid]
             else:
                 parents = []
 
-            date = (int(d["time"]), 0)     # timezone not set
-            c = common.commit(author=self.recode(d["user"]),
-                              date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
-                              parents=parents, desc=desc, branch=None,
-                              extra={"p4": change})
+            d = self._fetch_revision(change)
+            c = self._construct_commit(d, parents)
+
+            shortdesc = c.desc.splitlines(True)[0].rstrip('\r\n')
+            t = '%s %s' % (c.rev, repr(shortdesc)[1:-1])
+            ui.status(util.ellipsis(t, 80) + '\n')
 
             files = []
             copies = {}
@@ -166,15 +178,15 @@
                         break
                 if filename:
                     files.append((filename, d["rev%d" % i]))
-                    self.depotname[filename] = oldname
+                    depotname[filename] = oldname
                     if (d.get("action%d" % i) == "move/add"):
                         copiedfiles.append(filename)
-                    self.localname[oldname] = filename
+                    localname[oldname] = filename
                 i += 1
 
             # Collect information about copied files
             for filename in copiedfiles:
-                oldname = self.depotname[filename]
+                oldname = depotname[filename]
 
                 flcmd = 'p4 -G filelog %s' \
                       % util.shellquote(oldname)
@@ -196,8 +208,8 @@
                                 j += 1
                         i += 1
 
-                    if copiedoldname and copiedoldname in self.localname:
-                        copiedfilename = self.localname[copiedoldname]
+                    if copiedoldname and copiedoldname in localname:
+                        copiedfilename = localname[copiedoldname]
                         break
 
                 if copiedfilename:
@@ -206,13 +218,45 @@
                     ui.warn(_("cannot find source for copied file: %s@%s\n")
                             % (filename, change))
 
-            self.changeset[change] = c
-            self.files[change] = files
-            self.copies[change] = copies
+            changeset[change] = c
+            files_map[change] = files
+            copies_map[change] = copies
             lastid = change
 
-        if lastid:
-            self.heads = [lastid]
+        if lastid and len(changeset) > 0:
+            heads = [lastid]
+
+        return {
+            'changeset': changeset,
+            'files': files_map,
+            'copies': copies_map,
+            'heads': heads,
+            'depotname': depotname,
+        }
+
+    @util.propertycache
+    def _parse_once(self):
+        return self._parse(self.ui, self.path)
+
+    @util.propertycache
+    def copies(self):
+        return self._parse_once['copies']
+
+    @util.propertycache
+    def files(self):
+        return self._parse_once['files']
+
+    @util.propertycache
+    def changeset(self):
+        return self._parse_once['changeset']
+
+    @util.propertycache
+    def heads(self):
+        return self._parse_once['heads']
+
+    @util.propertycache
+    def depotname(self):
+        return self._parse_once['depotname']
 
     def getheads(self):
         return self.heads
@@ -286,11 +330,39 @@
             raise error.Abort(_("convert from p4 does not support --full"))
         return self.files[rev], self.copies[rev], set()
 
+    def _construct_commit(self, obj, parents=None):
+        """
+        Constructs a common.commit object from an unmarshalled
+        `p4 describe` output
+        """
+        desc = self.recode(obj.get("desc", ""))
+        date = (int(obj["time"]), 0)     # timezone not set
+        if parents is None:
+            parents = []
+
+        return common.commit(author=self.recode(obj["user"]),
+            date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
+            parents=parents, desc=desc, branch=None, rev=obj['change'],
+            extra={"p4": obj['change'], "convert_revision": obj['change']})
+
+    def _fetch_revision(self, rev):
+        """Return an output of `p4 describe` including author, commit date as
+        a dictionary."""
+        cmd = "p4 -G describe -s %s" % rev
+        stdout = util.popen(cmd, mode='rb')
+        return marshal.load(stdout)
+
     def getcommit(self, rev):
-        return self.changeset[rev]
+        if rev in self.changeset:
+            return self.changeset[rev]
+        elif rev in self.revmap:
+            d = self._fetch_revision(rev)
+            return self._construct_commit(d, parents=None)
+        raise error.Abort(
+            _("cannot find %s in the revmap or parsed changesets") % rev)
 
     def gettags(self):
-        return self.tags
+        return {}
 
     def getchangedfiles(self, rev, i):
         return sorted([x[0] for x in self.files[rev]])
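
The @util.propertycache decorators above defer the expensive _parse() until a caller first touches one of the derived attributes, then reuse the stored result. A compute-once property in that style can be sketched with the usual descriptor trick (this mirrors the idea, not the exact util.propertycache code):

    class propertycache_sketch(object):
        """Compute once, then store the value on the instance so the
        descriptor is bypassed on later lookups."""
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            value = self.func(obj)
            obj.__dict__[self.name] = value
            return value

    class Source(object):
        @propertycache_sketch
        def _parse_once(self):
            print('parsing...')            # runs only on first access
            return {'heads': ['42']}
        @propertycache_sketch
        def heads(self):
            return self._parse_once['heads']

    s = Source()
    print(s.heads)   # parsing... then ['42']
    print(s.heads)   # ['42'] -- cached, no reparse
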
--- a/hgext/convert/subversion.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/convert/subversion.py	Wed Jan 18 11:43:36 2017 -0500
@@ -5,7 +5,6 @@
 
 import os
 import re
-import sys
 import tempfile
 import xml.dom.minidom
 
@@ -13,8 +12,8 @@
 from mercurial import (
     encoding,
     error,
+    pycompat,
     scmutil,
-    strutil,
     util,
 )
 
@@ -104,7 +103,7 @@
         pass
     if os.path.isdir(path):
         path = os.path.normpath(os.path.abspath(path))
-        if os.name == 'nt':
+        if pycompat.osname == 'nt':
             path = '/' + util.normpath(path)
         # Module URL is later compared with the repository URL returned
         # by svn API, which is UTF-8.
@@ -164,10 +163,8 @@
         raise error.Abort(_('debugsvnlog could not load Subversion python '
                            'bindings'))
 
-    util.setbinary(sys.stdin)
-    util.setbinary(sys.stdout)
-    args = decodeargs(sys.stdin.read())
-    get_log_child(sys.stdout, *args)
+    args = decodeargs(ui.fin.read())
+    get_log_child(ui.fout, *args)
 
 class logstream(object):
     """Interruptible revision log iterator."""
@@ -257,8 +254,8 @@
     try:
         proto, path = url.split('://', 1)
         if proto == 'file':
-            if (os.name == 'nt' and path[:1] == '/' and path[1:2].isalpha()
-                and path[2:6].lower() == '%3a/'):
+            if (pycompat.osname == 'nt' and path[:1] == '/'
+                  and path[1:2].isalpha() and path[2:6].lower() == '%3a/'):
                 path = path[:2] + ':/' + path[6:]
             path = urlreq.url2pathname(path)
     except ValueError:
@@ -1122,7 +1119,7 @@
         self.delexec = []
         self.copies = []
         self.wc = None
-        self.cwd = os.getcwd()
+        self.cwd = pycompat.getcwd()
 
         created = False
         if os.path.isfile(os.path.join(path, '.svn', 'entries')):
@@ -1142,7 +1139,8 @@
                         path = '/' + path
                     path = 'file://' + path
 
-            wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
+            wcpath = os.path.join(pycompat.getcwd(), os.path.basename(path) +
+                                '-wc')
             ui.status(_('initializing svn working copy %r\n')
                       % os.path.basename(wcpath))
             self.run0('checkout', path, wcpath)
@@ -1240,7 +1238,8 @@
         for f in files:
             if os.path.isdir(self.wjoin(f)):
                 dirs.add(f)
-            for i in strutil.rfindall(f, '/'):
+            i = len(f)
+            for i in iter(lambda: f.rfind('/', 0, i), -1):
                 dirs.add(f[:i])
         return dirs
 
--- a/hgext/extdiff.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/extdiff.py	Wed Jan 18 11:43:36 2017 -0500
@@ -64,7 +64,6 @@
 
 import os
 import re
-import shlex
 import shutil
 import tempfile
 from mercurial.i18n import _
@@ -78,6 +77,7 @@
     commands,
     error,
     filemerge,
+    pycompat,
     scmutil,
     util,
 )
@@ -371,7 +371,7 @@
             if path:
                 # case "cmd = path opts"
                 cmdline = path
-                diffopts = len(shlex.split(cmdline)) > 1
+                diffopts = len(pycompat.shlexsplit(cmdline)) > 1
             else:
                 # case "cmd ="
                 path = util.findexe(cmd)
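
pycompat.shlexsplit wraps the standard library's shlex.split; the check above simply asks whether the configured command line contains more than one shell token, i.e. a path plus options. For example:

    import shlex
    print(len(shlex.split('kdiff3')) > 1)                     # False: bare path
    print(len(shlex.split('kdiff3 --L1 base --L2 new')) > 1)  # True: has opts
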
--- a/hgext/fsmonitor/__init__.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/fsmonitor/__init__.py	Wed Jan 18 11:43:36 2017 -0500
@@ -94,15 +94,16 @@
 import hashlib
 import os
 import stat
-import sys
 
 from mercurial.i18n import _
 from mercurial import (
     context,
+    encoding,
     extensions,
     localrepo,
     merge,
     pathutil,
+    pycompat,
     scmutil,
     util,
 )
@@ -292,7 +293,7 @@
     if normalize:
         foldmap = dict((normcase(k), k) for k in results)
 
-    switch_slashes = os.sep == '\\'
+    switch_slashes = pycompat.ossep == '\\'
     # The order of the results is, strictly speaking, undefined.
     # For case changes on a case insensitive filesystem we may receive
     # two entries, one with exists=True and another with exists=False.
@@ -392,8 +393,8 @@
 
     def _cmpsets(l1, l2):
         try:
-            if 'FSMONITOR_LOG_FILE' in os.environ:
-                fn = os.environ['FSMONITOR_LOG_FILE']
+            if 'FSMONITOR_LOG_FILE' in encoding.environ:
+                fn = encoding.environ['FSMONITOR_LOG_FILE']
                 f = open(fn, 'wb')
             else:
                 fn = 'fsmonitorfail.log'
@@ -434,7 +435,7 @@
     updatestate = (parentworking and match.always() and
                    not isinstance(ctx2, (context.workingcommitctx,
                                          context.memctx)) and
-                   'HG_PENDING' not in os.environ)
+                   'HG_PENDING' not in encoding.environ)
 
     try:
         if self._fsmonitorstate.walk_on_invalidate:
@@ -545,7 +546,7 @@
 
 def extsetup(ui):
     wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate)
-    if sys.platform == 'darwin':
+    if pycompat.sysplatform == 'darwin':
         # An assist for avoiding the dangling-symlink fsevents bug
         extensions.wrapfunction(os, 'symlink', wrapsymlink)
 
@@ -563,7 +564,7 @@
             pass
 
 class state_update(object):
-    ''' This context mananger is responsible for dispatching the state-enter
+    ''' This context manager is responsible for dispatching the state-enter
         and state-leave signals to the watchman service '''
 
     def __init__(self, repo, node, distance, partial):
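
state_update wraps a working-copy operation so watchman can treat the resulting churn specially; the shape is an ordinary context manager that signals on entry and exit. A minimal sketch of that shape (the _signal helper is hypothetical, standing in for the real state-enter/state-leave commands):

    class state_update_sketch(object):
        def __init__(self, repo, node, distance, partial):
            self.repo = repo
            self.node = node
            self.distance = distance
            self.partial = partial
        def __enter__(self):
            self._signal('state-enter')
            return self
        def __exit__(self, exc_type, exc, tb):
            self._signal('state-leave')
            return False   # never swallow exceptions from the update
        def _signal(self, name):
            # hypothetical stand-in for the watchman command
            print('%s node=%s distance=%d partial=%r'
                  % (name, self.node, self.distance, self.partial))

    with state_update_sketch('repo', 'abc123', 1, False):
        pass   # the guarded working-directory update would run here
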
--- a/hgext/fsmonitor/pywatchman/__init__.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/fsmonitor/pywatchman/__init__.py	Wed Jan 18 11:43:36 2017 -0500
@@ -26,9 +26,14 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+# no unicode literals
+
+import inspect
+import math
 import os
-import errno
-import math
 import socket
 import subprocess
 import time
@@ -36,11 +41,20 @@
 # Sometimes it's really hard to get Python extensions to compile,
 # so fall back to a pure Python implementation.
 try:
-    import bser
+    from . import bser
+    # Demandimport causes modules to be loaded lazily. Force the load now
+    # so that we can fall back on pybser if bser doesn't exist
+    bser.pdu_info
 except ImportError:
-    import pybser as bser
+    from . import pybser as bser
 
-import capabilities
+from . import (
+    capabilities,
+    compat,
+    encoding,
+    load,
+)
+
 
 if os.name == 'nt':
     import ctypes
@@ -55,18 +69,29 @@
     FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
     FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
     FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
+    WAIT_FAILED = 0xFFFFFFFF
     WAIT_TIMEOUT = 0x00000102
     WAIT_OBJECT_0 = 0x00000000
-    ERROR_IO_PENDING = 997
+    WAIT_IO_COMPLETION = 0x000000C0
+    INFINITE = 0xFFFFFFFF
+
+    # Overlapped I/O operation is in progress. (997)
+    ERROR_IO_PENDING = 0x000003E5
+
+    # The pointer size follows the architecture
+    # We use WPARAM since this type is already conditionally defined
+    ULONG_PTR = ctypes.wintypes.WPARAM
 
     class OVERLAPPED(ctypes.Structure):
         _fields_ = [
-            ("Internal", wintypes.ULONG), ("InternalHigh", wintypes.ULONG),
+            ("Internal", ULONG_PTR), ("InternalHigh", ULONG_PTR),
             ("Offset", wintypes.DWORD), ("OffsetHigh", wintypes.DWORD),
             ("hEvent", wintypes.HANDLE)
         ]
 
         def __init__(self):
+            self.Internal = 0
+            self.InternalHigh = 0
             self.Offset = 0
             self.OffsetHigh = 0
             self.hEvent = 0
@@ -97,6 +122,10 @@
     GetLastError.argtypes = []
     GetLastError.restype = wintypes.DWORD
 
+    SetLastError = ctypes.windll.kernel32.SetLastError
+    SetLastError.argtypes = [wintypes.DWORD]
+    SetLastError.restype = None
+
     FormatMessage = ctypes.windll.kernel32.FormatMessageA
     FormatMessage.argtypes = [wintypes.DWORD, wintypes.LPVOID, wintypes.DWORD,
                               wintypes.DWORD, ctypes.POINTER(wintypes.LPSTR),
@@ -105,12 +134,30 @@
 
     LocalFree = ctypes.windll.kernel32.LocalFree
 
-    GetOverlappedResultEx = ctypes.windll.kernel32.GetOverlappedResultEx
-    GetOverlappedResultEx.argtypes = [wintypes.HANDLE,
-                                      ctypes.POINTER(OVERLAPPED), LPDWORD,
-                                      wintypes.DWORD, wintypes.BOOL]
-    GetOverlappedResultEx.restype = wintypes.BOOL
+    GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
+    GetOverlappedResult.argtypes = [wintypes.HANDLE,
+                                    ctypes.POINTER(OVERLAPPED), LPDWORD,
+                                    wintypes.BOOL]
+    GetOverlappedResult.restype = wintypes.BOOL
 
+    GetOverlappedResultEx = getattr(ctypes.windll.kernel32,
+                                    'GetOverlappedResultEx', None)
+    if GetOverlappedResultEx is not None:
+        GetOverlappedResultEx.argtypes = [wintypes.HANDLE,
+                                          ctypes.POINTER(OVERLAPPED), LPDWORD,
+                                          wintypes.DWORD, wintypes.BOOL]
+        GetOverlappedResultEx.restype = wintypes.BOOL
+
+    WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx
+    WaitForSingleObjectEx.argtypes = [wintypes.HANDLE, wintypes.DWORD, wintypes.BOOL]
+    WaitForSingleObjectEx.restype = wintypes.DWORD
+
+    CreateEvent = ctypes.windll.kernel32.CreateEventA
+    CreateEvent.argtypes = [LPDWORD, wintypes.BOOL, wintypes.BOOL,
+                            wintypes.LPSTR]
+    CreateEvent.restype = wintypes.HANDLE
+
+    # Windows Vista is the minimum supported client for CancelIoEx.
     CancelIoEx = ctypes.windll.kernel32.CancelIoEx
     CancelIoEx.argtypes = [wintypes.HANDLE, ctypes.POINTER(OVERLAPPED)]
     CancelIoEx.restype = wintypes.BOOL
@@ -132,8 +179,47 @@
         pass
 
 
+def _win32_strerror(err):
+    """ expand a win32 error code into a human readable message """
+
+    # FormatMessage will allocate memory and assign it here
+    buf = ctypes.c_char_p()
+    FormatMessage(
+        FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER
+        | FORMAT_MESSAGE_IGNORE_INSERTS, None, err, 0, buf, 0, None)
+    try:
+        return buf.value
+    finally:
+        LocalFree(buf)
+
+
 class WatchmanError(Exception):
-    pass
+    def __init__(self, msg=None, cmd=None):
+        self.msg = msg
+        self.cmd = cmd
+
+    def setCommand(self, cmd):
+        self.cmd = cmd
+
+    def __str__(self):
+        if self.cmd:
+            return '%s, while executing %s' % (self.msg, self.cmd)
+        return self.msg
+
+
+class WatchmanEnvironmentError(WatchmanError):
+    def __init__(self, msg, errno, errmsg, cmd=None):
+        super(WatchmanEnvironmentError, self).__init__(
+            '{0}: errno={1} errmsg={2}'.format(msg, errno, errmsg),
+            cmd)
+
+
+class SocketConnectError(WatchmanError):
+    def __init__(self, sockpath, exc):
+        super(SocketConnectError, self).__init__(
+            'unable to connect to %s: %s' % (sockpath, exc))
+        self.sockpath = sockpath
+        self.exc = exc
 
 
 class SocketTimeout(WatchmanError):
@@ -151,19 +237,11 @@
 
     self.msg is the message returned by watchman.
     """
-
     def __init__(self, msg, cmd=None):
-        self.msg = msg
-        self.cmd = cmd
-        super(CommandError, self).__init__('watchman command error: %s' % msg)
-
-    def setCommand(self, cmd):
-        self.cmd = cmd
-
-    def __str__(self):
-        if self.cmd:
-            return '%s, while executing %s' % (self.msg, self.cmd)
-        return self.msg
+        super(CommandError, self).__init__(
+            'watchman command error: %s' % (msg, ),
+            cmd,
+        )
 
 
 class Transport(object):
@@ -195,16 +273,16 @@
 
         # Buffer may already have a line if we've received unilateral
         # response(s) from the server
-        if len(self.buf) == 1 and "\n" in self.buf[0]:
-            (line, b) = self.buf[0].split("\n", 1)
+        if len(self.buf) == 1 and b"\n" in self.buf[0]:
+            (line, b) = self.buf[0].split(b"\n", 1)
             self.buf = [b]
             return line
 
         while True:
             b = self.readBytes(4096)
-            if "\n" in b:
-                result = ''.join(self.buf)
-                (line, b) = b.split("\n", 1)
+            if b"\n" in b:
+                result = b''.join(self.buf)
+                (line, b) = b.split(b"\n", 1)
                 self.buf = [b]
                 return result + line
             self.buf.append(b)
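
The b'...' literals introduced in readLine() are what keep this working on Python 3, where mixing str and bytes in split()/join() raises TypeError (on Python 2 the two spellings are the same type). The buffering itself, illustrated with bytes:

    buf = [b'first li']            # partial line left from an earlier read
    chunk = b'ne\nsecond'          # next chunk from the transport
    if b'\n' in chunk:
        line, rest = chunk.split(b'\n', 1)
        print(b''.join(buf) + line)   # b'first line'
        buf = [rest]                  # b'second' is kept for the next call
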
@@ -241,8 +319,8 @@
             sock.connect(self.sockpath)
             self.sock = sock
         except socket.error as e:
-            raise WatchmanError('unable to connect to %s: %s' %
-                                (self.sockpath, e))
+            sock.close()
+            raise SocketConnectError(self.sockpath, e)
 
     def close(self):
         self.sock.close()
@@ -268,6 +346,46 @@
             raise SocketTimeout('timed out sending query command')
 
 
+def _get_overlapped_result_ex_impl(pipe, olap, nbytes, millis, alertable):
+    """ Windows 7 and earlier does not support GetOverlappedResultEx. The
+    alternative is to use GetOverlappedResult and wait for read or write
+    operation to complete. This is done be using CreateEvent and
+    WaitForSingleObjectEx. CreateEvent, WaitForSingleObjectEx
+    and GetOverlappedResult are all part of Windows API since WindowsXP.
+    This is the exact same implementation that can be found in the watchman
+    source code (see get_overlapped_result_ex_impl in stream_win.c). This
+    way, maintenance should be simplified.
+    """
+    log('Preparing to wait for a maximum of %dms', millis)
+    if millis != 0:
+        waitReturnCode = WaitForSingleObjectEx(olap.hEvent, millis, alertable)
+        if waitReturnCode == WAIT_OBJECT_0:
+            # Event is signaled, overlapped IO operation result should be available.
+            pass
+        elif waitReturnCode == WAIT_IO_COMPLETION:
+            # WaitForSingleObjectEx returned because the system added an I/O completion
+            # routine or an asynchronous procedure call (APC) to the thread queue.
+            SetLastError(WAIT_IO_COMPLETION)
+            pass
+        elif waitReturnCode == WAIT_TIMEOUT:
+            # We reached the maximum allowed wait time; the I/O operation
+            # failed to complete in a timely fashion.
+            SetLastError(WAIT_TIMEOUT)
+            return False
+        elif waitReturnCode == WAIT_FAILED:
+            # something went wrong calling WaitForSingleObjectEx
+            err = GetLastError()
+            log('WaitForSingleObjectEx failed: %s', _win32_strerror(err))
+            return False
+        else:
+            # unexpected situation deserving investigation.
+            err = GetLastError()
+            log('Unexpected error: %s', _win32_strerror(err))
+            return False
+
+    return GetOverlappedResult(pipe, olap, nbytes, False)
+
+
 class WindowsNamedPipeTransport(Transport):
     """ connect to a named pipe """
 
@@ -284,28 +402,35 @@
             self._raise_win_err('failed to open pipe %s' % sockpath,
                                 GetLastError())
 
-    def _win32_strerror(self, err):
-        """ expand a win32 error code into a human readable message """
+        # event for the overlapped I/O operations
+        self._waitable = CreateEvent(None, True, False, None)
+        if self._waitable is None:
+            self._raise_win_err('CreateEvent failed', GetLastError())
 
-        # FormatMessage will allocate memory and assign it here
-        buf = ctypes.c_char_p()
-        FormatMessage(
-            FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER
-            | FORMAT_MESSAGE_IGNORE_INSERTS, None, err, 0, buf, 0, None)
-        try:
-            return buf.value
-        finally:
-            LocalFree(buf)
+        self._get_overlapped_result_ex = GetOverlappedResultEx
+        if (os.getenv('WATCHMAN_WIN7_COMPAT') == '1' or
+            self._get_overlapped_result_ex is None):
+            self._get_overlapped_result_ex = _get_overlapped_result_ex_impl
 
     def _raise_win_err(self, msg, err):
         raise IOError('%s win32 error code: %d %s' %
-                      (msg, err, self._win32_strerror(err)))
+                      (msg, err, _win32_strerror(err)))
 
     def close(self):
         if self.pipe:
+            log('Closing pipe')
             CloseHandle(self.pipe)
         self.pipe = None
 
+        if self._waitable is not None:
+            # We release the handle for the event
+            CloseHandle(self._waitable)
+        self._waitable = None
+
+    def setTimeout(self, value):
+        # convert to milliseconds
+        self.timeout = int(value * 1000)
+
     def readBytes(self, size):
         """ A read can block for an unbounded amount of time, even if the
             kernel reports that the pipe handle is signalled, so we need to
@@ -325,6 +450,7 @@
         # We need to initiate a read
         buf = ctypes.create_string_buffer(size)
         olap = OVERLAPPED()
+        olap.hEvent = self._waitable
 
         log('made read buff of size %d', size)
 
@@ -339,8 +465,9 @@
                                     GetLastError())
 
         nread = wintypes.DWORD()
-        if not GetOverlappedResultEx(self.pipe, olap, nread,
-                                     0 if immediate else self.timeout, True):
+        if not self._get_overlapped_result_ex(self.pipe, olap, nread,
+                                              0 if immediate else self.timeout,
+                                              True):
             err = GetLastError()
             CancelIoEx(self.pipe, olap)
 
@@ -374,6 +501,8 @@
 
     def write(self, data):
         olap = OVERLAPPED()
+        olap.hEvent = self._waitable
+
         immediate = WriteFile(self.pipe, ctypes.c_char_p(data), len(data),
                               None, olap)
 
@@ -385,8 +514,10 @@
 
         # Obtain results, waiting if needed
         nwrote = wintypes.DWORD()
-        if GetOverlappedResultEx(self.pipe, olap, nwrote, 0 if immediate else
-                                 self.timeout, True):
+        if self._get_overlapped_result_ex(self.pipe, olap, nwrote,
+                                          0 if immediate else self.timeout,
+                                          True):
+            log('made write of %d bytes', nwrote.value)
             return nwrote.value
 
         err = GetLastError()
@@ -430,7 +561,10 @@
 
     def close(self):
         if self.proc:
-            self.proc.kill()
+            if self.proc.pid is not None:
+                self.proc.kill()
+            self.proc.stdin.close()
+            self.proc.stdout.close()
             self.proc = None
 
     def _connect(self):
@@ -438,7 +572,7 @@
             return self.proc
         args = [
             'watchman',
-            '--sockname={}'.format(self.sockpath),
+            '--sockname={0}'.format(self.sockpath),
             '--logfile=/BOGUS',
             '--statefile=/BOGUS',
             '--no-spawn',
@@ -460,8 +594,8 @@
 
     def write(self, data):
         if self.closed:
+            self.close()
             self.closed = False
-            self.proc = None
         self._connect()
         res = self.proc.stdin.write(data)
         self.proc.stdin.close()
@@ -473,21 +607,21 @@
     """ use the BSER encoding.  This is the default, preferred codec """
 
     def _loads(self, response):
-        return bser.loads(response)
+        return bser.loads(response) # Defaults to BSER v1
 
     def receive(self):
         buf = [self.transport.readBytes(sniff_len)]
         if not buf[0]:
             raise WatchmanError('empty watchman response')
 
-        elen = bser.pdu_len(buf[0])
+        _1, _2, elen = bser.pdu_info(buf[0])
 
         rlen = len(buf[0])
         while elen > rlen:
             buf.append(self.transport.readBytes(elen - rlen))
             rlen += len(buf[-1])
 
-        response = ''.join(buf)
+        response = b''.join(buf)
         try:
             res = self._loads(response)
             return res
@@ -495,7 +629,7 @@
             raise WatchmanError('watchman response decode error: %s' % e)
 
     def send(self, *args):
-        cmd = bser.dumps(*args)
+        cmd = bser.dumps(*args) # Defaults to BSER v1
         self.transport.write(cmd)
 
 
@@ -504,7 +638,64 @@
         immutable object support """
 
     def _loads(self, response):
-        return bser.loads(response, False)
+        return bser.loads(response, False) # Defaults to BSER v1
+
+
+class Bser2WithFallbackCodec(BserCodec):
+    """ use BSER v2 encoding """
+
+    def __init__(self, transport):
+        super(Bser2WithFallbackCodec, self).__init__(transport)
+        # Once the server advertises support for bser-v2 we should switch this
+        # to 'required' on Python 3.
+        self.send(["version", {"optional": ["bser-v2"]}])
+
+        capabilities = self.receive()
+
+        if 'error' in capabilities:
+            raise Exception('Unsupported BSER version')
+
+        if capabilities['capabilities']['bser-v2']:
+            self.bser_version = 2
+            self.bser_capabilities = 0
+        else:
+            self.bser_version = 1
+            self.bser_capabilities = 0
+
+    def _loads(self, response):
+        return bser.loads(response)
+
+    def receive(self):
+        buf = [self.transport.readBytes(sniff_len)]
+        if not buf[0]:
+            raise WatchmanError('empty watchman response')
+
+        recv_bser_version, recv_bser_capabilities, elen = bser.pdu_info(buf[0])
+
+        if hasattr(self, 'bser_version'):
+            # Readjust BSER version and capabilities if necessary
+            self.bser_version = max(self.bser_version, recv_bser_version)
+            self.bser_capabilities = (self.bser_capabilities &
+                                      recv_bser_capabilities)
+
+        rlen = len(buf[0])
+        while elen > rlen:
+            buf.append(self.transport.readBytes(elen - rlen))
+            rlen += len(buf[-1])
+
+        response = b''.join(buf)
+        try:
+            res = self._loads(response)
+            return res
+        except ValueError as e:
+            raise WatchmanError('watchman response decode error: %s' % e)
+
+    def send(self, *args):
+        if hasattr(self, 'bser_version'):
+            cmd = bser.dumps(*args, version=self.bser_version,
+                capabilities=self.bser_capabilities)
+        else:
+            cmd = bser.dumps(*args)
+        self.transport.write(cmd)
 
 
 class JsonCodec(Codec):
@@ -520,6 +711,13 @@
     def receive(self):
         line = self.transport.readLine()
         try:
+            # In Python 3, json.loads is a transformation from Unicode string to
+            # objects possibly containing Unicode strings. We typically expect
+            # the JSON blob to be ASCII-only with non-ASCII characters escaped,
+            # but it's possible we might get non-ASCII bytes that are valid
+            # UTF-8.
+            if compat.PYTHON3:
+                line = line.decode('utf-8')
             return self.json.loads(line)
         except Exception as e:
             print(e, line)
@@ -527,7 +725,12 @@
 
     def send(self, *args):
         cmd = self.json.dumps(*args)
-        self.transport.write(cmd + "\n")
+        # In Python 3, json.dumps is a transformation from objects possibly
+        # containing Unicode strings to Unicode string. Even with (the default)
+        # ensure_ascii=True, dumps returns a Unicode string.
+        if compat.PYTHON3:
+            cmd = cmd.encode('ascii')
+        self.transport.write(cmd + b"\n")
 
 
 class client(object):
@@ -556,22 +759,27 @@
         self.timeout = timeout
         self.useImmutableBser = useImmutableBser
 
-        transport = transport or os.getenv('WATCHMAN_TRANSPORT') or 'local'
-        if transport == 'local' and os.name == 'nt':
-            self.transport = WindowsNamedPipeTransport
-        elif transport == 'local':
-            self.transport = UnixSocketTransport
-        elif transport == 'cli':
-            self.transport = CLIProcessTransport
-            if sendEncoding is None:
-                sendEncoding = 'json'
-            if recvEncoding is None:
-                recvEncoding = sendEncoding
+        if inspect.isclass(transport) and issubclass(transport, Transport):
+            self.transport = transport
         else:
-            raise WatchmanError('invalid transport %s' % transport)
+            transport = transport or os.getenv('WATCHMAN_TRANSPORT') or 'local'
+            if transport == 'local' and os.name == 'nt':
+                self.transport = WindowsNamedPipeTransport
+            elif transport == 'local':
+                self.transport = UnixSocketTransport
+            elif transport == 'cli':
+                self.transport = CLIProcessTransport
+                if sendEncoding is None:
+                    sendEncoding = 'json'
+                if recvEncoding is None:
+                    recvEncoding = sendEncoding
+            else:
+                raise WatchmanError('invalid transport %s' % transport)
 
-        sendEncoding = sendEncoding or os.getenv('WATCHMAN_ENCODING') or 'bser'
-        recvEncoding = recvEncoding or os.getenv('WATCHMAN_ENCODING') or 'bser'
+        sendEncoding = str(sendEncoding or os.getenv('WATCHMAN_ENCODING') or
+                           'bser')
+        recvEncoding = str(recvEncoding or os.getenv('WATCHMAN_ENCODING') or
+                           'bser')
 
         self.recvCodec = self._parseEncoding(recvEncoding)
         self.sendCodec = self._parseEncoding(sendEncoding)
@@ -581,6 +789,8 @@
             if self.useImmutableBser:
                 return ImmutableBserCodec
             return BserCodec
+        elif enc == 'experimental-bser-v2':
+            return Bser2WithFallbackCodec
         elif enc == 'json':
             return JsonCodec
         else:
@@ -600,10 +810,20 @@
 
         cmd = ['watchman', '--output-encoding=bser', 'get-sockname']
         try:
-            p = subprocess.Popen(cmd,
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE,
-                                 close_fds=os.name != 'nt')
+            args = dict(stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE,
+                        close_fds=os.name != 'nt')
+
+            if os.name == 'nt':
+                # If invoked from an application with a graphical user
+                # interface, this call would cause a brief command window
+                # pop-up. Using the STARTF_USESHOWWINDOW flag avoids that.
+                startupinfo = subprocess.STARTUPINFO()
+                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+                args['startupinfo'] = startupinfo
+
+            p = subprocess.Popen(cmd, **args)
+
         except OSError as e:
             raise WatchmanError('"watchman" executable not in PATH (%s)', e)
 
@@ -614,10 +834,10 @@
             raise WatchmanError("watchman exited with code %d" % exitcode)
 
         result = bser.loads(stdout)
-        if 'error' in result:
+        if b'error' in result:
             raise WatchmanError('get-sockname error: %s' % result['error'])
 
-        return result['sockname']
+        return result[b'sockname']
 
     def _connect(self):
         """ establish transport connection """
@@ -660,10 +880,16 @@
         self._connect()
         result = self.recvConn.receive()
         if self._hasprop(result, 'error'):
-            raise CommandError(result['error'])
+            error = result['error']
+            if compat.PYTHON3 and isinstance(self.recvConn, BserCodec):
+                error = result['error'].decode('utf-8', 'surrogateescape')
+            raise CommandError(error)
 
         if self._hasprop(result, 'log'):
-            self.logs.append(result['log'])
+            log = result['log']
+            if compat.PYTHON3 and isinstance(self.recvConn, BserCodec):
+                log = log.decode('utf-8', 'surrogateescape')
+            self.logs.append(log)
 
         if self._hasprop(result, 'subscription'):
             sub = result['subscription']
@@ -682,6 +908,9 @@
         return result
 
     def isUnilateralResponse(self, res):
+        if 'unilateral' in res and res['unilateral']:
+            return True
+        # Fall back to checking for known unilateral responses
         for k in self.unilateral:
             if k in res:
                 return True
@@ -712,6 +941,13 @@
         remove processing impacts both the unscoped and scoped stores
         for the subscription data.
         """
+        if compat.PYTHON3 and issubclass(self.recvCodec, BserCodec):
+            # People may pass in Unicode strings here -- but currently BSER only
+            # returns bytestrings. Deal with that.
+            if isinstance(root, str):
+                root = encoding.encode_local(root)
+            if isinstance(name, str):
+                name = name.encode('utf-8')
 
         if root is not None:
             if not root in self.sub_by_root:
@@ -752,9 +988,17 @@
                 res = self.receive()
 
             return res
-        except CommandError as ex:
+        except EnvironmentError as ee:
+            # When we can depend on Python 3, we can use PEP 3134
+            # exception chaining here.
+            raise WatchmanEnvironmentError(
+                'I/O error communicating with watchman daemon',
+                ee.errno,
+                ee.strerror,
+                args)
+        except WatchmanError as ex:
             ex.setCommand(args)
-            raise ex
+            raise
 
     def capabilityCheck(self, optional=None, required=None):
         """ Perform a server capability check """
@@ -775,5 +1019,3 @@
     def setTimeout(self, value):
         self.recvConn.setTimeout(value)
         self.sendConn.setTimeout(value)
-
-# no-check-code -- this is a 3rd party library
--- a/hgext/fsmonitor/pywatchman/bser.c	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/fsmonitor/pywatchman/bser.c	Wed Jan 18 11:43:36 2017 -0500
@@ -29,11 +29,27 @@
 */
 
 #include <Python.h>
+#include <bytesobject.h>
 #ifdef _MSC_VER
 #define inline __inline
-#include "msc_stdint.h"
+#if _MSC_VER >= 1800
+#include <stdint.h>
+#else
+// The compiler associated with Python 2.7 on Windows doesn't ship
+// with stdint.h, so define the small subset that we use here.
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#define UINT32_MAX 4294967295U
+#endif
 #endif
 
+// clang-format off
 /* Return the smallest size int that can store the value */
 #define INT_SIZE(x) (((x) == ((int8_t)x))  ? 1 :    \
                      ((x) == ((int16_t)x)) ? 2 :    \
@@ -41,7 +57,7 @@
 
 #define BSER_ARRAY     0x00
 #define BSER_OBJECT    0x01
-#define BSER_STRING    0x02
+#define BSER_BYTESTRING 0x02
 #define BSER_INT8      0x03
 #define BSER_INT16     0x04
 #define BSER_INT32     0x05
@@ -52,6 +68,8 @@
 #define BSER_NULL      0x0a
 #define BSER_TEMPLATE  0x0b
 #define BSER_SKIP      0x0c
+#define BSER_UTF8STRING 0x0d
+// clang-format on
 
 // An immutable object representation of BSER_OBJECT.
 // Rather than build a hash table, key -> value are obtained
@@ -64,24 +82,27 @@
 // approach, this is still faster for the mercurial use case
 // as it helps to eliminate creating N other objects to
 // represent the stat information in the hgwatchman extension
+// clang-format off
 typedef struct {
   PyObject_HEAD
   PyObject *keys;   // tuple of field names
   PyObject *values; // tuple of values
 } bserObject;
+// clang-format on
 
-static Py_ssize_t bserobj_tuple_length(PyObject *o) {
-  bserObject *obj = (bserObject*)o;
+static Py_ssize_t bserobj_tuple_length(PyObject* o) {
+  bserObject* obj = (bserObject*)o;
 
   return PySequence_Length(obj->keys);
 }
 
-static PyObject *bserobj_tuple_item(PyObject *o, Py_ssize_t i) {
-  bserObject *obj = (bserObject*)o;
+static PyObject* bserobj_tuple_item(PyObject* o, Py_ssize_t i) {
+  bserObject* obj = (bserObject*)o;
 
   return PySequence_GetItem(obj->values, i);
 }
 
+// clang-format off
 static PySequenceMethods bserobj_sq = {
   bserobj_tuple_length,      /* sq_length */
   0,                         /* sq_concat */
@@ -92,49 +113,72 @@
   0,                         /* sq_inplace_concat */
   0                          /* sq_inplace_repeat */
 };
+// clang-format on
 
-static void bserobj_dealloc(PyObject *o) {
-  bserObject *obj = (bserObject*)o;
+static void bserobj_dealloc(PyObject* o) {
+  bserObject* obj = (bserObject*)o;
 
   Py_CLEAR(obj->keys);
   Py_CLEAR(obj->values);
   PyObject_Del(o);
 }
 
-static PyObject *bserobj_getattrro(PyObject *o, PyObject *name) {
-  bserObject *obj = (bserObject*)o;
+static PyObject* bserobj_getattrro(PyObject* o, PyObject* name) {
+  bserObject* obj = (bserObject*)o;
   Py_ssize_t i, n;
-  const char *namestr;
+  PyObject* name_bytes = NULL;
+  PyObject* ret = NULL;
+  const char* namestr;
 
   if (PyIndex_Check(name)) {
     i = PyNumber_AsSsize_t(name, PyExc_IndexError);
     if (i == -1 && PyErr_Occurred()) {
-      return NULL;
+      goto bail;
     }
-    return PySequence_GetItem(obj->values, i);
+    ret = PySequence_GetItem(obj->values, i);
+    goto bail;
   }
 
+  // We can be passed in Unicode objects here -- we don't support anything other
+  // than UTF-8 for keys.
+  if (PyUnicode_Check(name)) {
+    name_bytes = PyUnicode_AsUTF8String(name);
+    if (name_bytes == NULL) {
+      goto bail;
+    }
+    namestr = PyBytes_AsString(name_bytes);
+  } else {
+    namestr = PyBytes_AsString(name);
+  }
+
+  if (namestr == NULL) {
+    goto bail;
+  }
   // hack^Wfeature to allow mercurial to use "st_size" to reference "size"
-  namestr = PyString_AsString(name);
   if (!strncmp(namestr, "st_", 3)) {
     namestr += 3;
   }
 
   n = PyTuple_GET_SIZE(obj->keys);
   for (i = 0; i < n; i++) {
-    const char *item_name = NULL;
-    PyObject *key = PyTuple_GET_ITEM(obj->keys, i);
+    const char* item_name = NULL;
+    PyObject* key = PyTuple_GET_ITEM(obj->keys, i);
 
-    item_name = PyString_AsString(key);
+    item_name = PyBytes_AsString(key);
     if (!strcmp(item_name, namestr)) {
-      return PySequence_GetItem(obj->values, i);
+      ret = PySequence_GetItem(obj->values, i);
+      goto bail;
     }
   }
-  PyErr_Format(PyExc_AttributeError,
-              "bserobject has no attribute '%.400s'", namestr);
-  return NULL;
+
+  PyErr_Format(
+      PyExc_AttributeError, "bserobject has no attribute '%.400s'", namestr);
+bail:
+  Py_XDECREF(name_bytes);
+  return ret;
 }
 
+// clang-format off
 static PyMappingMethods bserobj_map = {
   bserobj_tuple_length,     /* mp_length */
   bserobj_getattrro,        /* mp_subscript */
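A pure-Python model of the lookup that bserobj_getattrro now implements may help: keys stay as bytes, attribute names may be str or bytes, and a leading "st_" is stripped so os.stat-style spellings keep working (the key/value data below is invented):

    def bserobj_lookup(keys, values, name):
        # Model of the C attribute lookup above.
        if isinstance(name, str):
            name = name.encode('utf-8')
        if name.startswith(b'st_'):
            name = name[3:]                     # "st_size" -> "size"
        return values[keys.index(name)]

    keys, values = (b'name', b'size'), (b'foo.c', 123)
    assert bserobj_lookup(keys, values, 'st_size') == 123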
@@ -181,20 +225,27 @@
   0,                         /* tp_alloc */
   0,                         /* tp_new */
 };
-
+// clang-format on
 
-static PyObject *bser_loads_recursive(const char **ptr, const char *end,
-    int mutable);
+typedef struct loads_ctx {
+  int mutable;
+  const char* value_encoding;
+  const char* value_errors;
+  uint32_t bser_version;
+  uint32_t bser_capabilities;
+} unser_ctx_t;
+
+static PyObject*
+bser_loads_recursive(const char** ptr, const char* end, const unser_ctx_t* ctx);
 
 static const char bser_true = BSER_TRUE;
 static const char bser_false = BSER_FALSE;
 static const char bser_null = BSER_NULL;
-static const char bser_string_hdr = BSER_STRING;
+static const char bser_bytestring_hdr = BSER_BYTESTRING;
 static const char bser_array_hdr = BSER_ARRAY;
 static const char bser_object_hdr = BSER_OBJECT;
 
-static inline uint32_t next_power_2(uint32_t n)
-{
+static inline uint32_t next_power_2(uint32_t n) {
   n |= (n >> 16);
   n |= (n >> 8);
   n |= (n >> 4);
@@ -205,16 +256,17 @@
 
 // A buffer we use for building up the serialized result
 struct bser_buffer {
-  char *buf;
+  char* buf;
   int wpos, allocd;
+  uint32_t bser_version;
+  uint32_t capabilities;
 };
 typedef struct bser_buffer bser_t;
 
-static int bser_append(bser_t *bser, const char *data, uint32_t len)
-{
+static int bser_append(bser_t* bser, const char* data, uint32_t len) {
   int newlen = next_power_2(bser->wpos + len);
   if (newlen > bser->allocd) {
-    char *nbuf = realloc(bser->buf, newlen);
+    char* nbuf = realloc(bser->buf, newlen);
     if (!nbuf) {
       return 0;
     }
@@ -228,40 +280,46 @@
   return 1;
 }
 
-static int bser_init(bser_t *bser)
-{
+static int bser_init(bser_t* bser, uint32_t version, uint32_t capabilities) {
   bser->allocd = 8192;
   bser->wpos = 0;
   bser->buf = malloc(bser->allocd);
-
+  bser->bser_version = version;
+  bser->capabilities = capabilities;
   if (!bser->buf) {
     return 0;
   }
 
-  // Leave room for the serialization header, which includes
-  // our overall length.  To make things simpler, we'll use an
-  // int32 for the header
+// Leave room for the serialization header, which includes
+// our overall length.  To make things simpler, we'll use an
+// int32 for the header
 #define EMPTY_HEADER "\x00\x01\x05\x00\x00\x00\x00"
-  bser_append(bser, EMPTY_HEADER, sizeof(EMPTY_HEADER)-1);
+
+// Version 2 also carries an integer indicating the capabilities. The
+// capabilities integer comes before the PDU size.
+#define EMPTY_HEADER_V2 "\x00\x02\x00\x00\x00\x00\x05\x00\x00\x00\x00"
+  if (version == 2) {
+    bser_append(bser, EMPTY_HEADER_V2, sizeof(EMPTY_HEADER_V2) - 1);
+  } else {
+    bser_append(bser, EMPTY_HEADER, sizeof(EMPTY_HEADER) - 1);
+  }
 
   return 1;
 }
 
-static void bser_dtor(bser_t *bser)
-{
+static void bser_dtor(bser_t* bser) {
   free(bser->buf);
   bser->buf = NULL;
 }
 
-static int bser_long(bser_t *bser, int64_t val)
-{
+static int bser_long(bser_t* bser, int64_t val) {
   int8_t i8;
   int16_t i16;
   int32_t i32;
   int64_t i64;
   char sz;
   int size = INT_SIZE(val);
-  char *iptr;
+  char* iptr;
 
   switch (size) {
     case 1:
@@ -285,8 +343,7 @@
       iptr = (char*)&i64;
       break;
     default:
-      PyErr_SetString(PyExc_RuntimeError,
-          "Cannot represent this long value!?");
+      PyErr_SetString(PyExc_RuntimeError, "Cannot represent this long value!?");
       return 0;
   }
 
@@ -297,25 +354,24 @@
   return bser_append(bser, iptr, size);
 }
 
-static int bser_string(bser_t *bser, PyObject *sval)
-{
-  char *buf = NULL;
+static int bser_bytestring(bser_t* bser, PyObject* sval) {
+  char* buf = NULL;
   Py_ssize_t len;
   int res;
-  PyObject *utf = NULL;
+  PyObject* utf = NULL;
 
   if (PyUnicode_Check(sval)) {
     utf = PyUnicode_AsEncodedString(sval, "utf-8", "ignore");
     sval = utf;
   }
 
-  res = PyString_AsStringAndSize(sval, &buf, &len);
+  res = PyBytes_AsStringAndSize(sval, &buf, &len);
   if (res == -1) {
     res = 0;
     goto out;
   }
 
-  if (!bser_append(bser, &bser_string_hdr, sizeof(bser_string_hdr))) {
+  if (!bser_append(bser, &bser_bytestring_hdr, sizeof(bser_bytestring_hdr))) {
     res = 0;
     goto out;
   }
@@ -341,8 +397,7 @@
   return res;
 }
 
-static int bser_recursive(bser_t *bser, PyObject *val)
-{
+static int bser_recursive(bser_t* bser, PyObject* val) {
   if (PyBool_Check(val)) {
     if (val == Py_True) {
       return bser_append(bser, &bser_true, sizeof(bser_true));
@@ -354,19 +409,21 @@
     return bser_append(bser, &bser_null, sizeof(bser_null));
   }
 
+// Python 3 has one integer type.
+#if PY_MAJOR_VERSION < 3
   if (PyInt_Check(val)) {
     return bser_long(bser, PyInt_AS_LONG(val));
   }
+#endif // PY_MAJOR_VERSION < 3
 
   if (PyLong_Check(val)) {
     return bser_long(bser, PyLong_AsLongLong(val));
   }
 
-  if (PyString_Check(val) || PyUnicode_Check(val)) {
-    return bser_string(bser, val);
+  if (PyBytes_Check(val) || PyUnicode_Check(val)) {
+    return bser_bytestring(bser, val);
   }
 
-
   if (PyFloat_Check(val)) {
     double dval = PyFloat_AS_DOUBLE(val);
     char sz = BSER_REAL;
@@ -390,7 +447,7 @@
     }
 
     for (i = 0; i < len; i++) {
-      PyObject *ele = PyList_GET_ITEM(val, i);
+      PyObject* ele = PyList_GET_ITEM(val, i);
 
       if (!bser_recursive(bser, ele)) {
         return 0;
@@ -412,7 +469,7 @@
     }
 
     for (i = 0; i < len; i++) {
-      PyObject *ele = PyTuple_GET_ITEM(val, i);
+      PyObject* ele = PyTuple_GET_ITEM(val, i);
 
       if (!bser_recursive(bser, ele)) {
         return 0;
@@ -436,7 +493,7 @@
     }
 
     while (PyDict_Next(val, &pos, &key, &ele)) {
-      if (!bser_string(bser, key)) {
+      if (!bser_bytestring(bser, key)) {
         return 0;
       }
       if (!bser_recursive(bser, ele)) {
@@ -451,17 +508,25 @@
   return 0;
 }
 
-static PyObject *bser_dumps(PyObject *self, PyObject *args)
-{
+static PyObject* bser_dumps(PyObject* self, PyObject* args, PyObject* kw) {
   PyObject *val = NULL, *res;
   bser_t bser;
-  uint32_t len;
+  uint32_t len, bser_version = 1, bser_capabilities = 0;
+
+  static char* kw_list[] = {"val", "version", "capabilities", NULL};
 
-  if (!PyArg_ParseTuple(args, "O", &val)) {
+  if (!PyArg_ParseTupleAndKeywords(
+          args,
+          kw,
+          "O|ii:dumps",
+          kw_list,
+          &val,
+          &bser_version,
+          &bser_capabilities)) {
     return NULL;
   }
 
-  if (!bser_init(&bser)) {
+  if (!bser_init(&bser, bser_version, bser_capabilities)) {
     return PyErr_NoMemory();
   }
 
@@ -475,19 +540,25 @@
   }
 
   // Now fill in the overall length
-  len = bser.wpos - (sizeof(EMPTY_HEADER) - 1);
-  memcpy(bser.buf + 3, &len, sizeof(len));
+  if (bser_version == 1) {
+    len = bser.wpos - (sizeof(EMPTY_HEADER) - 1);
+    memcpy(bser.buf + 3, &len, sizeof(len));
+  } else {
+    len = bser.wpos - (sizeof(EMPTY_HEADER_V2) - 1);
+    // The BSER capabilities block comes before the PDU length
+    memcpy(bser.buf + 2, &bser_capabilities, sizeof(bser_capabilities));
+    memcpy(bser.buf + 7, &len, sizeof(len));
+  }
 
-  res = PyString_FromStringAndSize(bser.buf, bser.wpos);
+  res = PyBytes_FromStringAndSize(bser.buf, bser.wpos);
   bser_dtor(&bser);
 
   return res;
 }
 
-int bunser_int(const char **ptr, const char *end, int64_t *val)
-{
+int bunser_int(const char** ptr, const char* end, int64_t* val) {
   int needed;
-  const char *buf = *ptr;
+  const char* buf = *ptr;
   int8_t i8;
   int16_t i16;
   int32_t i32;
@@ -507,8 +578,8 @@
       needed = 9;
       break;
     default:
-      PyErr_Format(PyExc_ValueError,
-          "invalid bser int encoding 0x%02x", buf[0]);
+      PyErr_Format(
+          PyExc_ValueError, "invalid bser int encoding 0x%02x", buf[0]);
       return 0;
   }
   if (end - buf < needed) {
@@ -538,10 +609,12 @@
   }
 }
 
-static int bunser_string(const char **ptr, const char *end,
-    const char **start, int64_t *len)
-{
-  const char *buf = *ptr;
+static int bunser_bytestring(
+    const char** ptr,
+    const char* end,
+    const char** start,
+    int64_t* len) {
+  const char* buf = *ptr;
 
   // skip string marker
   buf++;
@@ -559,11 +632,12 @@
   return 1;
 }
 
-static PyObject *bunser_array(const char **ptr, const char *end, int mutable)
-{
-  const char *buf = *ptr;
+static PyObject*
+bunser_array(const char** ptr, const char* end, const unser_ctx_t* ctx) {
+  const char* buf = *ptr;
   int64_t nitems, i;
-  PyObject *res;
+  int mutable = ctx->mutable;
+  PyObject* res;
 
   // skip array header
   buf++;
@@ -584,7 +658,7 @@
   }
 
   for (i = 0; i < nitems; i++) {
-    PyObject *ele = bser_loads_recursive(ptr, end, mutable);
+    PyObject* ele = bser_loads_recursive(ptr, end, ctx);
 
     if (!ele) {
       Py_DECREF(res);
@@ -602,13 +676,13 @@
   return res;
 }
 
-static PyObject *bunser_object(const char **ptr, const char *end,
-    int mutable)
-{
-  const char *buf = *ptr;
+static PyObject*
+bunser_object(const char** ptr, const char* end, const unser_ctx_t* ctx) {
+  const char* buf = *ptr;
   int64_t nitems, i;
-  PyObject *res;
-  bserObject *obj;
+  int mutable = ctx->mutable;
+  PyObject* res;
+  bserObject* obj;
 
   // skip array header
   buf++;
@@ -627,12 +701,12 @@
   }
 
   for (i = 0; i < nitems; i++) {
-    const char *keystr;
+    const char* keystr;
     int64_t keylen;
-    PyObject *key;
-    PyObject *ele;
+    PyObject* key;
+    PyObject* ele;
 
-    if (!bunser_string(ptr, end, &keystr, &keylen)) {
+    if (!bunser_bytestring(ptr, end, &keystr, &keylen)) {
       Py_DECREF(res);
       return NULL;
     }
@@ -643,13 +717,24 @@
       return NULL;
     }
 
-    key = PyString_FromStringAndSize(keystr, (Py_ssize_t)keylen);
+    if (mutable) {
+      // This will interpret the key as UTF-8.
+      key = PyUnicode_FromStringAndSize(keystr, (Py_ssize_t)keylen);
+    } else {
+      // For immutable objects we'll manage key lookups, so we can avoid going
+      // through the Unicode APIs. This avoids a potentially expensive and
+      // definitely unnecessary conversion to UTF-16 and back for Python 2.
+      // TODO: On Python 3 the Unicode APIs are smarter: we might be able to use
+      // Unicode keys there without an appreciable performance loss.
+      key = PyBytes_FromStringAndSize(keystr, (Py_ssize_t)keylen);
+    }
+
     if (!key) {
       Py_DECREF(res);
       return NULL;
     }
 
-    ele = bser_loads_recursive(ptr, end, mutable);
+    ele = bser_loads_recursive(ptr, end, ctx);
 
     if (!ele) {
       Py_DECREF(key);
@@ -671,14 +756,24 @@
   return res;
 }
 
-static PyObject *bunser_template(const char **ptr, const char *end,
-    int mutable)
-{
-  const char *buf = *ptr;
+static PyObject*
+bunser_template(const char** ptr, const char* end, const unser_ctx_t* ctx) {
+  const char* buf = *ptr;
   int64_t nitems, i;
-  PyObject *arrval;
-  PyObject *keys;
+  int mutable = ctx->mutable;
+  PyObject* arrval;
+  PyObject* keys;
   Py_ssize_t numkeys, keyidx;
+  unser_ctx_t keys_ctx = {0};
+  if (mutable) {
+    keys_ctx.mutable = 1;
+    // Decode keys as UTF-8 in this case.
+    keys_ctx.value_encoding = "utf-8";
+    keys_ctx.value_errors = "strict";
+  } else {
+    // Treat keys as bytestrings in this case -- we'll do Unicode conversions at
+    // lookup time.
+  }
 
   if (buf[1] != BSER_ARRAY) {
     PyErr_Format(PyExc_ValueError, "Expect ARRAY to follow TEMPLATE");
@@ -689,8 +784,9 @@
   buf++;
   *ptr = buf;
 
-  // Load template keys
-  keys = bunser_array(ptr, end, mutable);
+  // Load template keys.
+  // For keys we don't want to do any decoding right now.
+  keys = bunser_array(ptr, end, &keys_ctx);
   if (!keys) {
     return NULL;
   }
@@ -716,8 +812,8 @@
   }
 
   for (i = 0; i < nitems; i++) {
-    PyObject *dict = NULL;
-    bserObject *obj = NULL;
+    PyObject* dict = NULL;
+    bserObject* obj = NULL;
 
     if (mutable) {
       dict = PyDict_New();
@@ -731,22 +827,22 @@
       dict = (PyObject*)obj;
     }
     if (!dict) {
-fail:
+    fail:
       Py_DECREF(keys);
       Py_DECREF(arrval);
       return NULL;
     }
 
     for (keyidx = 0; keyidx < numkeys; keyidx++) {
-      PyObject *key;
-      PyObject *ele;
+      PyObject* key;
+      PyObject* ele;
 
       if (**ptr == BSER_SKIP) {
         *ptr = *ptr + 1;
         ele = Py_None;
         Py_INCREF(ele);
       } else {
-        ele = bser_loads_recursive(ptr, end, mutable);
+        ele = bser_loads_recursive(ptr, end, ctx);
       }
 
       if (!ele) {
@@ -772,34 +868,38 @@
   return arrval;
 }
 
-static PyObject *bser_loads_recursive(const char **ptr, const char *end,
-    int mutable)
-{
-  const char *buf = *ptr;
+static PyObject* bser_loads_recursive(
+    const char** ptr,
+    const char* end,
+    const unser_ctx_t* ctx) {
+  const char* buf = *ptr;
 
   switch (buf[0]) {
     case BSER_INT8:
     case BSER_INT16:
     case BSER_INT32:
-    case BSER_INT64:
-      {
-        int64_t ival;
-        if (!bunser_int(ptr, end, &ival)) {
-          return NULL;
-        }
-        if (ival < LONG_MIN || ival > LONG_MAX) {
-          return PyLong_FromLongLong(ival);
-        }
-        return PyInt_FromSsize_t(Py_SAFE_DOWNCAST(ival, int64_t, Py_ssize_t));
+    case BSER_INT64: {
+      int64_t ival;
+      if (!bunser_int(ptr, end, &ival)) {
+        return NULL;
       }
+// Python 3 has one integer type.
+#if PY_MAJOR_VERSION >= 3
+      return PyLong_FromLongLong(ival);
+#else
+      if (ival < LONG_MIN || ival > LONG_MAX) {
+        return PyLong_FromLongLong(ival);
+      }
+      return PyInt_FromSsize_t(Py_SAFE_DOWNCAST(ival, int64_t, Py_ssize_t));
+#endif // PY_MAJOR_VERSION >= 3
+    }
 
-    case BSER_REAL:
-      {
-        double dval;
-        memcpy(&dval, buf + 1, sizeof(dval));
-        *ptr = buf + 1 + sizeof(double);
-        return PyFloat_FromDouble(dval);
-      }
+    case BSER_REAL: {
+      double dval;
+      memcpy(&dval, buf + 1, sizeof(dval));
+      *ptr = buf + 1 + sizeof(double);
+      return PyFloat_FromDouble(dval);
+    }
 
     case BSER_TRUE:
       *ptr = buf + 1;
@@ -816,31 +916,51 @@
       Py_INCREF(Py_None);
       return Py_None;
 
-    case BSER_STRING:
-      {
-        const char *start;
-        int64_t len;
+    case BSER_BYTESTRING: {
+      const char* start;
+      int64_t len;
 
-        if (!bunser_string(ptr, end, &start, &len)) {
-          return NULL;
-        }
+      if (!bunser_bytestring(ptr, end, &start, &len)) {
+        return NULL;
+      }
 
-        if (len > LONG_MAX) {
-          PyErr_Format(PyExc_ValueError, "string too long for python");
-          return NULL;
-        }
-
-        return PyString_FromStringAndSize(start, (long)len);
+      if (len > LONG_MAX) {
+        PyErr_Format(PyExc_ValueError, "string too long for python");
+        return NULL;
       }
 
+      if (ctx->value_encoding != NULL) {
+        return PyUnicode_Decode(
+            start, (long)len, ctx->value_encoding, ctx->value_errors);
+      } else {
+        return PyBytes_FromStringAndSize(start, (long)len);
+      }
+    }
+
+    case BSER_UTF8STRING: {
+      const char* start;
+      int64_t len;
+
+      if (!bunser_bytestring(ptr, end, &start, &len)) {
+        return NULL;
+      }
+
+      if (len > LONG_MAX) {
+        PyErr_Format(PyExc_ValueError, "string too long for python");
+        return NULL;
+      }
+
+      return PyUnicode_Decode(start, (long)len, "utf-8", "strict");
+    }
+
     case BSER_ARRAY:
-      return bunser_array(ptr, end, mutable);
+      return bunser_array(ptr, end, ctx);
 
     case BSER_OBJECT:
-      return bunser_object(ptr, end, mutable);
+      return bunser_object(ptr, end, ctx);
 
     case BSER_TEMPLATE:
-      return bunser_template(ptr, end, mutable);
+      return bunser_template(ptr, end, ctx);
 
     default:
       PyErr_Format(PyExc_ValueError, "unhandled bser opcode 0x%02x", buf[0]);
@@ -849,102 +969,244 @@
   return NULL;
 }
 
-// Expected use case is to read a packet from the socket and
-// then call bser.pdu_len on the packet.  It returns the total
-// length of the entire response that the peer is sending,
-// including the bytes already received.  This allows the client
-// to compute the data size it needs to read before it can
-// decode the data
-static PyObject *bser_pdu_len(PyObject *self, PyObject *args)
-{
-  const char *start = NULL;
-  const char *data = NULL;
-  int datalen = 0;
-  const char *end;
-  int64_t expected_len, total_len;
+static int _pdu_info_helper(
+    const char* data,
+    const char* end,
+    uint32_t* bser_version_out,
+    uint32_t* bser_capabilities_out,
+    int64_t* expected_len_out,
+    off_t* position_out) {
+  uint32_t bser_version;
+  uint32_t bser_capabilities = 0;
+  int64_t expected_len;
 
-  if (!PyArg_ParseTuple(args, "s#", &start, &datalen)) {
-    return NULL;
-  }
-  data = start;
-  end = data + datalen;
-
+  const char* start;
+  start = data;
   // Validate the header and length
-  if (memcmp(data, EMPTY_HEADER, 2) != 0) {
+  if (memcmp(data, EMPTY_HEADER, 2) == 0) {
+    bser_version = 1;
+  } else if (memcmp(data, EMPTY_HEADER_V2, 2) == 0) {
+    bser_version = 2;
+  } else {
     PyErr_SetString(PyExc_ValueError, "invalid bser header");
-    return NULL;
+    return 0;
   }
 
   data += 2;
 
+  if (bser_version == 2) {
+    // Expect an integer telling us what capabilities are supported by the
+    // remote server (currently unused).
+    // Read the raw capabilities word directly from the buffer; memcpy
+    // cannot fail, so no error check is needed.
+    memcpy(&bser_capabilities, data, sizeof(bser_capabilities));
+    data += sizeof(bser_capabilities);
+  }
+
   // Expect an integer telling us how big the rest of the data
   // should be
   if (!bunser_int(&data, end, &expected_len)) {
+    return 0;
+  }
+
+  *bser_version_out = bser_version;
+  *bser_capabilities_out = (uint32_t)bser_capabilities;
+  *expected_len_out = expected_len;
+  *position_out = (off_t)(data - start);
+  return 1;
+}
+
+// This function parses the PDU header and provides info about the packet
+// Returns false if unsuccessful
+static int pdu_info_helper(
+    PyObject* self,
+    PyObject* args,
+    uint32_t* bser_version_out,
+    uint32_t* bser_capabilities_out,
+    int64_t* total_len_out) {
+  const char* start = NULL;
+  const char* data = NULL;
+  int datalen = 0;
+  const char* end;
+  int64_t expected_len;
+  off_t position;
+
+  if (!PyArg_ParseTuple(args, "s#", &start, &datalen)) {
+    return 0;
+  }
+  data = start;
+  end = data + datalen;
+
+  if (!_pdu_info_helper(
+          data,
+          end,
+          bser_version_out,
+          bser_capabilities_out,
+          &expected_len,
+          &position)) {
+    return 0;
+  }
+  *total_len_out = (int64_t)(expected_len + position);
+  return 1;
+}
+
+// Expected use case is to read a packet from the socket and then call
+// bser.pdu_info on the packet.  It returns the BSER version, BSER capabilities,
+// and the total length of the entire response that the peer is sending,
+// including the bytes already received. This allows the client to compute the
+// data size it needs to read before it can decode the data.
+static PyObject* bser_pdu_info(PyObject* self, PyObject* args) {
+  uint32_t version, capabilities;
+  int64_t total_len;
+  if (!pdu_info_helper(self, args, &version, &capabilities, &total_len)) {
+    return NULL;
+  }
+  return Py_BuildValue("kkL", version, capabilities, total_len);
+}
+
+static PyObject* bser_pdu_len(PyObject* self, PyObject* args) {
+  uint32_t version, capabilities;
+  int64_t total_len;
+  if (!pdu_info_helper(self, args, &version, &capabilities, &total_len)) {
+    return NULL;
+  }
+  return Py_BuildValue("L", total_len);
+}
+
+static PyObject* bser_loads(PyObject* self, PyObject* args, PyObject* kw) {
+  const char* data = NULL;
+  int datalen = 0;
+  const char* start;
+  const char* end;
+  int64_t expected_len;
+  off_t position;
+  PyObject* mutable_obj = NULL;
+  const char* value_encoding = NULL;
+  const char* value_errors = NULL;
+  unser_ctx_t ctx = {1, 0};
+
+  static char* kw_list[] = {
+      "buf", "mutable", "value_encoding", "value_errors", NULL};
+
+  if (!PyArg_ParseTupleAndKeywords(
+          args,
+          kw,
+          "s#|Ozz:loads",
+          kw_list,
+          &start,
+          &datalen,
+          &mutable_obj,
+          &value_encoding,
+          &value_errors)) {
     return NULL;
   }
 
-  total_len = expected_len + (data - start);
-  if (total_len > LONG_MAX) {
-    return PyLong_FromLongLong(total_len);
+  if (mutable_obj) {
+    ctx.mutable = PyObject_IsTrue(mutable_obj) > 0 ? 1 : 0;
   }
-  return PyInt_FromLong((long)total_len);
-}
-
-static PyObject *bser_loads(PyObject *self, PyObject *args)
-{
-  const char *data = NULL;
-  int datalen = 0;
-  const char *end;
-  int64_t expected_len;
-  int mutable = 1;
-  PyObject *mutable_obj = NULL;
-
-  if (!PyArg_ParseTuple(args, "s#|O:loads", &data, &datalen, &mutable_obj)) {
-    return NULL;
+  ctx.value_encoding = value_encoding;
+  if (value_encoding == NULL) {
+    ctx.value_errors = NULL;
+  } else if (value_errors == NULL) {
+    ctx.value_errors = "strict";
+  } else {
+    ctx.value_errors = value_errors;
   }
-  if (mutable_obj) {
-    mutable = PyObject_IsTrue(mutable_obj) > 0 ? 1 : 0;
-  }
-
+  data = start;
   end = data + datalen;
 
-  // Validate the header and length
-  if (memcmp(data, EMPTY_HEADER, 2) != 0) {
-    PyErr_SetString(PyExc_ValueError, "invalid bser header");
+  if (!_pdu_info_helper(
+          data,
+          end,
+          &ctx.bser_version,
+          &ctx.bser_capabilities,
+          &expected_len,
+          &position)) {
     return NULL;
   }
 
-  data += 2;
-
-  // Expect an integer telling us how big the rest of the data
-  // should be
-  if (!bunser_int(&data, end, &expected_len)) {
-    return NULL;
-  }
-
+  data = start + position;
   // Verify
   if (expected_len + data != end) {
     PyErr_SetString(PyExc_ValueError, "bser data len != header len");
     return NULL;
   }
 
-  return bser_loads_recursive(&data, end, mutable);
+  return bser_loads_recursive(&data, end, &ctx);
 }
 
+static PyObject* bser_load(PyObject* self, PyObject* args, PyObject* kw) {
+  PyObject *load, *string;
+  PyObject* fp = NULL;
+  PyObject* mutable_obj = NULL;
+  const char* value_encoding = NULL;
+  const char* value_errors = NULL;
+
+  static char* kw_list[] = {
+      "fp", "mutable", "value_encoding", "value_errors", NULL};
+
+  if (!PyArg_ParseTupleAndKeywords(
+          args,
+          kw,
+          "OOzz:load",
+          kw_list,
+          &fp,
+          &mutable_obj,
+          &value_encoding,
+          &value_errors)) {
+    return NULL;
+  }
+
+  load = PyImport_ImportModule("pywatchman.load");
+  if (load == NULL) {
+    return NULL;
+  }
+  string = PyObject_CallMethod(
+      load, "load", "OOzz", fp, mutable_obj, value_encoding, value_errors);
+  Py_DECREF(load);
+  return string;
+}
+
+// clang-format off
 static PyMethodDef bser_methods[] = {
-  {"loads",  bser_loads, METH_VARARGS, "Deserialize string."},
-  {"pdu_len", bser_pdu_len, METH_VARARGS, "Extract PDU length."},
-  {"dumps",  bser_dumps, METH_VARARGS, "Serialize string."},
+  {"loads", (PyCFunction)bser_loads, METH_VARARGS | METH_KEYWORDS,
+   "Deserialize string."},
+  {"load", (PyCFunction)bser_load, METH_VARARGS | METH_KEYWORDS,
+   "Deserialize a file object"},
+  {"pdu_info", (PyCFunction)bser_pdu_info, METH_VARARGS,
+   "Extract PDU information."},
+  {"pdu_len", (PyCFunction)bser_pdu_len, METH_VARARGS,
+   "Extract total PDU length."},
+  {"dumps",  (PyCFunction)bser_dumps, METH_VARARGS | METH_KEYWORDS,
+   "Serialize string."},
   {NULL, NULL, 0, NULL}
 };
 
-PyMODINIT_FUNC initbser(void)
-{
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef bser_module = {
+  PyModuleDef_HEAD_INIT,
+  "bser",
+  "Efficient encoding and decoding of BSER.",
+  -1,
+  bser_methods
+};
+// clang-format on
+
+PyMODINIT_FUNC PyInit_bser(void) {
+  PyObject* mod;
+
+  mod = PyModule_Create(&bser_module);
+  PyType_Ready(&bserObjectType);
+
+  return mod;
+}
+#else
+
+PyMODINIT_FUNC initbser(void) {
   (void)Py_InitModule("bser", bser_methods);
   PyType_Ready(&bserObjectType);
 }
+#endif // PY_MAJOR_VERSION >= 3
 
 /* vim:ts=2:sw=2:et:
  */
-
-// no-check-code -- this is a 3rd party library
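Taken together, the rewritten extension exposes keyword-aware entry points. A hedged usage sketch, exercising the pure-Python fallback (the keyword names follow the kw_list arrays above, and pybser is assumed to mirror the C signatures, as the package's own load helper requires):

    from hgext.fsmonitor.pywatchman import pybser as bser

    # Serialize with an explicit protocol version and capabilities word.
    pdu = bser.dumps({'path': b'foo.c'}, version=2, capabilities=0)

    # pdu_info reports (version, capabilities, total PDU length) so a
    # client can size its read buffer before decoding.
    version, caps, total_len = bser.pdu_info(pdu)
    assert version == 2 and total_len == len(pdu)

    # Values decode to bytes by default; pass an encoding to get text.
    obj = bser.loads(pdu, True, 'utf-8', None)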
--- a/hgext/fsmonitor/pywatchman/capabilities.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/fsmonitor/pywatchman/capabilities.py	Wed Jan 18 11:43:36 2017 -0500
@@ -26,6 +26,11 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+# no unicode literals
+
 import re
 
 def parse_version(vstr):
@@ -65,5 +70,3 @@
             vers['error'] = 'client required capability `' + name + \
                             '` is not supported by this server'
     return vers
-
-# no-check-code -- this is a 3rd party library
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fsmonitor/pywatchman/compat.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,65 @@
+# Copyright 2016-present Facebook, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#  * Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+#
+#  * Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+#  * Neither the name Facebook nor the names of its contributors may be used to
+#    endorse or promote products derived from this software without specific
+#    prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+# no unicode literals
+
+'''Compatibility module across Python 2 and 3.'''
+
+import sys
+
+PYTHON3 = sys.version_info >= (3, 0)
+
+# This is adapted from https://bitbucket.org/gutworth/six, and used under the
+# MIT license. See LICENSE for a full copyright notice.
+if PYTHON3:
+    def reraise(tp, value, tb=None):
+        try:
+            if value is None:
+                value = tp()
+            if value.__traceback__ is not tb:
+                raise value.with_traceback(tb)
+            raise value
+        finally:
+            value = None
+            tb = None
+else:
+    exec('''
+def reraise(tp, value, tb=None):
+    try:
+        raise tp, value, tb
+    finally:
+        tb = None
+'''.strip())
+
+if PYTHON3:
+    UNICODE = str
+else:
+    UNICODE = unicode
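A short usage sketch for reraise, which is the point of the six-derived shim above: capture sys.exc_info() in an except block, then re-raise later with the original traceback intact on both Python 2 and 3:

    import sys
    from hgext.fsmonitor.pywatchman import compat

    try:
        1 / 0
    except ZeroDivisionError:
        stashed = sys.exc_info()

    compat.reraise(*stashed)   # re-raises with the original traceback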
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fsmonitor/pywatchman/encoding.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,73 @@
+# Copyright 2016-present Facebook, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#  * Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+#
+#  * Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+#  * Neither the name Facebook nor the names of its contributors may be used to
+#    endorse or promote products derived from this software without specific
+#    prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+# no unicode literals
+
+'''Module to deal with filename encoding on the local system, as returned by
+Watchman.'''
+
+import sys
+
+from . import (
+    compat,
+)
+
+if compat.PYTHON3:
+    default_local_errors = 'surrogateescape'
+
+    def get_local_encoding():
+        if sys.platform == 'win32':
+            # Watchman always returns UTF-8 encoded strings on Windows.
+            return 'utf-8'
+        # On the Python 3 versions we support, sys.getfilesystemencoding never
+        # returns None.
+        return sys.getfilesystemencoding()
+else:
+    # Python 2 doesn't support surrogateescape, so use 'strict' by
+    # default. Users can register a custom surrogateescape error handler and use
+    # that if they so desire.
+    default_local_errors = 'strict'
+
+    def get_local_encoding():
+        if sys.platform == 'win32':
+            # Watchman always returns UTF-8 encoded strings on Windows.
+            return 'utf-8'
+        fsencoding = sys.getfilesystemencoding()
+        if fsencoding is None:
+            # This is very unlikely to happen, but if it does, just use UTF-8
+            fsencoding = 'utf-8'
+        return fsencoding
+
+def encode_local(s):
+    return s.encode(get_local_encoding(), default_local_errors)
+
+def decode_local(bs):
+    return bs.decode(get_local_encoding(), default_local_errors)
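Typical use of the helpers above is round-tripping a Watchman-returned filename through the local filesystem encoding (the byte value is illustrative):

    from hgext.fsmonitor.pywatchman import encoding

    raw = b'dir/file.c'                       # as returned by Watchman
    path = encoding.decode_local(raw)         # bytes -> text
    assert encoding.encode_local(path) == raw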
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fsmonitor/pywatchman/load.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,107 @@
+# Copyright 2016 Facebook, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#  * Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+#
+#  * Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+#  * Neither the name Facebook nor the names of its contributors may be used to
+#    endorse or promote products derived from this software without specific
+#    prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+# no unicode literals
+
+try:
+    from . import bser
+except ImportError:
+    from . import pybser as bser
+
+import ctypes
+
+EMPTY_HEADER = b"\x00\x01\x05\x00\x00\x00\x00"
+
+
+def _read_bytes(fp, buf):
+    """Read bytes from a file-like object
+
+    @param fp: File-like object that implements read(int)
+    @type fp: file
+
+    @param buf: Buffer to read into
+    @type buf: bytes
+
+    @return: the number of bytes actually read into buf
+    """
+
+    # Read repeatedly until buf is full or the stream hits EOF.
+    offset = 0
+    remaining = len(buf)
+    while remaining > 0:
+        l = fp.readinto((ctypes.c_char * remaining).from_buffer(buf, offset))
+        if l is None or l == 0:
+            return offset
+        offset += l
+        remaining -= l
+    return offset
+
+
+def load(fp, mutable=True, value_encoding=None, value_errors=None):
+    """Deserialize a BSER-encoded blob.
+
+    @param fp: The file-like object to deserialize from.
+    @type fp: file
+
+    @param mutable: Whether to return mutable results.
+    @type mutable: bool
+
+    @param value_encoding: Optional codec to use to decode values. If
+                           unspecified or None, return values as bytestrings.
+    @type value_encoding: str
+
+    @param value_errors: Optional error handler for codec. 'strict' by default.
+                         The other most common argument is 'surrogateescape' on
+                         Python 3. If value_encoding is None, this is ignored.
+    @type value_errors: str
+    """
+    buf = ctypes.create_string_buffer(8192)
+    SNIFF_BUFFER_SIZE = len(EMPTY_HEADER)
+    header = (ctypes.c_char * SNIFF_BUFFER_SIZE).from_buffer(buf)
+    read_len = _read_bytes(fp, header)
+    if read_len < len(header):
+        return None
+
+    total_len = bser.pdu_len(buf)
+    if total_len > len(buf):
+        ctypes.resize(buf, total_len)
+
+    body = (ctypes.c_char * (total_len - len(header))).from_buffer(
+        buf, len(header))
+    read_len = _read_bytes(fp, body)
+    if read_len < len(body):
+        raise RuntimeError('bser data ended early')
+
+    return bser.loads(
+        (ctypes.c_char * total_len).from_buffer(buf, 0),
+        mutable,
+        value_encoding,
+        value_errors)
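An end-to-end example of the new load() helper, with io.BytesIO standing in for a socket-backed file object (a sketch; it assumes the bser-or-pybser backend behaves as declared above):

    import io
    from hgext.fsmonitor.pywatchman import load, pybser as bser

    pdu = bser.dumps({'files': []})              # a version-1 PDU
    obj = load.load(io.BytesIO(pdu), mutable=True)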
--- a/hgext/fsmonitor/pywatchman/msc_stdint.h	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,260 +0,0 @@
-// no-check-code
-// ISO C9x  compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 
-// 
-//  Copyright (c) 2006-2013 Alexander Chemeris
-// 
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// 
-//   1. Redistributions of source code must retain the above copyright notice,
-//      this list of conditions and the following disclaimer.
-// 
-//   2. Redistributions in binary form must reproduce the above copyright
-//      notice, this list of conditions and the following disclaimer in the
-//      documentation and/or other materials provided with the distribution.
-// 
-//   3. Neither the name of the product nor the names of its contributors may
-//      be used to endorse or promote products derived from this software
-//      without specific prior written permission.
-// 
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// 
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_STDINT_H_ // [
-#define _MSC_STDINT_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#if _MSC_VER >= 1600 // [
-#include <stdint.h>
-#else // ] _MSC_VER >= 1600 [
-
-#include <limits.h>
-
-// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
-// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
-// or compiler give many errors like this:
-//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
-#ifdef __cplusplus
-extern "C" {
-#endif
-#  include <wchar.h>
-#ifdef __cplusplus
-}
-#endif
-
-// Define _W64 macros to mark types changing their size, like intptr_t.
-#ifndef _W64
-#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
-#     define _W64 __w64
-#  else
-#     define _W64
-#  endif
-#endif
-
-
-// 7.18.1 Integer types
-
-// 7.18.1.1 Exact-width integer types
-
-// Visual Studio 6 and Embedded Visual C++ 4 doesn't
-// realize that, e.g. char has the same size as __int8
-// so we give up on __intX for them.
-#if (_MSC_VER < 1300)
-   typedef signed char       int8_t;
-   typedef signed short      int16_t;
-   typedef signed int        int32_t;
-   typedef unsigned char     uint8_t;
-   typedef unsigned short    uint16_t;
-   typedef unsigned int      uint32_t;
-#else
-   typedef signed __int8     int8_t;
-   typedef signed __int16    int16_t;
-   typedef signed __int32    int32_t;
-   typedef unsigned __int8   uint8_t;
-   typedef unsigned __int16  uint16_t;
-   typedef unsigned __int32  uint32_t;
-#endif
-typedef signed __int64       int64_t;
-typedef unsigned __int64     uint64_t;
-
-
-// 7.18.1.2 Minimum-width integer types
-typedef int8_t    int_least8_t;
-typedef int16_t   int_least16_t;
-typedef int32_t   int_least32_t;
-typedef int64_t   int_least64_t;
-typedef uint8_t   uint_least8_t;
-typedef uint16_t  uint_least16_t;
-typedef uint32_t  uint_least32_t;
-typedef uint64_t  uint_least64_t;
-
-// 7.18.1.3 Fastest minimum-width integer types
-typedef int8_t    int_fast8_t;
-typedef int16_t   int_fast16_t;
-typedef int32_t   int_fast32_t;
-typedef int64_t   int_fast64_t;
-typedef uint8_t   uint_fast8_t;
-typedef uint16_t  uint_fast16_t;
-typedef uint32_t  uint_fast32_t;
-typedef uint64_t  uint_fast64_t;
-
-// 7.18.1.4 Integer types capable of holding object pointers
-#ifdef _WIN64 // [
-   typedef signed __int64    intptr_t;
-   typedef unsigned __int64  uintptr_t;
-#else // _WIN64 ][
-   typedef _W64 signed int   intptr_t;
-   typedef _W64 unsigned int uintptr_t;
-#endif // _WIN64 ]
-
-// 7.18.1.5 Greatest-width integer types
-typedef int64_t   intmax_t;
-typedef uint64_t  uintmax_t;
-
-
-// 7.18.2 Limits of specified-width integer types
-
-#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [   See footnote 220 at page 257 and footnote 221 at page 259
-
-// 7.18.2.1 Limits of exact-width integer types
-#define INT8_MIN     ((int8_t)_I8_MIN)
-#define INT8_MAX     _I8_MAX
-#define INT16_MIN    ((int16_t)_I16_MIN)
-#define INT16_MAX    _I16_MAX
-#define INT32_MIN    ((int32_t)_I32_MIN)
-#define INT32_MAX    _I32_MAX
-#define INT64_MIN    ((int64_t)_I64_MIN)
-#define INT64_MAX    _I64_MAX
-#define UINT8_MAX    _UI8_MAX
-#define UINT16_MAX   _UI16_MAX
-#define UINT32_MAX   _UI32_MAX
-#define UINT64_MAX   _UI64_MAX
-
-// 7.18.2.2 Limits of minimum-width integer types
-#define INT_LEAST8_MIN    INT8_MIN
-#define INT_LEAST8_MAX    INT8_MAX
-#define INT_LEAST16_MIN   INT16_MIN
-#define INT_LEAST16_MAX   INT16_MAX
-#define INT_LEAST32_MIN   INT32_MIN
-#define INT_LEAST32_MAX   INT32_MAX
-#define INT_LEAST64_MIN   INT64_MIN
-#define INT_LEAST64_MAX   INT64_MAX
-#define UINT_LEAST8_MAX   UINT8_MAX
-#define UINT_LEAST16_MAX  UINT16_MAX
-#define UINT_LEAST32_MAX  UINT32_MAX
-#define UINT_LEAST64_MAX  UINT64_MAX
-
-// 7.18.2.3 Limits of fastest minimum-width integer types
-#define INT_FAST8_MIN    INT8_MIN
-#define INT_FAST8_MAX    INT8_MAX
-#define INT_FAST16_MIN   INT16_MIN
-#define INT_FAST16_MAX   INT16_MAX
-#define INT_FAST32_MIN   INT32_MIN
-#define INT_FAST32_MAX   INT32_MAX
-#define INT_FAST64_MIN   INT64_MIN
-#define INT_FAST64_MAX   INT64_MAX
-#define UINT_FAST8_MAX   UINT8_MAX
-#define UINT_FAST16_MAX  UINT16_MAX
-#define UINT_FAST32_MAX  UINT32_MAX
-#define UINT_FAST64_MAX  UINT64_MAX
-
-// 7.18.2.4 Limits of integer types capable of holding object pointers
-#ifdef _WIN64 // [
-#  define INTPTR_MIN   INT64_MIN
-#  define INTPTR_MAX   INT64_MAX
-#  define UINTPTR_MAX  UINT64_MAX
-#else // _WIN64 ][
-#  define INTPTR_MIN   INT32_MIN
-#  define INTPTR_MAX   INT32_MAX
-#  define UINTPTR_MAX  UINT32_MAX
-#endif // _WIN64 ]
-
-// 7.18.2.5 Limits of greatest-width integer types
-#define INTMAX_MIN   INT64_MIN
-#define INTMAX_MAX   INT64_MAX
-#define UINTMAX_MAX  UINT64_MAX
-
-// 7.18.3 Limits of other integer types
-
-#ifdef _WIN64 // [
-#  define PTRDIFF_MIN  _I64_MIN
-#  define PTRDIFF_MAX  _I64_MAX
-#else  // _WIN64 ][
-#  define PTRDIFF_MIN  _I32_MIN
-#  define PTRDIFF_MAX  _I32_MAX
-#endif  // _WIN64 ]
-
-#define SIG_ATOMIC_MIN  INT_MIN
-#define SIG_ATOMIC_MAX  INT_MAX
-
-#ifndef SIZE_MAX // [
-#  ifdef _WIN64 // [
-#     define SIZE_MAX  _UI64_MAX
-#  else // _WIN64 ][
-#     define SIZE_MAX  _UI32_MAX
-#  endif // _WIN64 ]
-#endif // SIZE_MAX ]
-
-// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
-#ifndef WCHAR_MIN // [
-#  define WCHAR_MIN  0
-#endif  // WCHAR_MIN ]
-#ifndef WCHAR_MAX // [
-#  define WCHAR_MAX  _UI16_MAX
-#endif  // WCHAR_MAX ]
-
-#define WINT_MIN  0
-#define WINT_MAX  _UI16_MAX
-
-#endif // __STDC_LIMIT_MACROS ]
-
-
-// 7.18.4 Limits of other integer types
-
-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [   See footnote 224 at page 260
-
-// 7.18.4.1 Macros for minimum-width integer constants
-
-#define INT8_C(val)  val##i8
-#define INT16_C(val) val##i16
-#define INT32_C(val) val##i32
-#define INT64_C(val) val##i64
-
-#define UINT8_C(val)  val##ui8
-#define UINT16_C(val) val##ui16
-#define UINT32_C(val) val##ui32
-#define UINT64_C(val) val##ui64
-
-// 7.18.4.2 Macros for greatest-width integer constants
-// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>.
-// Check out Issue 9 for the details.
-#ifndef INTMAX_C //   [
-#  define INTMAX_C   INT64_C
-#endif // INTMAX_C    ]
-#ifndef UINTMAX_C //  [
-#  define UINTMAX_C  UINT64_C
-#endif // UINTMAX_C   ]
-
-#endif // __STDC_CONSTANT_MACROS ]
-
-#endif // _MSC_VER >= 1600 ]
-
-#endif // _MSC_STDINT_H_ ]
--- a/hgext/fsmonitor/pywatchman/pybser.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/fsmonitor/pywatchman/pybser.py	Wed Jan 18 11:43:36 2017 -0500
@@ -26,33 +26,51 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+# no unicode literals
+
+import binascii
 import collections
 import ctypes
 import struct
 import sys
 
-BSER_ARRAY = '\x00'
-BSER_OBJECT = '\x01'
-BSER_STRING = '\x02'
-BSER_INT8 = '\x03'
-BSER_INT16 = '\x04'
-BSER_INT32 = '\x05'
-BSER_INT64 = '\x06'
-BSER_REAL = '\x07'
-BSER_TRUE = '\x08'
-BSER_FALSE = '\x09'
-BSER_NULL = '\x0a'
-BSER_TEMPLATE = '\x0b'
-BSER_SKIP = '\x0c'
+from . import (
+    compat,
+)
+
+BSER_ARRAY = b'\x00'
+BSER_OBJECT = b'\x01'
+BSER_BYTESTRING = b'\x02'
+BSER_INT8 = b'\x03'
+BSER_INT16 = b'\x04'
+BSER_INT32 = b'\x05'
+BSER_INT64 = b'\x06'
+BSER_REAL = b'\x07'
+BSER_TRUE = b'\x08'
+BSER_FALSE = b'\x09'
+BSER_NULL = b'\x0a'
+BSER_TEMPLATE = b'\x0b'
+BSER_SKIP = b'\x0c'
+BSER_UTF8STRING = b'\x0d'
+
+if compat.PYTHON3:
+    STRING_TYPES = (str, bytes)
+    unicode = str
+    def tobytes(i):
+        return str(i).encode('ascii')
+    long = int
+else:
+    STRING_TYPES = (unicode, str)
+    tobytes = bytes
 
 # Leave room for the serialization header, which includes
 # our overall length.  To make things simpler, we'll use an
 # int32 for the header
-EMPTY_HEADER = "\x00\x01\x05\x00\x00\x00\x00"
-
-# Python 3 conditional for supporting Python 2's int/long types
-if sys.version_info > (3,):
-    long = int
+EMPTY_HEADER = b"\x00\x01\x05\x00\x00\x00\x00"
+EMPTY_HEADER_V2 = b"\x00\x02\x00\x00\x00\x00\x05\x00\x00\x00\x00"
 
 def _int_size(x):
     """Return the smallest size int that can store the value"""
@@ -67,13 +85,28 @@
     else:
         raise RuntimeError('Cannot represent value: ' + str(x))
 
+def _buf_pos(buf, pos):
+    ret = buf[pos]
+    # In Python 2, buf is a str array so buf[pos] is a string. In Python 3, buf
+    # is a bytes array and buf[pos] is an integer.
+    if compat.PYTHON3:
+        ret = bytes((ret,))
+    return ret
 
 class _bser_buffer(object):
 
-    def __init__(self):
+    def __init__(self, version):
+        self.bser_version = version
         self.buf = ctypes.create_string_buffer(8192)
-        struct.pack_into(str(len(EMPTY_HEADER)) + 's', self.buf, 0, EMPTY_HEADER)
-        self.wpos = len(EMPTY_HEADER)
+        if self.bser_version == 1:
+            struct.pack_into(tobytes(len(EMPTY_HEADER)) + b's', self.buf, 0,
+                             EMPTY_HEADER)
+            self.wpos = len(EMPTY_HEADER)
+        else:
+            assert self.bser_version == 2
+            struct.pack_into(tobytes(len(EMPTY_HEADER_V2)) + b's', self.buf, 0,
+                             EMPTY_HEADER_V2)
+            self.wpos = len(EMPTY_HEADER_V2)
 
     def ensure_size(self, size):
         while ctypes.sizeof(self.buf) - self.wpos < size:
@@ -84,13 +117,13 @@
         to_write = size + 1
         self.ensure_size(to_write)
         if size == 1:
-            struct.pack_into('=cb', self.buf, self.wpos, BSER_INT8, val)
+            struct.pack_into(b'=cb', self.buf, self.wpos, BSER_INT8, val)
         elif size == 2:
-            struct.pack_into('=ch', self.buf, self.wpos, BSER_INT16, val)
+            struct.pack_into(b'=ch', self.buf, self.wpos, BSER_INT16, val)
         elif size == 4:
-            struct.pack_into('=ci', self.buf, self.wpos, BSER_INT32, val)
+            struct.pack_into(b'=ci', self.buf, self.wpos, BSER_INT32, val)
         elif size == 8:
-            struct.pack_into('=cq', self.buf, self.wpos, BSER_INT64, val)
+            struct.pack_into(b'=cq', self.buf, self.wpos, BSER_INT64, val)
         else:
             raise RuntimeError('Cannot represent this long value')
         self.wpos += to_write
@@ -104,13 +137,17 @@
         to_write = 2 + size + s_len
         self.ensure_size(to_write)
         if size == 1:
-            struct.pack_into('=ccb' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT8, s_len, s)
+            struct.pack_into(b'=ccb' + tobytes(s_len) + b's', self.buf,
+                self.wpos, BSER_BYTESTRING, BSER_INT8, s_len, s)
         elif size == 2:
-            struct.pack_into('=cch' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT16, s_len, s)
+            struct.pack_into(b'=cch' + tobytes(s_len) + b's', self.buf,
+                self.wpos, BSER_BYTESTRING, BSER_INT16, s_len, s)
         elif size == 4:
-            struct.pack_into('=cci' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT32, s_len, s)
+            struct.pack_into(b'=cci' + tobytes(s_len) + b's', self.buf,
+                self.wpos, BSER_BYTESTRING, BSER_INT32, s_len, s)
         elif size == 8:
-            struct.pack_into('=ccq' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT64, s_len, s)
+            struct.pack_into(b'=ccq' + tobytes(s_len) + b's', self.buf,
+                self.wpos, BSER_BYTESTRING, BSER_INT64, s_len, s)
         else:
             raise RuntimeError('Cannot represent this string value')
         self.wpos += to_write
@@ -124,54 +161,68 @@
                 to_encode = BSER_TRUE
             else:
                 to_encode = BSER_FALSE
-            struct.pack_into('=c', self.buf, self.wpos, to_encode)
+            struct.pack_into(b'=c', self.buf, self.wpos, to_encode)
             self.wpos += needed
         elif val is None:
             needed = 1
             self.ensure_size(needed)
-            struct.pack_into('=c', self.buf, self.wpos, BSER_NULL)
+            struct.pack_into(b'=c', self.buf, self.wpos, BSER_NULL)
             self.wpos += needed
         elif isinstance(val, (int, long)):
             self.append_long(val)
-        elif isinstance(val, (str, unicode)):
+        elif isinstance(val, STRING_TYPES):
             self.append_string(val)
         elif isinstance(val, float):
             needed = 9
             self.ensure_size(needed)
-            struct.pack_into('=cd', self.buf, self.wpos, BSER_REAL, val)
+            struct.pack_into(b'=cd', self.buf, self.wpos, BSER_REAL, val)
             self.wpos += needed
-        elif isinstance(val, collections.Mapping) and isinstance(val, collections.Sized):
+        elif isinstance(val, collections.Mapping) and \
+            isinstance(val, collections.Sized):
             val_len = len(val)
             size = _int_size(val_len)
             needed = 2 + size
             self.ensure_size(needed)
             if size == 1:
-                struct.pack_into('=ccb', self.buf, self.wpos, BSER_OBJECT, BSER_INT8, val_len)
+                struct.pack_into(b'=ccb', self.buf, self.wpos, BSER_OBJECT,
+                    BSER_INT8, val_len)
             elif size == 2:
-                struct.pack_into('=cch', self.buf, self.wpos, BSER_OBJECT, BSER_INT16, val_len)
+                struct.pack_into(b'=cch', self.buf, self.wpos, BSER_OBJECT,
+                    BSER_INT16, val_len)
             elif size == 4:
-                struct.pack_into('=cci', self.buf, self.wpos, BSER_OBJECT, BSER_INT32, val_len)
+                struct.pack_into(b'=cci', self.buf, self.wpos, BSER_OBJECT,
+                    BSER_INT32, val_len)
             elif size == 8:
-                struct.pack_into('=ccq', self.buf, self.wpos, BSER_OBJECT, BSER_INT64, val_len)
+                struct.pack_into(b'=ccq', self.buf, self.wpos, BSER_OBJECT,
+                    BSER_INT64, val_len)
             else:
                 raise RuntimeError('Cannot represent this mapping value')
             self.wpos += needed
-            for k, v in val.iteritems():
+            if compat.PYTHON3:
+                iteritems = val.items()
+            else:
+                iteritems = val.iteritems()
+            for k, v in iteritems:
                 self.append_string(k)
                 self.append_recursive(v)
-        elif isinstance(val, collections.Iterable) and isinstance(val, collections.Sized):
+        elif isinstance(val, collections.Iterable) and \
+            isinstance(val, collections.Sized):
             val_len = len(val)
             size = _int_size(val_len)
             needed = 2 + size
             self.ensure_size(needed)
             if size == 1:
-                struct.pack_into('=ccb', self.buf, self.wpos, BSER_ARRAY, BSER_INT8, val_len)
+                struct.pack_into(b'=ccb', self.buf, self.wpos, BSER_ARRAY,
+                    BSER_INT8, val_len)
             elif size == 2:
-                struct.pack_into('=cch', self.buf, self.wpos, BSER_ARRAY, BSER_INT16, val_len)
+                struct.pack_into(b'=cch', self.buf, self.wpos, BSER_ARRAY,
+                    BSER_INT16, val_len)
             elif size == 4:
-                struct.pack_into('=cci', self.buf, self.wpos, BSER_ARRAY, BSER_INT32, val_len)
+                struct.pack_into(b'=cci', self.buf, self.wpos, BSER_ARRAY,
+                    BSER_INT32, val_len)
             elif size == 8:
-                struct.pack_into('=ccq', self.buf, self.wpos, BSER_ARRAY, BSER_INT64, val_len)
+                struct.pack_into(b'=ccq', self.buf, self.wpos, BSER_ARRAY,
+                    BSER_INT64, val_len)
             else:
                 raise RuntimeError('Cannot represent this sequence value')
             self.wpos += needed
@@ -181,56 +232,18 @@
             raise RuntimeError('Cannot represent unknown value type')
 
 
-def dumps(obj):
-    bser_buf = _bser_buffer()
+def dumps(obj, version=1, capabilities=0):
+    bser_buf = _bser_buffer(version=version)
     bser_buf.append_recursive(obj)
     # Now fill in the overall length
-    obj_len = bser_buf.wpos - len(EMPTY_HEADER)
-    struct.pack_into('=i', bser_buf.buf, 3, obj_len)
-    return bser_buf.buf.raw[:bser_buf.wpos]
-
-
-def _bunser_int(buf, pos):
-    try:
-        int_type = buf[pos]
-    except IndexError:
-        raise ValueError('Invalid bser int encoding, pos out of range')
-    if int_type == BSER_INT8:
-        needed = 2
-        fmt = '=b'
-    elif int_type == BSER_INT16:
-        needed = 3
-        fmt = '=h'
-    elif int_type == BSER_INT32:
-        needed = 5
-        fmt = '=i'
-    elif int_type == BSER_INT64:
-        needed = 9
-        fmt = '=q'
+    if version == 1:
+        obj_len = bser_buf.wpos - len(EMPTY_HEADER)
+        struct.pack_into(b'=i', bser_buf.buf, 3, obj_len)
     else:
-        raise ValueError('Invalid bser int encoding 0x%02x' % int(int_type))
-    int_val = struct.unpack_from(fmt, buf, pos + 1)[0]
-    return (int_val, pos + needed)
-
-
-def _bunser_string(buf, pos):
-    str_len, pos = _bunser_int(buf, pos + 1)
-    str_val = struct.unpack_from(str(str_len) + 's', buf, pos)[0]
-    return (str_val, pos + str_len)
-
-
-def _bunser_array(buf, pos, mutable=True):
-    arr_len, pos = _bunser_int(buf, pos + 1)
-    arr = []
-    for i in range(arr_len):
-        arr_item, pos = _bser_loads_recursive(buf, pos, mutable)
-        arr.append(arr_item)
-
-    if not mutable:
-      arr = tuple(arr)
-
-    return arr, pos
-
+        obj_len = bser_buf.wpos - len(EMPTY_HEADER_V2)
+        struct.pack_into(b'=i', bser_buf.buf, 2, capabilities)
+        struct.pack_into(b'=i', bser_buf.buf, 7, obj_len)
+    return bser_buf.buf.raw[:bser_buf.wpos]
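# A sketch of the resulting PDU header layouts, read straight off the
# pack_into offsets used here and in _pdu_info_helper below; the magic
# byte pairs are EMPTY_HEADER[0:2] and EMPTY_HEADER_V2[0:2], defined
# outside this hunk:
#   v1: bytes 0-1 magic, byte 2 sized-int marker, bytes 3-6 length ('=i' at 3)
#   v2: bytes 0-1 magic, bytes 2-5 capabilities ('=i' at 2),
#       byte 6 sized-int marker, bytes 7-10 length ('=i' at 7)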
 
 # This is a quack-alike with the bserObjectType in bser.c
 # It provides by getattr accessors and getitem for both index
@@ -260,100 +273,212 @@
     def __len__(self):
         return len(self._keys)
 
-def _bunser_object(buf, pos, mutable=True):
-    obj_len, pos = _bunser_int(buf, pos + 1)
-    if mutable:
-        obj = {}
-    else:
-        keys = []
-        vals = []
+class Bunser(object):
+    def __init__(self, mutable=True, value_encoding=None, value_errors=None):
+        self.mutable = mutable
+        self.value_encoding = value_encoding
+
+        if value_encoding is None:
+            self.value_errors = None
+        elif value_errors is None:
+            self.value_errors = 'strict'
+        else:
+            self.value_errors = value_errors
 
-    for i in range(obj_len):
-        key, pos = _bunser_string(buf, pos)
-        val, pos = _bser_loads_recursive(buf, pos, mutable)
-        if mutable:
-            obj[key] = val
+    @staticmethod
+    def unser_int(buf, pos):
+        try:
+            int_type = _buf_pos(buf, pos)
+        except IndexError:
+            raise ValueError('Invalid bser int encoding, pos out of range')
+        if int_type == BSER_INT8:
+            needed = 2
+            fmt = b'=b'
+        elif int_type == BSER_INT16:
+            needed = 3
+            fmt = b'=h'
+        elif int_type == BSER_INT32:
+            needed = 5
+            fmt = b'=i'
+        elif int_type == BSER_INT64:
+            needed = 9
+            fmt = b'=q'
         else:
-            keys.append(key)
-            vals.append(val)
+            raise ValueError('Invalid bser int encoding 0x%s' %
+                             binascii.hexlify(int_type).decode('ascii'))
+        int_val = struct.unpack_from(fmt, buf, pos + 1)[0]
+        return (int_val, pos + needed)
 
-    if not mutable:
-        obj = _BunserDict(keys, vals)
-
-    return obj, pos
-
+    def unser_utf8_string(self, buf, pos):
+        str_len, pos = self.unser_int(buf, pos + 1)
+        str_val = struct.unpack_from(tobytes(str_len) + b's', buf, pos)[0]
+        return (str_val.decode('utf-8'), pos + str_len)
 
-def _bunser_template(buf, pos, mutable=True):
-    if buf[pos + 1] != BSER_ARRAY:
-        raise RuntimeError('Expect ARRAY to follow TEMPLATE')
-    keys, pos = _bunser_array(buf, pos + 1)
-    nitems, pos = _bunser_int(buf, pos)
-    arr = []
-    for i in range(nitems):
-        if mutable:
+    def unser_bytestring(self, buf, pos):
+        str_len, pos = self.unser_int(buf, pos + 1)
+        str_val = struct.unpack_from(tobytes(str_len) + b's', buf, pos)[0]
+        if self.value_encoding is not None:
+            str_val = str_val.decode(self.value_encoding, self.value_errors)
+            # str_len stays the same because that's the length in bytes
+        return (str_val, pos + str_len)
+
+    def unser_array(self, buf, pos):
+        arr_len, pos = self.unser_int(buf, pos + 1)
+        arr = []
+        for i in range(arr_len):
+            arr_item, pos = self.loads_recursive(buf, pos)
+            arr.append(arr_item)
+
+        if not self.mutable:
+            arr = tuple(arr)
+
+        return arr, pos
+
+    def unser_object(self, buf, pos):
+        obj_len, pos = self.unser_int(buf, pos + 1)
+        if self.mutable:
             obj = {}
         else:
+            keys = []
             vals = []
 
-        for keyidx in range(len(keys)):
-            if buf[pos] == BSER_SKIP:
-                pos += 1
-                ele = None
+        for i in range(obj_len):
+            key, pos = self.unser_utf8_string(buf, pos)
+            val, pos = self.loads_recursive(buf, pos)
+            if self.mutable:
+                obj[key] = val
             else:
-                ele, pos = _bser_loads_recursive(buf, pos, mutable)
+                keys.append(key)
+                vals.append(val)
 
-            if mutable:
-                key = keys[keyidx]
-                obj[key] = ele
-            else:
-                vals.append(ele)
-
-        if not mutable:
+        if not self.mutable:
             obj = _BunserDict(keys, vals)
 
-        arr.append(obj)
-    return arr, pos
+        return obj, pos
+
+    def unser_template(self, buf, pos):
+        val_type = _buf_pos(buf, pos + 1)
+        if val_type != BSER_ARRAY:
+            raise RuntimeError('Expect ARRAY to follow TEMPLATE')
+        # force UTF-8 on keys
+        keys_bunser = Bunser(mutable=self.mutable, value_encoding='utf-8')
+        keys, pos = keys_bunser.unser_array(buf, pos + 1)
+        nitems, pos = self.unser_int(buf, pos)
+        arr = []
+        for i in range(nitems):
+            if self.mutable:
+                obj = {}
+            else:
+                vals = []
+
+            for keyidx in range(len(keys)):
+                if _buf_pos(buf, pos) == BSER_SKIP:
+                    pos += 1
+                    ele = None
+                else:
+                    ele, pos = self.loads_recursive(buf, pos)
+
+                if self.mutable:
+                    key = keys[keyidx]
+                    obj[key] = ele
+                else:
+                    vals.append(ele)
+
+            if not self.mutable:
+                obj = _BunserDict(keys, vals)
+
+            arr.append(obj)
+        return arr, pos
+
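# A sketch of what unser_template expands (illustrative field names only):
# templates are watchman's compact encoding for arrays of objects sharing
# one key list, with BSER_SKIP marking per-item holes.
#
#   TEMPLATE keys=['name', 'size'], nitems=2
#     item 0: 'foo', 123
#     item 1: <BSER_SKIP>, 456
#
# decodes (with mutable=True) to:
#   [{'name': 'foo', 'size': 123}, {'name': None, 'size': 456}]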
+    def loads_recursive(self, buf, pos):
+        val_type = _buf_pos(buf, pos)
+        if (val_type == BSER_INT8 or val_type == BSER_INT16 or
+            val_type == BSER_INT32 or val_type == BSER_INT64):
+            return self.unser_int(buf, pos)
+        elif val_type == BSER_REAL:
+            val = struct.unpack_from(b'=d', buf, pos + 1)[0]
+            return (val, pos + 9)
+        elif val_type == BSER_TRUE:
+            return (True, pos + 1)
+        elif val_type == BSER_FALSE:
+            return (False, pos + 1)
+        elif val_type == BSER_NULL:
+            return (None, pos + 1)
+        elif val_type == BSER_BYTESTRING:
+            return self.unser_bytestring(buf, pos)
+        elif val_type == BSER_UTF8STRING:
+            return self.unser_utf8_string(buf, pos)
+        elif val_type == BSER_ARRAY:
+            return self.unser_array(buf, pos)
+        elif val_type == BSER_OBJECT:
+            return self.unser_object(buf, pos)
+        elif val_type == BSER_TEMPLATE:
+            return self.unser_template(buf, pos)
+        else:
+            raise ValueError('unhandled bser opcode 0x%s' %
+                             binascii.hexlify(val_type).decode('ascii'))
 
 
-def _bser_loads_recursive(buf, pos, mutable=True):
-    val_type = buf[pos]
-    if (val_type == BSER_INT8 or val_type == BSER_INT16 or
-        val_type == BSER_INT32 or val_type == BSER_INT64):
-        return _bunser_int(buf, pos)
-    elif val_type == BSER_REAL:
-        val = struct.unpack_from('=d', buf, pos + 1)[0]
-        return (val, pos + 9)
-    elif val_type == BSER_TRUE:
-        return (True, pos + 1)
-    elif val_type == BSER_FALSE:
-        return (False, pos + 1)
-    elif val_type == BSER_NULL:
-        return (None, pos + 1)
-    elif val_type == BSER_STRING:
-        return _bunser_string(buf, pos)
-    elif val_type == BSER_ARRAY:
-        return _bunser_array(buf, pos, mutable)
-    elif val_type == BSER_OBJECT:
-        return _bunser_object(buf, pos, mutable)
-    elif val_type == BSER_TEMPLATE:
-        return _bunser_template(buf, pos, mutable)
+def _pdu_info_helper(buf):
+    bser_version = -1
+    if buf[0:2] == EMPTY_HEADER[0:2]:
+        bser_version = 1
+        bser_capabilities = 0
+        expected_len, pos2 = Bunser.unser_int(buf, 2)
+    elif buf[0:2] == EMPTY_HEADER_V2[0:2]:
+        if len(buf) < 8:
+            raise ValueError('Invalid BSER header')
+        bser_version = 2
+        bser_capabilities = struct.unpack_from("I", buf, 2)[0]
+        expected_len, pos2 = Bunser.unser_int(buf, 6)
     else:
-        raise RuntimeError('unhandled bser opcode 0x%02x' % (val_type,))
+        raise ValueError('Invalid BSER header')
+
+    return bser_version, bser_capabilities, expected_len, pos2
+
+
+def pdu_info(buf):
+    info = _pdu_info_helper(buf)
+    return info[0], info[1], info[2] + info[3]
 
 
 def pdu_len(buf):
-    if buf[0:2] != EMPTY_HEADER[0:2]:
-        raise RuntimeError('Invalid BSER header')
-    expected_len, pos = _bunser_int(buf, 2)
-    return expected_len + pos
+    info = _pdu_info_helper(buf)
+    return info[2] + info[3]
 
 
-def loads(buf, mutable=True):
-    if buf[0:2] != EMPTY_HEADER[0:2]:
-        raise RuntimeError('Invalid BSER header')
-    expected_len, pos = _bunser_int(buf, 2)
+def loads(buf, mutable=True, value_encoding=None, value_errors=None):
+    """Deserialize a BSER-encoded blob.
+
+    @param buf: The buffer to deserialize.
+    @type buf: bytes
+
+    @param mutable: Whether to return mutable results.
+    @type mutable: bool
+
+    @param value_encoding: Optional codec to use to decode values. If
+                           unspecified or None, return values as bytestrings.
+    @type value_encoding: str
+
+    @param value_errors: Optional error handler for codec. 'strict' by default.
+                         The other most common argument is 'surrogateescape' on
+                         Python 3. If value_encoding is None, this is ignored.
+    @type value_errors: str
+    """
+
+    info = _pdu_info_helper(buf)
+    expected_len = info[2]
+    pos = info[3]
+
     if len(buf) != expected_len + pos:
-        raise RuntimeError('bser data len != header len')
-    return _bser_loads_recursive(buf, pos, mutable)[0]
+        raise ValueError('bser data len != header len')
+
+    bunser = Bunser(mutable=mutable, value_encoding=value_encoding,
+                    value_errors=value_errors)
 
-# no-check-code -- this is a 3rd party library
+    return bunser.loads_recursive(buf, pos)[0]
+
+
+def load(fp, mutable=True, value_encoding=None, value_errors=None):
+    from . import load
+    return load.load(fp, mutable, value_encoding, value_errors)
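A plausible end-to-end use of the reworked module, as a sketch; it assumes
the import path pywatchman.pybser, and nothing in it is exercised by this
changeset itself:

    from pywatchman import pybser

    pdu = pybser.dumps({'version': '4.9.0', 'roots': ['/repo']})
    version, capabilities, total = pybser.pdu_info(pdu)  # (1, 0, len(pdu))
    assert pybser.pdu_len(pdu) == len(pdu)

    raw = pybser.loads(pdu)                  # values stay bytestrings
    text = pybser.loads(pdu, value_encoding='utf-8',
                        value_errors='surrogateescape')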
--- a/hgext/fsmonitor/state.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/fsmonitor/state.py	Wed Jan 18 11:43:36 2017 -0500
@@ -59,6 +59,12 @@
             state = file.read().split('\0')
             # state = hostname\0clock\0ignorehash\0 + list of files, each
             # followed by a \0
+            if len(state) < 3:
+                self._ui.log(
+                    'fsmonitor', 'fsmonitor: state file truncated (expected '
+                    '3 chunks, found %d), nuking state\n', len(state))
+                self.invalidate()
+                return None, None, None
             diskhostname = state[0]
             hostname = socket.gethostname()
             if diskhostname != hostname:
@@ -85,12 +91,12 @@
             return
 
         try:
-            file = self._opener('fsmonitor.state', 'wb')
+            file = self._opener('fsmonitor.state', 'wb', atomictemp=True)
         except (IOError, OSError):
             self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
             return
 
-        try:
+        with file:
             file.write(struct.pack(_versionformat, _version))
             file.write(socket.gethostname() + '\0')
             file.write(clock + '\0')
@@ -98,8 +104,6 @@
             if notefiles:
                 file.write('\0'.join(notefiles))
                 file.write('\0')
-        finally:
-            file.close()
 
     def invalidate(self):
         try:
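The new length check guards against torn writes: the state body is a
'\0'-separated list, so a truncated file splits into fewer than the three
mandatory chunks. Illustrative values only:

    # well-formed body: hostname, clock, ignorehash, then watched files
    'host\x00c:12:34\x00abcd\x00'.split('\x00')
    # -> ['host', 'c:12:34', 'abcd', '']

    # a write cut off mid-hostname leaves a single chunk
    'hos'.split('\x00')   # -> ['hos'], len < 3, state gets nuked

The atomictemp change above attacks the same problem from the write side:
the state goes to a temporary file that is renamed into place when the
'with file:' block closes, so readers never observe a half-written file.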
--- a/hgext/fsmonitor/watchmanclient.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/fsmonitor/watchmanclient.py	Wed Jan 18 11:43:36 2017 -0500
@@ -87,7 +87,7 @@
                     useImmutableBser=True)
             return self._watchmanclient.query(*watchmanargs)
         except pywatchman.CommandError as ex:
-            if ex.msg.startswith('unable to resolve root'):
+            if 'unable to resolve root' in ex.msg:
                 raise WatchmanNoRoot(self._root, ex.msg)
             raise Unavailable(ex.msg)
         except pywatchman.WatchmanError as ex:
--- a/hgext/histedit.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/histedit.py	Wed Jan 18 11:43:36 2017 -0500
@@ -173,7 +173,6 @@
 
 import errno
 import os
-import sys
 
 from mercurial.i18n import _
 from mercurial import (
@@ -991,9 +990,9 @@
         return goaleditplan
     return goalnew
 
-def _readfile(path):
+def _readfile(ui, path):
     if path == '-':
-        return sys.stdin.read()
+        return ui.fin.read()
     else:
         with open(path, 'rb') as f:
             return f.read()
@@ -1191,7 +1190,7 @@
                                  node.short(state.topmost))
         rules = ruleeditor(repo, ui, state.actions, comment)
     else:
-        rules = _readfile(rules)
+        rules = _readfile(ui, rules)
     actions = parserules(rules, state)
     ctxs = [repo[act.node] \
             for act in state.actions if act.node]
@@ -1232,7 +1231,7 @@
         actions = [pick(state, r) for r in revs]
         rules = ruleeditor(repo, ui, actions, comment)
     else:
-        rules = _readfile(rules)
+        rules = _readfile(ui, rules)
     actions = parserules(rules, state)
     warnverifyactions(ui, repo, actions, state, ctxs)
 
@@ -1335,7 +1334,8 @@
     rules = '\n'.join([act.torule() for act in actions])
     rules += '\n\n'
     rules += editcomment
-    rules = ui.edit(rules, ui.username(), {'prefix': 'histedit'})
+    rules = ui.edit(rules, ui.username(), {'prefix': 'histedit'},
+                    tmpdir=repo.path)
 
     # Save edit rules in .hg/histedit-last-edit.txt in case
     # the user needs to ask for help after something
@@ -1406,12 +1406,12 @@
                        % node.short(missing[0]))
 
 def adjustreplacementsfrommarkers(repo, oldreplacements):
-    """Adjust replacements from obsolescense markers
+    """Adjust replacements from obsolescence markers
 
     Replacements structure is originally generated based on
     histedit's state and does not account for changes that are
     not recorded there. This function fixes that by adding
-    data read from obsolescense markers"""
+    data read from obsolescence markers"""
     if not obsolete.isenabled(repo, obsolete.createmarkersopt):
         return oldreplacements
 
--- a/hgext/keyword.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/keyword.py	Wed Jan 18 11:43:36 2017 -0500
@@ -737,6 +737,8 @@
             return ret
 
     def kwfilectx_cmp(orig, self, fctx):
+        if fctx._customcmp:
+            return fctx.cmp(self)
         # keyword affects data size, comparing wdir and filelog size does
         # not make sense
         if (fctx._filenode is None and
--- a/hgext/largefiles/lfcommands.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/largefiles/lfcommands.py	Wed Jan 18 11:43:36 2017 -0500
@@ -510,18 +510,21 @@
                 lfdirstate.normal(lfile)
                 update1 = 1
 
-            # copy the state of largefile standin from the repository's
+            # copy the exec mode of largefile standin from the repository's
             # dirstate to its state in the lfdirstate.
             rellfile = lfile
             relstandin = lfutil.standin(lfile)
             if wvfs.exists(relstandin):
+                # exec is decided by the user's permissions using mask 0o100
                 standinexec = wvfs.stat(relstandin).st_mode & 0o100
-                st = wvfs.stat(rellfile).st_mode
-                if standinexec != st & 0o100:
-                    st &= ~0o111
+                st = wvfs.stat(rellfile)
+                mode = st.st_mode
+                if standinexec != mode & 0o100:
+                    # first remove all X bits, then shift all R bits to X
+                    mode &= ~0o111
                     if standinexec:
-                        st |= (st >> 2) & 0o111 & ~util.umask
-                    wvfs.chmod(rellfile, st)
+                        mode |= (mode >> 2) & 0o111 & ~util.umask
+                    wvfs.chmod(rellfile, mode)
                     update1 = 1
 
             updated += update1
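The mode arithmetic is easiest to follow with concrete numbers; a worked
example assuming a conventional umask of 0o022 (the real code uses
util.umask):

    umask = 0o022
    mode = 0o644               # largefile is rw-r--r--
    standinexec = 0o100        # the standin gained its exec bit

    mode &= ~0o111             # drop every execute bit: still 0o644
    if standinexec:
        # copy each read bit down two places onto its execute bit,
        # then mask out whatever the umask forbids
        mode |= (mode >> 2) & 0o111 & ~umask
    assert mode == 0o755       # rwxr-xr-x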
--- a/hgext/largefiles/lfutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/largefiles/lfutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -19,10 +19,12 @@
 
 from mercurial import (
     dirstate,
+    encoding,
     error,
     httpconnection,
     match as matchmod,
     node,
+    pycompat,
     scmutil,
     util,
 )
@@ -72,23 +74,25 @@
     path = ui.configpath(longname, 'usercache', None)
     if path:
         return path
-    if os.name == 'nt':
-        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
+    if pycompat.osname == 'nt':
+        appdata = encoding.environ.get('LOCALAPPDATA',
+                        encoding.environ.get('APPDATA'))
         if appdata:
             return os.path.join(appdata, longname)
     elif platform.system() == 'Darwin':
-        home = os.getenv('HOME')
+        home = encoding.environ.get('HOME')
         if home:
             return os.path.join(home, 'Library', 'Caches', longname)
-    elif os.name == 'posix':
-        path = os.getenv('XDG_CACHE_HOME')
+    elif pycompat.osname == 'posix':
+        path = encoding.environ.get('XDG_CACHE_HOME')
         if path:
             return os.path.join(path, longname)
-        home = os.getenv('HOME')
+        home = encoding.environ.get('HOME')
         if home:
             return os.path.join(home, '.cache', longname)
     else:
-        raise error.Abort(_('unknown operating system: %s\n') % os.name)
+        raise error.Abort(_('unknown operating system: %s\n')
+                          % pycompat.osname)
     raise error.Abort(_('unknown %s usercache location') % longname)
 
 def inusercache(ui, hash):
--- a/hgext/largefiles/proto.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/largefiles/proto.py	Wed Jan 18 11:43:36 2017 -0500
@@ -76,7 +76,7 @@
         yield '%d\n' % length
         for chunk in util.filechunkiter(f):
             yield chunk
-    return wireproto.streamres(generator())
+    return wireproto.streamres(gen=generator())
 
 def statlfile(repo, proto, sha):
     '''Server command for checking if a largefile is present - returns '2\n' if
--- a/hgext/logtoprocess.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/logtoprocess.py	Wed Jan 18 11:43:36 2017 -0500
@@ -27,7 +27,7 @@
 
 would log the warning message and traceback of any failed command dispatch.
 
-Scripts are run asychronously as detached daemon processes; mercurial will
+Scripts are run asynchronously as detached daemon processes; mercurial will
 not ensure that they exit cleanly.
 
 """
@@ -40,6 +40,8 @@
 import subprocess
 import sys
 
+from mercurial import encoding
+
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
@@ -117,7 +119,7 @@
                 optpairs = (
                     ('OPT_{0}'.format(key.upper()), str(value))
                     for key, value in opts.iteritems())
-                env = dict(itertools.chain(os.environ.items(),
+                env = dict(itertools.chain(encoding.environ.items(),
                                            msgpairs, optpairs),
                            EVENT=event, HGPID=str(os.getpid()))
                 # Connect stdin to /dev/null to prevent child processes messing
--- a/hgext/mq.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/mq.py	Wed Jan 18 11:43:36 2017 -0500
@@ -79,7 +79,7 @@
 from mercurial import (
     cmdutil,
     commands,
-    dispatch,
+    dirstateguard,
     error,
     extensions,
     hg,
@@ -87,6 +87,7 @@
     lock as lockmod,
     patch as patchmod,
     phases,
+    pycompat,
     registrar,
     revset,
     scmutil,
@@ -1660,7 +1661,7 @@
             # caching against the next repo.status call
             mm, aa, dd = repo.status(patchparent, top)[:3]
             changes = repo.changelog.read(top)
-            man = repo.manifest.read(changes[0])
+            man = repo.manifestlog[changes[0]].read()
             aaa = aa[:]
             matchfn = scmutil.match(repo[None], pats, opts)
             # in short mode, we only diff the files included in the
@@ -1725,7 +1726,7 @@
 
             dsguard = None
             try:
-                dsguard = cmdutil.dirstateguard(repo, 'mq.refresh')
+                dsguard = dirstateguard.dirstateguard(repo, 'mq.refresh')
                 if diffopts.git or diffopts.upgrade:
                     copies = {}
                     for dst in a:
@@ -3523,7 +3524,7 @@
             raise error.Abort(_('only a local queue repository '
                                'may be initialized'))
     else:
-        repopath = cmdutil.findrepo(os.getcwd())
+        repopath = cmdutil.findrepo(pycompat.getcwd())
         if not repopath:
             raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
@@ -3588,7 +3589,7 @@
         for cmd, entry in cmdtable.iteritems():
             cmd = cmdutil.parsealiases(cmd)[0]
             func = entry[0]
-            if dispatch._cmdattr(ui, cmd, func, 'norepo'):
+            if func.norepo:
                 continue
             entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
             entry[1].extend(mqopt)
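Two API migrations run through the mq hunks above: dirstateguard now lives
in its own module rather than in cmdutil, and manifests are reached through
repo.manifestlog rather than repo.manifest. A sketch of the new manifest
access pattern, assuming a repo object and a changelog node (illustrative,
not taken from this changeset):

    changes = repo.changelog.read(node)        # changes[0]: the manifest node
    man = repo.manifestlog[changes[0]].read()  # mapping of path -> filenode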
--- a/hgext/pager.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/pager.py	Wed Jan 18 11:43:36 2017 -0500
@@ -71,6 +71,7 @@
     cmdutil,
     commands,
     dispatch,
+    encoding,
     extensions,
     util,
     )
@@ -84,19 +85,19 @@
 def _runpager(ui, p):
     pager = subprocess.Popen(p, shell=True, bufsize=-1,
                              close_fds=util.closefds, stdin=subprocess.PIPE,
-                             stdout=sys.stdout, stderr=sys.stderr)
+                             stdout=util.stdout, stderr=util.stderr)
 
     # back up original file objects and descriptors
     olduifout = ui.fout
-    oldstdout = sys.stdout
-    stdoutfd = os.dup(sys.stdout.fileno())
-    stderrfd = os.dup(sys.stderr.fileno())
+    oldstdout = util.stdout
+    stdoutfd = os.dup(util.stdout.fileno())
+    stderrfd = os.dup(util.stderr.fileno())
 
     # create new line-buffered stdout so that output can show up immediately
-    ui.fout = sys.stdout = newstdout = os.fdopen(sys.stdout.fileno(), 'wb', 1)
-    os.dup2(pager.stdin.fileno(), sys.stdout.fileno())
-    if ui._isatty(sys.stderr):
-        os.dup2(pager.stdin.fileno(), sys.stderr.fileno())
+    ui.fout = util.stdout = newstdout = os.fdopen(util.stdout.fileno(), 'wb', 1)
+    os.dup2(pager.stdin.fileno(), util.stdout.fileno())
+    if ui._isatty(util.stderr):
+        os.dup2(pager.stdin.fileno(), util.stderr.fileno())
 
     @atexit.register
     def killpager():
@@ -104,26 +105,27 @@
             signal.signal(signal.SIGINT, signal.SIG_IGN)
         pager.stdin.close()
         ui.fout = olduifout
-        sys.stdout = oldstdout
+        util.stdout = oldstdout
         # close new stdout while it's associated with pager; otherwise stdout
         # fd would be closed when newstdout is deleted
         newstdout.close()
         # restore original fds: stdout is open again
-        os.dup2(stdoutfd, sys.stdout.fileno())
-        os.dup2(stderrfd, sys.stderr.fileno())
+        os.dup2(stdoutfd, util.stdout.fileno())
+        os.dup2(stderrfd, util.stderr.fileno())
         pager.wait()
 
 def uisetup(ui):
     if '--debugger' in sys.argv or not ui.formatted():
         return
 
-    # chg has its own pager implementation
-    argv = sys.argv[:]
-    if 'chgunix' in dispatch._earlygetopt(['--cmdserver'], argv):
-        return
+    class pagerui(ui.__class__):
+        def _runpager(self, pagercmd):
+            _runpager(self, pagercmd)
+
+    ui.__class__ = pagerui
 
     def pagecmd(orig, ui, options, cmd, cmdfunc):
-        p = ui.config("pager", "pager", os.environ.get("PAGER"))
+        p = ui.config("pager", "pager", encoding.environ.get("PAGER"))
         usepager = False
         always = util.parsebool(options['pager'])
         auto = options['pager'] == 'auto'
@@ -156,7 +158,7 @@
             ui.setconfig('ui', 'interactive', False, 'pager')
             if util.safehasattr(signal, "SIGPIPE"):
                 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-            _runpager(ui, p)
+            ui._runpager(p)
         return orig(ui, options, cmd, cmdfunc)
 
     # Wrap dispatch._runcommand after color is loaded so color can see
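The util.stdout/util.stderr switch keeps the pager attached to byte streams
on Python 3, where sys.stdout is text mode. The surrounding fd dance is the
standard pattern: duplicate the real stdout, point fd 1 at the pager's
stdin, restore the duplicate on the way out. A stripped-down, self-contained
sketch of that pattern on POSIX (assuming a 'less' binary; error handling
omitted):

    import os
    import subprocess

    pager = subprocess.Popen(['less'], stdin=subprocess.PIPE)
    saved = os.dup(1)                   # keep the real stdout alive
    os.dup2(pager.stdin.fileno(), 1)    # fd 1 now feeds the pager
    try:
        os.write(1, b'hello from behind the pager\n')
    finally:
        os.dup2(saved, 1)               # fd 1 is the terminal again
        os.close(saved)
        pager.stdin.close()             # EOF lets the pager exit
        pager.wait()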
--- a/hgext/patchbomb.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/patchbomb.py	Wed Jan 18 11:43:36 2017 -0500
@@ -75,6 +75,7 @@
 from mercurial import (
     cmdutil,
     commands,
+    encoding,
     error,
     hg,
     mail,
@@ -166,7 +167,7 @@
         while patchlines and not patchlines[0].strip():
             patchlines.pop(0)
 
-    ds = patch.diffstat(patchlines, git=opts.get('git'))
+    ds = patch.diffstat(patchlines)
     if opts.get('diffstat'):
         body += ds + '\n\n'
 
@@ -270,7 +271,7 @@
     else:
         ui.write(_('\nWrite the introductory message for the '
                    'patch series.\n\n'))
-        body = ui.edit(defaultbody, sender)
+        body = ui.edit(defaultbody, sender, tmpdir=repo.path)
         # Save series description in case sendmail fails
         msgfile = repo.vfs('last-email.txt', 'wb')
         msgfile.write(body)
@@ -693,8 +694,8 @@
         if opts.get('test'):
             ui.status(_('displaying '), subj, ' ...\n')
             ui.flush()
-            if 'PAGER' in os.environ and not ui.plain():
-                fp = util.popen(os.environ['PAGER'], 'w')
+            if 'PAGER' in encoding.environ and not ui.plain():
+                fp = util.popen(encoding.environ['PAGER'], 'w')
             else:
                 fp = ui
             generator = emailmod.Generator.Generator(fp, mangle_from_=False)
--- a/hgext/rebase.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/rebase.py	Wed Jan 18 11:43:36 2017 -0500
@@ -32,11 +32,13 @@
     commands,
     copies,
     destutil,
+    dirstateguard,
     error,
     extensions,
     hg,
     lock,
-    merge,
+    merge as mergemod,
+    mergeutil,
     obsolete,
     patch,
     phases,
@@ -482,19 +484,20 @@
             ui.note(_("update back to initial working directory parent\n"))
             hg.updaterepo(repo, newwd, False)
 
+        if self.currentbookmarks:
+            with repo.transaction('bookmark') as tr:
+                updatebookmarks(repo, targetnode, nstate,
+                                self.currentbookmarks, tr)
+                if self.activebookmark not in repo._bookmarks:
+                    # active bookmark was divergent one and has been deleted
+                    self.activebookmark = None
+
         if not self.keepf:
             collapsedas = None
             if self.collapsef:
                 collapsedas = newnode
             clearrebased(ui, repo, self.state, self.skipped, collapsedas)
 
-        with repo.transaction('bookmark') as tr:
-            if self.currentbookmarks:
-                updatebookmarks(repo, targetnode, nstate,
-                                self.currentbookmarks, tr)
-                if self.activebookmark not in repo._bookmarks:
-                    # active bookmark was divergent one and has been deleted
-                    self.activebookmark = None
         clearstatus(repo)
         clearcollapsemsg(repo)
 
@@ -661,6 +664,9 @@
                     _('abort and continue do not allow specifying revisions'))
             if abortf and opts.get('tool', False):
                 ui.warn(_('tool option will be ignored\n'))
+            if contf:
+                ms = mergemod.mergestate.read(repo)
+                mergeutil.checkunresolved(ms)
 
             retcode = rbsrt._prepareabortorcontinue(abortf)
             if retcode is not None:
@@ -718,12 +724,19 @@
             dest = repo[_destrebase(repo, base, destspace=destspace)]
             destf = str(dest)
 
-        commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
-        if commonanc is not None:
-            rebaseset = repo.revs('(%d::(%ld) - %d)::',
-                                  commonanc, base, commonanc)
-        else:
-            rebaseset = []
+        roots = [] # selected children of branching points
+        bpbase = {} # {branchingpoint: [origbase]}
+        for b in base: # group bases by branching points
+            bp = repo.revs('ancestor(%d, %d)', b, dest).first()
+            bpbase[bp] = bpbase.get(bp, []) + [b]
+        if None in bpbase:
+            # emulate the old behavior, showing "nothing to rebase" (a better
+            # behavior may be to abort with "cannot find branching point")
+            bpbase.clear()
+        for bp, bs in bpbase.iteritems(): # calculate roots
+            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))
+
+        rebaseset = repo.revs('%ld::', roots)
 
         if not rebaseset:
             # transform to list because smartsets are not comparable to
@@ -786,7 +799,7 @@
     '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
     but also store useful information in extra.
     Return node of committed revision.'''
-    dsguard = cmdutil.dirstateguard(repo, 'rebase')
+    dsguard = dirstateguard.dirstateguard(repo, 'rebase')
     try:
         repo.setparents(repo[p1].node(), repo[p2].node())
         ctx = repo[rev]
@@ -823,7 +836,7 @@
     # Update to target and merge it with local
     if repo['.'].rev() != p1:
         repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
-        merge.update(repo, p1, False, True)
+        mergemod.update(repo, p1, False, True)
     else:
         repo.ui.debug(" already in target\n")
     repo.dirstate.write(repo.currenttransaction())
@@ -832,8 +845,8 @@
         repo.ui.debug("   detach base %d:%s\n" % (base, repo[base]))
     # When collapsing in-place, the parent is the common ancestor, we
     # have to allow merging with it.
-    stats = merge.update(repo, rev, True, True, base, collapse,
-                        labels=['dest', 'source'])
+    stats = mergemod.update(repo, rev, True, True, base, collapse,
+                            labels=['dest', 'source'])
     if collapse:
         copies.duplicatecopies(repo, rev, target)
     else:
@@ -855,10 +868,7 @@
     else:
         return None
 
-def _checkobsrebase(repo, ui,
-                                  rebaseobsrevs,
-                                  rebasesetrevs,
-                                  rebaseobsskipped):
+def _checkobsrebase(repo, ui, rebaseobsrevs, rebasesetrevs, rebaseobsskipped):
     """
     Abort if rebase will create divergence or rebase is noop because of markers
 
@@ -1150,7 +1160,7 @@
 
             # Update away from the rebase if necessary
             if shouldupdate or needupdate(repo, state):
-                merge.update(repo, originalwd, False, True)
+                mergemod.update(repo, originalwd, False, True)
 
             # Strip from the first rebased revision
             if rebased:
@@ -1306,6 +1316,10 @@
                 ui.debug('--update and --rebase are not compatible, ignoring '
                          'the update flag\n')
 
+            cmdutil.checkunfinished(repo)
+            cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
+                'please commit or shelve your changes first'))
+
             revsprepull = len(repo)
             origpostincoming = commands.postincoming
             def _dummy(*args, **kwargs):
@@ -1382,7 +1396,7 @@
     """return a mapping obsolete => successor for all obsolete nodes to be
     rebased that have a successors in the destination
 
-    obsolete => None entries in the mapping indicate nodes with no succesor"""
+    obsolete => None entries in the mapping indicate nodes with no successor"""
     obsoletenotrebased = {}
 
     # Build a mapping successor => obsolete nodes for the obsolete
@@ -1414,7 +1428,7 @@
     return obsoletenotrebased
 
 def summaryhook(ui, repo):
-    if not os.path.exists(repo.join('rebasestate')):
+    if not repo.vfs.exists('rebasestate'):
         return
     try:
         rbsrt = rebaseruntime(repo, ui, {})
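The reworked base handling earlier in this file groups the requested bases
by their branching point relative to the destination, so bases hanging off
one side branch are rebased as a single stack. A worked example on a
hypothetical graph:

    # rebasing with --base 2 --base 3 onto dest 4:
    #
    #   o 4 dest
    #   |  o 3
    #   |  o 2
    #   | /
    #   o 1            ancestor(2, 4) == ancestor(3, 4) == 1
    #   o 0
    #
    # bpbase    == {1: [2, 3]}
    # roots     == children(1) & ancestors([2, 3]) == [2]
    # rebaseset == revs('%ld::', [2]) == {2, 3}

The old 'ancestor(%ld, %d)' form funneled every base through one common
ancestor; the per-base grouping copes with bases that branch off at
different points, while None in bpbase (no branching point found) still
falls back to the old "nothing to rebase" behavior.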
--- a/hgext/schemes.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/schemes.py	Wed Jan 18 11:43:36 2017 -0500
@@ -50,6 +50,7 @@
     error,
     extensions,
     hg,
+    pycompat,
     templater,
     util,
 )
@@ -114,7 +115,7 @@
     schemes.update(dict(ui.configitems('schemes')))
     t = templater.engine(lambda x: x)
     for scheme, url in schemes.items():
-        if (os.name == 'nt' and len(scheme) == 1 and scheme.isalpha()
+        if (pycompat.osname == 'nt' and len(scheme) == 1 and scheme.isalpha()
             and os.path.exists('%s:\\' % scheme)):
             raise error.Abort(_('custom scheme %s:// conflicts with drive '
                                'letter %s:\\\n') % (scheme, scheme.upper()))
--- a/hgext/shelve.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/shelve.py	Wed Jan 18 11:43:36 2017 -0500
@@ -62,6 +62,13 @@
 
 backupdir = 'shelve-backup'
 shelvedir = 'shelved'
+shelvefileextensions = ['hg', 'patch']
+# the universal 'patch' extension is present in all types of shelves
+patchextension = 'patch'
+
+# we never need the user, so we use a
+# generic user for all shelve operations
+shelveuser = 'shelve@localhost'
 
 class shelvedfile(object):
     """Helper for the file storing a single shelve
@@ -154,6 +161,8 @@
     """
     _version = 1
     _filename = 'shelvedstate'
+    _keep = 'keep'
+    _nokeep = 'nokeep'
 
     @classmethod
     def load(cls, repo):
@@ -170,6 +179,7 @@
             parents = [nodemod.bin(h) for h in fp.readline().split()]
             stripnodes = [nodemod.bin(h) for h in fp.readline().split()]
             branchtorestore = fp.readline().strip()
+            keep = fp.readline().strip() == cls._keep
         except (ValueError, TypeError) as err:
             raise error.CorruptedState(str(err))
         finally:
@@ -183,6 +193,7 @@
             obj.parents = parents
             obj.stripnodes = stripnodes
             obj.branchtorestore = branchtorestore
+            obj.keep = keep
         except error.RepoLookupError as err:
             raise error.CorruptedState(str(err))
 
@@ -190,7 +201,7 @@
 
     @classmethod
     def save(cls, repo, name, originalwctx, pendingctx, stripnodes,
-             branchtorestore):
+             branchtorestore, keep=False):
         fp = repo.vfs(cls._filename, 'wb')
         fp.write('%i\n' % cls._version)
         fp.write('%s\n' % name)
@@ -201,6 +212,7 @@
         fp.write('%s\n' %
                  ' '.join([nodemod.hex(n) for n in stripnodes]))
         fp.write('%s\n' % branchtorestore)
+        fp.write('%s\n' % (cls._keep if keep else cls._nokeep))
         fp.close()
 
     @classmethod
@@ -210,7 +222,8 @@
 def cleanupoldbackups(repo):
     vfs = scmutil.vfs(repo.join(backupdir))
     maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
-    hgfiles = [f for f in vfs.listdir() if f.endswith('.hg')]
+    hgfiles = [f for f in vfs.listdir()
+               if f.endswith('.' + patchextension)]
     hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
     if 0 < maxbackups and maxbackups < len(hgfiles):
         bordermtime = hgfiles[-maxbackups][0]
@@ -220,8 +233,8 @@
         if mtime == bordermtime:
             # keep it, because timestamp can't decide exact order of backups
             continue
-        base = f[:-3]
-        for ext in 'hg patch'.split():
+        base = f[:-(1 + len(patchextension))]
+        for ext in shelvefileextensions:
             try:
                 vfs.unlink(base + '.' + ext)
             except OSError as err:
@@ -242,24 +255,111 @@
         cmdutil.checkunfinished(repo)
         return _docreatecmd(ui, repo, pats, opts)
 
-def _docreatecmd(ui, repo, pats, opts):
-    def mutableancestors(ctx):
-        """return all mutable ancestors for ctx (included)
+def getshelvename(repo, parent, opts):
+    """Decide on the name this shelve is going to have"""
+    def gennames():
+        yield label
+        for i in xrange(1, 100):
+            yield '%s-%02d' % (label, i)
+    name = opts.get('name')
+    label = repo._activebookmark or parent.branch() or 'default'
+    # slashes aren't allowed in filenames, therefore we replace them
+    label = label.replace('/', '_')
+    label = label.replace('\\', '_')
+    # filenames must not start with '.' as that would make the file hidden
+    if label.startswith('.'):
+        label = label.replace('.', '_', 1)
+
+    if name:
+        if shelvedfile(repo, name, patchextension).exists():
+            e = _("a shelved change named '%s' already exists") % name
+            raise error.Abort(e)
+
+        # ensure we are not creating a subdirectory or a hidden file
+        if '/' in name or '\\' in name:
+            raise error.Abort(_('shelved change names can not contain slashes'))
+        if name.startswith('.'):
+            raise error.Abort(_("shelved change names can not start with '.'"))
+
+    else:
+        for n in gennames():
+            if not shelvedfile(repo, n, patchextension).exists():
+                name = n
+                break
+        else:
+            raise error.Abort(_("too many shelved changes named '%s'") % label)
+
+    return name
+
+def mutableancestors(ctx):
+    """return all mutable ancestors for ctx (included)
+
+    Much faster than the revset ancestors(ctx) & draft()"""
+    seen = set([nodemod.nullrev])
+    visit = collections.deque()
+    visit.append(ctx)
+    while visit:
+        ctx = visit.popleft()
+        yield ctx.node()
+        for parent in ctx.parents():
+            rev = parent.rev()
+            if rev not in seen:
+                seen.add(rev)
+                if parent.mutable():
+                    visit.append(parent)
 
-        Much faster than the revset ancestors(ctx) & draft()"""
-        seen = set([nodemod.nullrev])
-        visit = collections.deque()
-        visit.append(ctx)
-        while visit:
-            ctx = visit.popleft()
-            yield ctx.node()
-            for parent in ctx.parents():
-                rev = parent.rev()
-                if rev not in seen:
-                    seen.add(rev)
-                    if parent.mutable():
-                        visit.append(parent)
+def getcommitfunc(extra, interactive, editor=False):
+    def commitfunc(ui, repo, message, match, opts):
+        hasmq = util.safehasattr(repo, 'mq')
+        if hasmq:
+            saved, repo.mq.checkapplied = repo.mq.checkapplied, False
+        backup = repo.ui.backupconfig('phases', 'new-commit')
+        try:
+            repo.ui.setconfig('phases', 'new-commit', phases.secret)
+            editor_ = False
+            if editor:
+                editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
+                                                  **opts)
+            return repo.commit(message, shelveuser, opts.get('date'), match,
+                               editor=editor_, extra=extra)
+        finally:
+            repo.ui.restoreconfig(backup)
+            if hasmq:
+                repo.mq.checkapplied = saved
+
+    def interactivecommitfunc(ui, repo, *pats, **opts):
+        match = scmutil.match(repo['.'], pats, {})
+        message = opts['message']
+        return commitfunc(ui, repo, message, match, opts)
+
+    return interactivecommitfunc if interactive else commitfunc
 
+def _nothingtoshelvemessaging(ui, repo, pats, opts):
+    stat = repo.status(match=scmutil.match(repo[None], pats, opts))
+    if stat.deleted:
+        ui.status(_("nothing changed (%d missing files, see "
+                    "'hg status')\n") % len(stat.deleted))
+    else:
+        ui.status(_("nothing changed\n"))
+
+def _shelvecreatedcommit(repo, node, name):
+    bases = list(mutableancestors(repo[node]))
+    shelvedfile(repo, name, 'hg').writebundle(bases, node)
+    cmdutil.export(repo, [node],
+                   fp=shelvedfile(repo, name, patchextension).opener('wb'),
+                   opts=mdiff.diffopts(git=True))
+
+def _includeunknownfiles(repo, pats, opts, extra):
+    s = repo.status(match=scmutil.match(repo[None], pats, opts),
+                    unknown=True)
+    if s.unknown:
+        extra['shelve_unknown'] = '\0'.join(s.unknown)
+        repo[None].add(s.unknown)
+
+def _finishshelve(repo):
+    _aborttransaction(repo)
+
+def _docreatecmd(ui, repo, pats, opts):
     wctx = repo[None]
     parents = wctx.parents()
     if len(parents) > 1:
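A concrete illustration of the naming rules above, with a hypothetical
active bookmark:

    # active bookmark 'feature/x' gives label 'feature_x'
    # gennames() then tries, in order, until a free name is found:
    #   feature_x, feature_x-01, feature_x-02, ..., feature_x-99
    # after feature_x-99 the command aborts:
    #   "too many shelved changes named 'feature_x'"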
@@ -267,18 +367,6 @@
     parent = parents[0]
     origbranch = wctx.branch()
 
-    # we never need the user, so we use a generic user for all shelve operations
-    user = 'shelve@localhost'
-    label = repo._activebookmark or parent.branch() or 'default'
-
-    # slashes aren't allowed in filenames, therefore we rename it
-    label = label.replace('/', '_')
-
-    def gennames():
-        yield label
-        for i in xrange(1, 100):
-            yield '%s-%02d' % (label, i)
-
     if parent.node() != nodemod.nullid:
         desc = "changes to: %s" % parent.description().split('\n', 1)[0]
     else:
@@ -287,8 +375,6 @@
     if not opts.get('message'):
         opts['message'] = desc
 
-    name = opts.get('name')
-
     lock = tr = None
     try:
         lock = repo.lock()
@@ -297,81 +383,31 @@
         # pull races. ensure we don't print the abort message to stderr.
         tr = repo.transaction('commit', report=lambda x: None)
 
-        if name:
-            if shelvedfile(repo, name, 'hg').exists():
-                raise error.Abort(_("a shelved change named '%s' already exists"
-                                   ) % name)
-        else:
-            for n in gennames():
-                if not shelvedfile(repo, n, 'hg').exists():
-                    name = n
-                    break
-            else:
-                raise error.Abort(_("too many shelved changes named '%s'") %
-                                 label)
-
-        # ensure we are not creating a subdirectory or a hidden file
-        if '/' in name or '\\' in name:
-            raise error.Abort(_('shelved change names may not contain slashes'))
-        if name.startswith('.'):
-            raise error.Abort(_("shelved change names may not start with '.'"))
         interactive = opts.get('interactive', False)
         includeunknown = (opts.get('unknown', False) and
                           not opts.get('addremove', False))
 
-        extra={}
+        name = getshelvename(repo, parent, opts)
+        extra = {}
         if includeunknown:
-            s = repo.status(match=scmutil.match(repo[None], pats, opts),
-                            unknown=True)
-            if s.unknown:
-                extra['shelve_unknown'] = '\0'.join(s.unknown)
-                repo[None].add(s.unknown)
+            _includeunknownfiles(repo, pats, opts, extra)
 
         if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
             # In non-bare shelve we don't store newly created branch
             # at bundled commit
             repo.dirstate.setbranch(repo['.'].branch())
 
-        def commitfunc(ui, repo, message, match, opts):
-            hasmq = util.safehasattr(repo, 'mq')
-            if hasmq:
-                saved, repo.mq.checkapplied = repo.mq.checkapplied, False
-            backup = repo.ui.backupconfig('phases', 'new-commit')
-            try:
-                repo.ui. setconfig('phases', 'new-commit', phases.secret)
-                editor = cmdutil.getcommiteditor(editform='shelve.shelve',
-                                                 **opts)
-                return repo.commit(message, user, opts.get('date'), match,
-                                   editor=editor, extra=extra)
-            finally:
-                repo.ui.restoreconfig(backup)
-                if hasmq:
-                    repo.mq.checkapplied = saved
-
-        def interactivecommitfunc(ui, repo, *pats, **opts):
-            match = scmutil.match(repo['.'], pats, {})
-            message = opts['message']
-            return commitfunc(ui, repo, message, match, opts)
+        commitfunc = getcommitfunc(extra, interactive, editor=True)
         if not interactive:
             node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
         else:
-            node = cmdutil.dorecord(ui, repo, interactivecommitfunc, None,
+            node = cmdutil.dorecord(ui, repo, commitfunc, None,
                                     False, cmdutil.recordfilter, *pats, **opts)
         if not node:
-            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
-            if stat.deleted:
-                ui.status(_("nothing changed (%d missing files, see "
-                            "'hg status')\n") % len(stat.deleted))
-            else:
-                ui.status(_("nothing changed\n"))
+            _nothingtoshelvemessaging(ui, repo, pats, opts)
             return 1
 
-        bases = list(mutableancestors(repo[node]))
-        shelvedfile(repo, name, 'hg').writebundle(bases, node)
-        cmdutil.export(repo, [node],
-                       fp=shelvedfile(repo, name, 'patch').opener('wb'),
-                       opts=mdiff.diffopts(git=True))
-
+        _shelvecreatedcommit(repo, node, name)
 
         if ui.formatted():
             desc = util.ellipsis(desc, ui.termwidth())
@@ -380,7 +416,7 @@
         if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
             repo.dirstate.setbranch(origbranch)
 
-        _aborttransaction(repo)
+        _finishshelve(repo)
     finally:
         lockmod.release(tr, lock)
 
@@ -399,7 +435,7 @@
     with repo.wlock():
         for (name, _type) in repo.vfs.readdir(shelvedir):
             suffix = name.rsplit('.', 1)[-1]
-            if suffix in ('hg', 'patch'):
+            if suffix in shelvefileextensions:
                 shelvedfile(repo, name).movetobackup()
             cleanupoldbackups(repo)
 
@@ -410,8 +446,15 @@
     with repo.wlock():
         try:
             for name in pats:
-                for suffix in 'hg patch'.split():
-                    shelvedfile(repo, name, suffix).movetobackup()
+                for suffix in shelvefileextensions:
+                    shfile = shelvedfile(repo, name, suffix)
+                    # the patch file is necessary, as it should
+                    # be present for any kind of shelve,
+                    # but the .hg file is optional as in the future
+                    # we will add an obsolete shelve which does not
+                    # create a bundle
+                    if shfile.exists() or suffix == patchextension:
+                        shfile.movetobackup()
             cleanupoldbackups(repo)
         except OSError as err:
             if err.errno != errno.ENOENT:
@@ -429,7 +472,7 @@
     info = []
     for (name, _type) in names:
         pfx, sfx = name.rsplit('.', 1)
-        if not pfx or sfx != 'patch':
+        if not pfx or sfx != patchextension:
             continue
         st = shelvedfile(repo, name).stat()
         info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
@@ -457,7 +500,7 @@
         ui.write(age, label='shelve.age')
         ui.write(' ' * (12 - len(age)))
         used += 12
-        with open(name + '.patch', 'rb') as fp:
+        with open(name + '.' + patchextension, 'rb') as fp:
             while True:
                 line = fp.readline()
                 if not line:
@@ -476,18 +519,17 @@
                 for chunk, label in patch.difflabel(iter, difflines):
                     ui.write(chunk, label=label)
             if opts['stat']:
-                for chunk, label in patch.diffstatui(difflines, width=width,
-                                                     git=True):
+                for chunk, label in patch.diffstatui(difflines, width=width):
                     ui.write(chunk, label=label)
 
-def singlepatchcmds(ui, repo, pats, opts, subcommand):
-    """subcommand that displays a single shelf"""
-    if len(pats) != 1:
-        raise error.Abort(_("--%s expects a single shelf") % subcommand)
-    shelfname = pats[0]
+def patchcmds(ui, repo, pats, opts, subcommand):
+    """subcommand that displays shelves"""
+    if len(pats) == 0:
+        raise error.Abort(_("--%s expects at least one shelf") % subcommand)
 
-    if not shelvedfile(repo, shelfname, 'patch').exists():
-        raise error.Abort(_("cannot find shelf %s") % shelfname)
+    for shelfname in pats:
+        if not shelvedfile(repo, shelfname, patchextension).exists():
+            raise error.Abort(_("cannot find shelf %s") % shelfname)
 
     listcmd(ui, repo, pats, opts)
 
@@ -557,8 +599,10 @@
 def unshelvecleanup(ui, repo, name, opts):
     """remove related files after an unshelve"""
     if not opts.get('keep'):
-        for filetype in 'hg patch'.split():
-            shelvedfile(repo, name, filetype).movetobackup()
+        for filetype in shelvefileextensions:
+            shfile = shelvedfile(repo, name, filetype)
+            if shfile.exists():
+                shfile.movetobackup()
         cleanupoldbackups(repo)
 
 def unshelvecontinue(ui, repo, state, opts):
@@ -600,6 +644,91 @@
         unshelvecleanup(ui, repo, state.name, opts)
         ui.status(_("unshelve of '%s' complete\n") % state.name)
 
+def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
+    """Temporarily commit working copy changes before moving unshelve commit"""
+    # Store pending changes in a commit and remember the added files, in
+    # case a shelve contains unknown files that are part of the pending change
+    s = repo.status()
+    addedbefore = frozenset(s.added)
+    if not (s.modified or s.added or s.removed or s.deleted):
+        return tmpwctx, addedbefore
+    ui.status(_("temporarily committing pending changes "
+                "(restore with 'hg unshelve --abort')\n"))
+    commitfunc = getcommitfunc(extra=None, interactive=False,
+                               editor=False)
+    tempopts = {}
+    tempopts['message'] = "pending changes temporary commit"
+    tempopts['date'] = opts.get('date')
+    ui.quiet = True
+    node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
+    tmpwctx = repo[node]
+    return tmpwctx, addedbefore
+
+def _unshelverestorecommit(ui, repo, basename, oldquiet):
+    """Recreate commit in the repository during the unshelve"""
+    ui.quiet = True
+    shelvedfile(repo, basename, 'hg').applybundle()
+    shelvectx = repo['tip']
+    ui.quiet = oldquiet
+    return repo, shelvectx
+
+def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
+                          tmpwctx, shelvectx, branchtorestore):
+    """Rebase restored commit from its original location to a destination"""
+    # If the shelve is not immediately on top of the commit
+    # we'll be merging with, rebase it to be on top.
+    if tmpwctx.node() == shelvectx.parents()[0].node():
+        return shelvectx
+
+    ui.status(_('rebasing shelved changes\n'))
+    try:
+        rebase.rebase(ui, repo, **{
+            'rev': [shelvectx.rev()],
+            'dest': str(tmpwctx.rev()),
+            'keep': True,
+            'tool': opts.get('tool', ''),
+        })
+    except error.InterventionRequired:
+        tr.close()
+
+        stripnodes = [repo.changelog.node(rev)
+                      for rev in xrange(oldtiprev, len(repo))]
+        shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes,
+                          branchtorestore, opts.get('keep'))
+
+        util.rename(repo.join('rebasestate'),
+                    repo.join('unshelverebasestate'))
+        raise error.InterventionRequired(
+            _("unresolved conflicts (see 'hg resolve', then "
+              "'hg unshelve --continue')"))
+
+    # refresh ctx after rebase completes
+    shelvectx = repo['tip']
+
+    if shelvectx not in tmpwctx.children():
+        # rebase was a no-op, so it produced no child commit
+        shelvectx = tmpwctx
+    return shelvectx
+
+def _forgetunknownfiles(repo, shelvectx, addedbefore):
+    # Forget any files that were unknown before the shelve, unknown before
+    # unshelve started, but are now added.
+    shelveunknown = shelvectx.extra().get('shelve_unknown')
+    if not shelveunknown:
+        return
+    shelveunknown = frozenset(shelveunknown.split('\0'))
+    addedafter = frozenset(repo.status().added)
+    toforget = (addedafter & shelveunknown) - addedbefore
+    repo[None].forget(toforget)
+
+def _finishunshelve(repo, oldtiprev, tr):
+    # Aborting the transaction will strip all the commits for us,
+    # but it doesn't update the in-memory structures, so addchangegroup
+    # hooks still fire and try to operate on the missing commits.
+    # Clean up manually to prevent this.
+    repo.unfiltered().changelog.strip(oldtiprev, tr)
+    _aborttransaction(repo)
+
 @command('unshelve',
          [('a', 'abort', None,
            _('abort an incomplete unshelve operation')),
@@ -667,6 +796,8 @@
 
         try:
             state = shelvedstate.load(repo)
+            if opts.get('keep') is None:
+                opts['keep'] = state.keep
         except IOError as err:
             if err.errno != errno.ENOENT:
                 raise
@@ -701,7 +832,7 @@
     else:
         basename = shelved[0]
 
-    if not shelvedfile(repo, basename, 'patch').exists():
+    if not shelvedfile(repo, basename, patchextension).exists():
         raise error.Abort(_("shelved change '%s' not found") % basename)
 
     oldquiet = ui.quiet
@@ -722,101 +853,25 @@
         # and shelvectx is the unshelved changes. Then we merge it all down
         # to the original pctx.
 
-        # Store pending changes in a commit and remember added in case a shelve
-        # contains unknown files that are part of the pending change
-        s = repo.status()
-        addedbefore = frozenset(s.added)
-        if s.modified or s.added or s.removed or s.deleted:
-            ui.status(_("temporarily committing pending changes "
-                        "(restore with 'hg unshelve --abort')\n"))
-            def commitfunc(ui, repo, message, match, opts):
-                hasmq = util.safehasattr(repo, 'mq')
-                if hasmq:
-                    saved, repo.mq.checkapplied = repo.mq.checkapplied, False
+        tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
+                                                         tmpwctx)
 
-                backup = repo.ui.backupconfig('phases', 'new-commit')
-                try:
-                    repo.ui.setconfig('phases', 'new-commit', phases.secret)
-                    return repo.commit(message, 'shelve@localhost',
-                                       opts.get('date'), match)
-                finally:
-                    repo.ui.restoreconfig(backup)
-                    if hasmq:
-                        repo.mq.checkapplied = saved
-
-            tempopts = {}
-            tempopts['message'] = "pending changes temporary commit"
-            tempopts['date'] = opts.get('date')
-            ui.quiet = True
-            node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
-            tmpwctx = repo[node]
-
-        ui.quiet = True
-        shelvedfile(repo, basename, 'hg').applybundle()
-
-        ui.quiet = oldquiet
-
-        shelvectx = repo['tip']
+        repo, shelvectx = _unshelverestorecommit(ui, repo, basename, oldquiet)
 
         branchtorestore = ''
         if shelvectx.branch() != shelvectx.p1().branch():
             branchtorestore = shelvectx.branch()
 
-        # If the shelve is not immediately on top of the commit
-        # we'll be merging with, rebase it to be on top.
-        if tmpwctx.node() != shelvectx.parents()[0].node():
-            ui.status(_('rebasing shelved changes\n'))
-            try:
-                rebase.rebase(ui, repo, **{
-                    'rev' : [shelvectx.rev()],
-                    'dest' : str(tmpwctx.rev()),
-                    'keep' : True,
-                    'tool' : opts.get('tool', ''),
-                })
-            except error.InterventionRequired:
-                tr.close()
-
-                stripnodes = [repo.changelog.node(rev)
-                              for rev in xrange(oldtiprev, len(repo))]
-                shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes,
-                                  branchtorestore)
-
-                util.rename(repo.join('rebasestate'),
-                            repo.join('unshelverebasestate'))
-                raise error.InterventionRequired(
-                    _("unresolved conflicts (see 'hg resolve', then "
-                      "'hg unshelve --continue')"))
-
-            # refresh ctx after rebase completes
-            shelvectx = repo['tip']
-
-            if not shelvectx in tmpwctx.children():
-                # rebase was a no-op, so it produced no child commit
-                shelvectx = tmpwctx
-
+        shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
+                                          basename, pctx, tmpwctx, shelvectx,
+                                          branchtorestore)
         mergefiles(ui, repo, pctx, shelvectx)
         restorebranch(ui, repo, branchtorestore)
-
-        # Forget any files that were unknown before the shelve, unknown before
-        # unshelve started, but are now added.
-        shelveunknown = shelvectx.extra().get('shelve_unknown')
-        if shelveunknown:
-            shelveunknown = frozenset(shelveunknown.split('\0'))
-            addedafter = frozenset(repo.status().added)
-            toforget = (addedafter & shelveunknown) - addedbefore
-            repo[None].forget(toforget)
+        _forgetunknownfiles(repo, shelvectx, addedbefore)
 
         shelvedstate.clear(repo)
-
-        # The transaction aborting will strip all the commits for us,
-        # but it doesn't update the inmemory structures, so addchangegroup
-        # hooks still fire and try to operate on the missing commits.
-        # Clean up manually to prevent this.
-        repo.unfiltered().changelog.strip(oldtiprev, tr)
-
+        _finishunshelve(repo, oldtiprev, tr)
         unshelvecleanup(ui, repo, basename, opts)
-
-        _aborttransaction(repo)
     finally:
         ui.quiet = oldquiet
         if tr:
@@ -912,9 +967,9 @@
     elif checkopt('list'):
         return listcmd(ui, repo, pats, opts)
     elif checkopt('patch'):
-        return singlepatchcmds(ui, repo, pats, opts, subcommand='patch')
+        return patchcmds(ui, repo, pats, opts, subcommand='patch')
     elif checkopt('stat'):
-        return singlepatchcmds(ui, repo, pats, opts, subcommand='stat')
+        return patchcmds(ui, repo, pats, opts, subcommand='stat')
     else:
         return createcmd(ui, repo, pats, opts)
 
--- a/hgext/win32mbcs.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/hgext/win32mbcs.py	Wed Jan 18 11:43:36 2017 -0500
@@ -53,6 +53,7 @@
 from mercurial import (
     encoding,
     error,
+    pycompat,
 )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -97,7 +98,7 @@
     except UnicodeError:
         us = s
     if us and us[-1] not in ':/\\':
-        s += os.sep
+        s += pycompat.ossep
     return s
 
 
@@ -138,10 +139,7 @@
     func = getattr(module, name)
     def f(*args, **kwds):
         return wrapper(func, args, kwds)
-    try:
-        f.__name__ = func.__name__ # fails with Python 2.3
-    except Exception:
-        pass
+    f.__name__ = func.__name__
     setattr(module, name, f)
 
 # List of functions to be wrapped.
@@ -171,7 +169,7 @@
 def extsetup(ui):
     # TODO: decide use of config section for this extension
     if ((not os.path.supports_unicode_filenames) and
-        (sys.platform != 'cygwin')):
+        (pycompat.sysplatform != 'cygwin')):
         ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
         return
     # determine encoding for filename
@@ -181,7 +179,7 @@
     if _encoding.lower() in problematic_encodings.split():
         for f in funcs.split():
             wrapname(f, wrapper)
-        if os.name == 'nt':
+        if pycompat.osname == 'nt':
             for f in winfuncs.split():
                 wrapname(f, wrapper)
         wrapname("mercurial.osutil.listdir", wrapperforlistdir)
--- a/i18n/ja.po	Wed Jan 04 10:51:37 2017 -0600
+++ b/i18n/ja.po	Wed Jan 18 11:43:36 2017 -0500
@@ -6741,7 +6741,7 @@
 msgstr ""
 
 msgid ""
-"Scripts are run asychronously as detached daemon processes; mercurial will\n"
+"Scripts are run asynchronously as detached daemon processes; mercurial will\n"
 "not ensure that they exit cleanly."
 msgstr ""
 
@@ -10360,7 +10360,7 @@
 msgstr "shelve 状態管理ファイルが破損しています"
 
 msgid "please run hg unshelve --abort to abort unshelve operation"
-msgstr "'hg unshelve --abort' を実施して unshleve 操作を中断してください"
+msgstr "'hg unshelve --abort' を実施して unshelve 操作を中断してください"
 
 msgid ""
 "could not read shelved state file, your working copy may be in an unexpected "
@@ -14194,7 +14194,7 @@
 msgstr "トランザクション実施途中は廃止マーカを破棄できません"
 
 #, python-format
-msgid "deleted %i obsolescense markers\n"
+msgid "deleted %i obsolescence markers\n"
 msgstr "%i 個の廃止マーカを破棄\n"
 
 #, python-format
@@ -32661,8 +32661,8 @@
 msgid "revset expects one or more arguments"
 msgstr "revset の引数は1個以上です"
 
-msgid ":rstdoc(text, style): Format ReStructuredText."
-msgstr ":rstdoc(text, style): 出力を ReStructuredText として整形します。"
+msgid ":rstdoc(text, style): Format reStructuredText."
+msgstr ":rstdoc(text, style): 出力を reStructuredText として整形します。"
 
 #. i18n: "rstdoc" is a keyword
 msgid "rstdoc expects two arguments"
--- a/i18n/pt_BR.po	Wed Jan 04 10:51:37 2017 -0600
+++ b/i18n/pt_BR.po	Wed Jan 18 11:43:36 2017 -0500
@@ -6803,7 +6803,7 @@
 "falhasse."
 
 msgid ""
-"Scripts are run asychronously as detached daemon processes; mercurial will\n"
+"Scripts are run asynchronously as detached daemon processes; mercurial will\n"
 "not ensure that they exit cleanly."
 msgstr ""
 "Os scripts são executados assincronamente como processos desanexados;\n"
@@ -14480,7 +14480,7 @@
 "não é possível apagar marcações de obsolescência durante uma transação."
 
 #, python-format
-msgid "deleted %i obsolescense markers\n"
+msgid "deleted %i obsolescence markers\n"
 msgstr "%i marcações de obsolescência apagadas\n"
 
 #, python-format
@@ -33571,8 +33571,8 @@
 msgid "revset expects one or more arguments"
 msgstr "revset espera um ou mais argumentos"
 
-msgid ":rstdoc(text, style): Format ReStructuredText."
-msgstr ":rstdoc(texto, estilo): Formata ReStructuredText."
+msgid ":rstdoc(text, style): Format reStructuredText."
+msgstr ":rstdoc(texto, estilo): Formata reStructuredText."
 
 #. i18n: "rstdoc" is a keyword
 msgid "rstdoc expects two arguments"
--- a/mercurial/archival.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/archival.py	Wed Jan 18 11:43:36 2017 -0500
@@ -141,7 +141,7 @@
         self.mtime = mtime
         self.fileobj = None
 
-        def taropen(name, mode, fileobj=None):
+        def taropen(mode, name='', fileobj=None):
             if kind == 'gz':
                 mode = mode[0]
                 if not fileobj:
@@ -155,10 +155,9 @@
                 return tarfile.open(name, mode + kind, fileobj)
 
         if isinstance(dest, str):
-            self.z = taropen(dest, mode='w:')
+            self.z = taropen('w:', name=dest)
         else:
-            # Python 2.5-2.5.1 have a regression that requires a name arg
-            self.z = taropen(name='', mode='w|', fileobj=dest)
+            self.z = taropen('w|', fileobj=dest)
 
     def addfile(self, name, mode, islink, data):
         i = tarfile.TarInfo(name)
--- a/mercurial/bdiff.c	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/bdiff.c	Wed Jan 18 11:43:36 2017 -0500
@@ -17,6 +17,10 @@
 #include "bitmanipulation.h"
 #include "bdiff.h"
 
+/* Hash implementation from diffutils */
+#define ROL(v, n) ((v) << (n) | (v) >> (sizeof(v) * CHAR_BIT - (n)))
+#define HASH(h, c) ((c) + ROL(h, 7))
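+/* e.g. hashing "ab": h = HASH(0, 'a'); h = HASH(h, 'b') -- each step
+   rotates the accumulator left by 7 bits and adds the next byte */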
+
 struct pos {
 	int pos, len;
 };
@@ -31,9 +35,11 @@
 
 	/* count the lines */
 	i = 1; /* extra line for sentinel */
-	for (p = a; p < a + len; p++)
-		if (*p == '\n' || p == plast)
+	for (p = a; p < plast; p++)
+		if (*p == '\n')
 			i++;
+	if (p == plast)
+		i++;
 
 	*lr = l = (struct bdiff_line *)malloc(sizeof(struct bdiff_line) * i);
 	if (!l)
@@ -41,11 +47,10 @@
 
 	/* build the line array and calculate hashes */
 	hash = 0;
-	for (p = a; p < a + len; p++) {
-		/* Leonid Yuriev's hash */
-		hash = (hash * 1664525) + (unsigned char)*p + 1013904223;
+	for (p = a; p < plast; p++) {
+		hash = HASH(hash, *p);
 
-		if (*p == '\n' || p == plast) {
+		if (*p == '\n') {
 			l->hash = hash;
 			hash = 0;
 			l->len = p - b + 1;
@@ -56,6 +61,15 @@
 		}
 	}
 
+	if (p == plast) {
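+		/* the final line lacks a trailing '\n'; finish it here so the
+		   loop above only has to test for newlines */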
+		hash = HASH(hash, *p);
+		l->hash = hash;
+		l->len = p - b + 1;
+		l->l = b;
+		l->n = INT_MAX;
+		l++;
+	}
+
 	/* set up a sentinel */
 	l->hash = 0;
 	l->len = 0;
@@ -138,7 +152,7 @@
 			struct pos *pos,
 			 int a1, int a2, int b1, int b2, int *omi, int *omj)
 {
-	int mi = a1, mj = b1, mk = 0, i, j, k, half;
+	int mi = a1, mj = b1, mk = 0, i, j, k, half, bhalf;
 
 	/* window our search on large regions to better bound
 	   worst-case performance. by choosing a window at the end, we
@@ -146,7 +160,8 @@
 	if (a2 - a1 > 30000)
 		a1 = a2 - 30000;
 
-	half = (a1 + a2) / 2;
+	half = (a1 + a2 - 1) / 2;
+	bhalf = (b1 + b2 - 1) / 2;
 
 	for (i = a1; i < a2; i++) {
 		/* skip all lines in b after the current block */
@@ -172,10 +187,20 @@
 
 			/* best match so far? we prefer matches closer
 			   to the middle to balance recursion */
-			if (k > mk || (k == mk && (i <= mi || i < half))) {
+			if (k > mk) {
+				/* a longer match */
 				mi = i;
 				mj = j;
 				mk = k;
+			} else if (k == mk) {
+				if (i > mi && i <= half && j > b1) {
+					/* same match but closer to half */
+					mi = i;
+					mj = j;
+				} else if (i == mi && (mj > bhalf || i == a1)) {
+					/* same i but best earlier j */
+					mj = j;
+				}
 			}
 		}
 	}
--- a/mercurial/bdiff_module.c	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/bdiff_module.c	Wed Jan 18 11:43:36 2017 -0500
@@ -61,12 +61,12 @@
 
 static PyObject *bdiff(PyObject *self, PyObject *args)
 {
-	char *sa, *sb, *rb;
+	char *sa, *sb, *rb, *ia, *ib;
 	PyObject *result = NULL;
 	struct bdiff_line *al, *bl;
 	struct bdiff_hunk l, *h;
 	int an, bn, count;
-	Py_ssize_t len = 0, la, lb;
+	Py_ssize_t len = 0, la, lb, li = 0, lcommon = 0, lmax;
 	PyThreadState *_save;
 
 	l.next = NULL;
@@ -80,8 +80,17 @@
 	}
 
 	_save = PyEval_SaveThread();
-	an = bdiff_splitlines(sa, la, &al);
-	bn = bdiff_splitlines(sb, lb, &bl);
+
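+	/* skip the identical leading part of both inputs, but only up to the
+	   last common newline; the offsets emitted below are rebased by
+	   lcommon so the resulting delta is unchanged */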
+	lmax = la > lb ? lb : la;
+	for (ia = sa, ib = sb;
+	     li < lmax && *ia == *ib;
+	     ++li, ++ia, ++ib)
+		if (*ia == '\n')
+			lcommon = li + 1;
+	/* we can almost add: if (li == lmax) lcommon = li; */
+
+	an = bdiff_splitlines(sa + lcommon, la - lcommon, &al);
+	bn = bdiff_splitlines(sb + lcommon, lb - lcommon, &bl);
 	if (!al || !bl)
 		goto nomem;
 
@@ -112,8 +121,8 @@
 	for (h = l.next; h; h = h->next) {
 		if (h->a1 != la || h->b1 != lb) {
 			len = bl[h->b1].l - bl[lb].l;
-			putbe32((uint32_t)(al[la].l - al->l), rb);
-			putbe32((uint32_t)(al[h->a1].l - al->l), rb + 4);
+			putbe32((uint32_t)(al[la].l + lcommon - al->l), rb);
+			putbe32((uint32_t)(al[h->a1].l + lcommon - al->l), rb + 4);
 			putbe32((uint32_t)len, rb + 8);
 			memcpy(rb + 12, bl[lb].l, len);
 			rb += 12 + len;
--- a/mercurial/bookmarks.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/bookmarks.py	Wed Jan 18 11:43:36 2017 -0500
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 import errno
-import os
 
 from .i18n import _
 from .node import (
@@ -31,7 +30,7 @@
     may need to tweak this behavior further.
     """
     bkfile = None
-    if 'HG_PENDING' in os.environ:
+    if 'HG_PENDING' in encoding.environ:
         try:
             bkfile = repo.vfs('bookmarks.pending')
         except IOError as inst:
@@ -284,17 +283,21 @@
             lockmod.release(tr, lock)
     return update
 
-def listbookmarks(repo):
+def listbinbookmarks(repo):
     # We may try to list bookmarks on a repo type that does not
     # support it (e.g., statichttprepository).
     marks = getattr(repo, '_bookmarks', {})
 
-    d = {}
     hasnode = repo.changelog.hasnode
     for k, v in marks.iteritems():
         # don't expose local divergent bookmarks
         if hasnode(v) and ('@' not in k or k.endswith('@')):
-            d[k] = hex(v)
+            yield k, v
+
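+# hex-string variant of listbinbookmarks, for callers that expect 40-digit
+# hex nodes (e.g. the pushkey protocol)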
+def listbookmarks(repo):
+    d = {}
+    for book, node in listbinbookmarks(repo):
+        d[book] = hex(node)
     return d
 
 def pushbookmark(repo, key, old, new):
@@ -319,8 +322,7 @@
     finally:
         lockmod.release(tr, l, w)
 
-def compare(repo, srcmarks, dstmarks,
-            srchex=None, dsthex=None, targets=None):
+def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
     '''Compare bookmarks between srcmarks and dstmarks
 
     This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
@@ -343,19 +345,9 @@
     Changeset IDs of tuples in "addsrc", "adddst", "differ" or
      "invalid" list may be unknown for repo.
 
-    This function expects that "srcmarks" and "dstmarks" return
-    changeset ID in 40 hexadecimal digit string for specified
-    bookmark. If not so (e.g. bmstore "repo._bookmarks" returning
-    binary value), "srchex" or "dsthex" should be specified to convert
-    into such form.
-
     If "targets" is specified, only bookmarks listed in it are
     examined.
     '''
-    if not srchex:
-        srchex = lambda x: x
-    if not dsthex:
-        dsthex = lambda x: x
 
     if targets:
         bset = set(targets)
@@ -377,14 +369,14 @@
     for b in sorted(bset):
         if b not in srcmarks:
             if b in dstmarks:
-                adddst((b, None, dsthex(dstmarks[b])))
+                adddst((b, None, dstmarks[b]))
             else:
                 invalid((b, None, None))
         elif b not in dstmarks:
-            addsrc((b, srchex(srcmarks[b]), None))
+            addsrc((b, srcmarks[b], None))
         else:
-            scid = srchex(srcmarks[b])
-            dcid = dsthex(dstmarks[b])
+            scid = srcmarks[b]
+            dcid = dstmarks[b]
             if scid == dcid:
                 same((b, scid, dcid))
             elif scid in repo and dcid in repo:
@@ -435,11 +427,17 @@
 
     return None
 
+def unhexlifybookmarks(marks):
+    binremotemarks = {}
+    for name, node in marks.items():
+        binremotemarks[name] = bin(node)
+    return binremotemarks
+
 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
     ui.debug("checking for updated bookmarks\n")
     localmarks = repo._bookmarks
     (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
-     ) = compare(repo, remotemarks, localmarks, dsthex=hex)
+    ) = comparebookmarks(repo, remotemarks, localmarks)
 
     status = ui.status
     warn = ui.warn
@@ -450,15 +448,15 @@
     changed = []
     for b, scid, dcid in addsrc:
         if scid in repo: # add remote bookmarks for changes we already have
-            changed.append((b, bin(scid), status,
+            changed.append((b, scid, status,
                             _("adding remote bookmark %s\n") % (b)))
         elif b in explicit:
             explicit.remove(b)
             ui.warn(_("remote bookmark %s points to locally missing %s\n")
-                    % (b, scid[:12]))
+                    % (b, hex(scid)[:12]))
 
     for b, scid, dcid in advsrc:
-        changed.append((b, bin(scid), status,
+        changed.append((b, scid, status,
                         _("updating bookmark %s\n") % (b)))
     # remove normal movement from explicit set
     explicit.difference_update(d[0] for d in changed)
@@ -466,13 +464,12 @@
     for b, scid, dcid in diverge:
         if b in explicit:
             explicit.discard(b)
-            changed.append((b, bin(scid), status,
+            changed.append((b, scid, status,
                             _("importing bookmark %s\n") % (b)))
         else:
-            snode = bin(scid)
-            db = _diverge(ui, b, path, localmarks, snode)
+            db = _diverge(ui, b, path, localmarks, scid)
             if db:
-                changed.append((db, snode, warn,
+                changed.append((db, scid, warn,
                                 _("divergent bookmark %s stored as %s\n") %
                                 (b, db)))
             else:
@@ -481,13 +478,13 @@
     for b, scid, dcid in adddst + advdst:
         if b in explicit:
             explicit.discard(b)
-            changed.append((b, bin(scid), status,
+            changed.append((b, scid, status,
                             _("importing bookmark %s\n") % (b)))
     for b, scid, dcid in differ:
         if b in explicit:
             explicit.remove(b)
             ui.warn(_("remote bookmark %s points to locally missing %s\n")
-                    % (b, scid[:12]))
+                    % (b, hex(scid)[:12]))
 
     if changed:
         tr = trfunc()
@@ -501,8 +498,8 @@
     '''
     ui.status(_("searching for changed bookmarks\n"))
 
-    r = compare(repo, other.listkeys('bookmarks'), repo._bookmarks,
-                dsthex=hex)
+    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
+    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
 
     incomings = []
@@ -518,16 +515,16 @@
             incomings.append("   %-25s %s\n" % (b, getid(id)))
     for b, scid, dcid in addsrc:
         # i18n: "added" refers to a bookmark
-        add(b, scid, _('added'))
+        add(b, hex(scid), _('added'))
     for b, scid, dcid in advsrc:
         # i18n: "advanced" refers to a bookmark
-        add(b, scid, _('advanced'))
+        add(b, hex(scid), _('advanced'))
     for b, scid, dcid in diverge:
         # i18n: "diverged" refers to a bookmark
-        add(b, scid, _('diverged'))
+        add(b, hex(scid), _('diverged'))
     for b, scid, dcid in differ:
         # i18n: "changed" refers to a bookmark
-        add(b, scid, _('changed'))
+        add(b, hex(scid), _('changed'))
 
     if not incomings:
         ui.status(_("no changed bookmarks found\n"))
@@ -543,8 +540,8 @@
     '''
     ui.status(_("searching for changed bookmarks\n"))
 
-    r = compare(repo, repo._bookmarks, other.listkeys('bookmarks'),
-                srchex=hex)
+    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
+    r = comparebookmarks(repo, repo._bookmarks, remotemarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
 
     outgoings = []
@@ -560,19 +557,19 @@
             outgoings.append("   %-25s %s\n" % (b, getid(id)))
     for b, scid, dcid in addsrc:
         # i18n: "added refers to a bookmark
-        add(b, scid, _('added'))
+        add(b, hex(scid), _('added'))
     for b, scid, dcid in adddst:
         # i18n: "deleted" refers to a bookmark
         add(b, ' ' * 40, _('deleted'))
     for b, scid, dcid in advsrc:
         # i18n: "advanced" refers to a bookmark
-        add(b, scid, _('advanced'))
+        add(b, hex(scid), _('advanced'))
     for b, scid, dcid in diverge:
         # i18n: "diverged" refers to a bookmark
-        add(b, scid, _('diverged'))
+        add(b, hex(scid), _('diverged'))
     for b, scid, dcid in differ:
         # i18n: "changed" refers to a bookmark
-        add(b, scid, _('changed'))
+        add(b, hex(scid), _('changed'))
 
     if not outgoings:
         ui.status(_("no changed bookmarks found\n"))
@@ -588,8 +585,8 @@
 
     This returns "(# of incoming, # of outgoing)" tuple.
     '''
-    r = compare(repo, other.listkeys('bookmarks'), repo._bookmarks,
-                dsthex=hex)
+    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
+    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
     return (len(addsrc), len(adddst))
 
--- a/mercurial/bundle2.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/bundle2.py	Wed Jan 18 11:43:36 2017 -0500
@@ -485,11 +485,11 @@
     return '\n'.join(chunks)
 
 bundletypes = {
-    "": ("", None),       # only when using unbundle on ssh and old http servers
+    "": ("", 'UN'),       # only when using unbundle on ssh and old http servers
                           # since the unification ssh accepts a header but there
                           # is no capability signaling it.
     "HG20": (), # special-cased below
-    "HG10UN": ("HG10UN", None),
+    "HG10UN": ("HG10UN", 'UN'),
     "HG10BZ": ("HG10", 'BZ'),
     "HG10GZ": ("HG10GZ", 'GZ'),
 }
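+# e.g. 'HG10BZ' produces an 'HG10' on-disk header with a bzip2-compressed
+# payload, while 'HG10UN' writes its header and leaves the payload raw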
@@ -511,15 +511,17 @@
         self._params = []
         self._parts = []
         self.capabilities = dict(capabilities)
-        self._compressor = util.compressors[None]()
+        self._compengine = util.compengines.forbundletype('UN')
+        self._compopts = None
 
-    def setcompression(self, alg):
+    def setcompression(self, alg, compopts=None):
         """setup core part compression to <alg>"""
-        if alg is None:
+        if alg in (None, 'UN'):
             return
         assert not any(n.lower() == 'compression' for n, v in self._params)
         self.addparam('Compression', alg)
-        self._compressor = util.compressors[alg]()
+        self._compengine = util.compengines.forbundletype(alg)
+        self._compopts = compopts
 
     @property
     def nbparts(self):
@@ -571,12 +573,9 @@
         yield _pack(_fstreamparamsize, len(param))
         if param:
             yield param
-        # starting compression
-        for chunk in self._getcorechunk():
-            data = self._compressor.compress(chunk)
-            if data:
-                yield data
-        yield self._compressor.flush()
+        for chunk in self._compengine.compressstream(self._getcorechunk(),
+                                                     self._compopts):
+            yield chunk
 
     def _paramchunk(self):
         """return a encoded version of all stream parameters"""
@@ -680,7 +679,7 @@
     def __init__(self, ui, fp):
         """If header is specified, we do not read it out of the stream."""
         self.ui = ui
-        self._decompressor = util.decompressors[None]
+        self._compengine = util.compengines.forbundletype('UN')
         self._compressed = None
         super(unbundle20, self).__init__(fp)
 
@@ -754,9 +753,9 @@
             params = self._readexact(paramssize)
             self._processallparams(params)
             yield params
-            assert self._decompressor is util.decompressors[None]
+            assert self._compengine.bundletype == 'UN'
         # From there, payload might need to be decompressed
-        self._fp = self._decompressor(self._fp)
+        self._fp = self._compengine.decompressorreader(self._fp)
         emptycount = 0
         while emptycount < 2:
             # so we can brainlessly loop
@@ -780,7 +779,7 @@
         # make sure param have been loaded
         self.params
         # From there, payload need to be decompressed
-        self._fp = self._decompressor(self._fp)
+        self._fp = self._compengine.decompressorreader(self._fp)
         indebug(self.ui, 'start extraction of bundle2 parts')
         headerblock = self._readpartheader()
         while headerblock is not None:
@@ -822,10 +821,10 @@
 @b2streamparamhandler('compression')
 def processcompression(unbundler, param, value):
     """read compression parameter and install payload decompression"""
-    if value not in util.decompressors:
+    if value not in util.compengines.supportedbundletypes:
         raise error.BundleUnknownFeatureError(params=(param,),
                                               values=(value,))
-    unbundler._decompressor = util.decompressors[value]
+    unbundler._compengine = util.compengines.forbundletype(value)
     if value is not None:
         unbundler._compressed = True
 
@@ -1293,7 +1292,8 @@
     obscaps = caps.get('obsmarkers', ())
     return [int(c[1:]) for c in obscaps if c.startswith('V')]
 
-def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
+def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None,
+                compopts=None):
     """Write a bundle file and return its filename.
 
     Existing files will not be overwritten.
@@ -1304,7 +1304,7 @@
 
     if bundletype == "HG20":
         bundle = bundle20(ui)
-        bundle.setcompression(compression)
+        bundle.setcompression(compression, compopts)
         part = bundle.newpart('changegroup', data=cg.getchunks())
         part.addparam('version', cg.version)
         if 'clcount' in cg.extras:
@@ -1318,18 +1318,14 @@
             raise error.Abort(_('old bundle types only supports v1 '
                                 'changegroups'))
         header, comp = bundletypes[bundletype]
-        if comp not in util.compressors:
+        if comp not in util.compengines.supportedbundletypes:
             raise error.Abort(_('unknown stream compression type: %s')
                               % comp)
-        z = util.compressors[comp]()
-        subchunkiter = cg.getchunks()
+        compengine = util.compengines.forbundletype(comp)
         def chunkiter():
             yield header
-            for chunk in subchunkiter:
-                data = z.compress(chunk)
-                if data:
-                    yield data
-            yield z.flush()
+            for chunk in compengine.compressstream(cg.getchunks(), compopts):
+                yield chunk
         chunkiter = chunkiter()
 
     # parse the changegroup data, otherwise we will block
--- a/mercurial/bundlerepo.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/bundlerepo.py	Wed Jan 18 11:43:36 2017 -0500
@@ -35,6 +35,7 @@
     node as nodemod,
     pathutil,
     phases,
+    pycompat,
     revlog,
     scmutil,
     util,
@@ -116,7 +117,7 @@
         return mdiff.textdiff(self.revision(self.node(rev1)),
                               self.revision(self.node(rev2)))
 
-    def revision(self, nodeorrev):
+    def revision(self, nodeorrev, raw=False):
         """return an uncompressed revision of a given node or revision
         number.
         """
@@ -147,7 +148,10 @@
             delta = self._chunk(chain.pop())
             text = mdiff.patches(text, [delta])
 
-        self._checkhash(text, node, rev)
+        text, validatehash = self._processflags(text, self.flags(rev),
+                                                'read', raw=raw)
+        if validatehash:
+            self.checkhash(text, node, rev=rev)
         self._cache = (node, rev, text)
         return text
 
@@ -187,9 +191,9 @@
         finally:
             self.filteredrevs = oldfilter
 
-class bundlemanifest(bundlerevlog, manifest.manifest):
+class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
     def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
-        manifest.manifest.__init__(self, opener, dir=dir)
+        manifest.manifestrevlog.__init__(self, opener, dir=dir)
         bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                               linkmapper)
         if dirlogstarts is None:
@@ -207,7 +211,7 @@
         if node in self.fulltextcache:
             result = self.fulltextcache[node].tostring()
         else:
-            result = manifest.manifest.revision(self, nodeorrev)
+            result = manifest.manifestrevlog.revision(self, nodeorrev)
         return result
 
     def dirlog(self, d):
@@ -359,7 +363,7 @@
 
     @localrepo.unfilteredpropertycache
     def filestart(self):
-        self.manifest
+        self.manifestlog
         return self.filestart
 
     def url(self):
@@ -392,7 +396,7 @@
         return bundlepeer(self)
 
     def getcwd(self):
-        return os.getcwd() # always outside the repo
+        return pycompat.getcwd() # always outside the repo
 
     # Check if parents exist in localrepo before setting
     def setparents(self, p1, p2=nullid):
@@ -412,13 +416,13 @@
     parentpath = ui.config("bundle", "mainreporoot", "")
     if not parentpath:
         # try to find the correct path to the working directory repo
-        parentpath = cmdutil.findrepo(os.getcwd())
+        parentpath = cmdutil.findrepo(pycompat.getcwd())
         if parentpath is None:
             parentpath = ''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
-        cwd = os.getcwd()
+        cwd = pycompat.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/cffi/bdiff.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,31 @@
+from __future__ import absolute_import
+
+import cffi
+import os
+
+ffi = cffi.FFI()
+ffi.set_source("_bdiff_cffi",
+    open(os.path.join(os.path.join(os.path.dirname(__file__), '..'),
+        'bdiff.c')).read(), include_dirs=['mercurial'])
+ffi.cdef("""
+struct bdiff_line {
+    int hash, n, e;
+    ssize_t len;
+    const char *l;
+};
+
+struct bdiff_hunk;
+struct bdiff_hunk {
+    int a1, a2, b1, b2;
+    struct bdiff_hunk *next;
+};
+
+int bdiff_splitlines(const char *a, ssize_t len, struct bdiff_line **lr);
+int bdiff_diff(struct bdiff_line *a, int an, struct bdiff_line *b, int bn,
+    struct bdiff_hunk *base);
+void bdiff_freehunks(struct bdiff_hunk *l);
+void free(void*);
+""")
+
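+# run directly (e.g. "python bdiff.py") to generate and compile the
+# _bdiff_cffi extension module; the cdef block above mirrors the
+# declarations in bdiff.h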
+if __name__ == '__main__':
+    ffi.compile()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/cffi/mpatch.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+
+import cffi
+import os
+
+ffi = cffi.FFI()
+mpatch_c = os.path.join(os.path.join(os.path.dirname(__file__), '..',
+                                     'mpatch.c'))
+ffi.set_source("_mpatch_cffi", open(mpatch_c).read(),
+               include_dirs=["mercurial"])
+ffi.cdef("""
+
+struct mpatch_frag {
+       int start, end, len;
+       const char *data;
+};
+
+struct mpatch_flist {
+       struct mpatch_frag *base, *head, *tail;
+};
+
+extern "Python" struct mpatch_flist* cffi_get_next_item(void*, ssize_t);
+
+int mpatch_decode(const char *bin, ssize_t len, struct mpatch_flist** res);
+ssize_t mpatch_calcsize(size_t len, struct mpatch_flist *l);
+void mpatch_lfree(struct mpatch_flist *a);
+static int mpatch_apply(char *buf, const char *orig, size_t len,
+                        struct mpatch_flist *l);
+struct mpatch_flist *mpatch_fold(void *bins,
+                       struct mpatch_flist* (*get_next_item)(void*, ssize_t),
+                       ssize_t start, ssize_t end);
+""")
+
+if __name__ == '__main__':
+    ffi.compile()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/cffi/osutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,102 @@
+from __future__ import absolute_import
+
+import cffi
+
+ffi = cffi.FFI()
+ffi.set_source("_osutil_cffi", """
+#include <sys/attr.h>
+#include <sys/vnode.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+typedef struct val_attrs {
+    uint32_t          length;
+    attribute_set_t   returned;
+    attrreference_t   name_info;
+    fsobj_type_t      obj_type;
+    struct timespec   mtime;
+    uint32_t          accessmask;
+    off_t             datalength;
+} __attribute__((aligned(4), packed)) val_attrs_t;
+""", include_dirs=['mercurial'])
+ffi.cdef('''
+
+typedef uint32_t attrgroup_t;
+
+typedef struct attrlist {
+    uint16_t    bitmapcount; /* number of attr. bit sets in list */
+    uint16_t    reserved;    /* (to maintain 4-byte alignment) */
+    attrgroup_t commonattr;  /* common attribute group */
+    attrgroup_t volattr;     /* volume attribute group */
+    attrgroup_t dirattr;     /* directory attribute group */
+    attrgroup_t fileattr;    /* file attribute group */
+    attrgroup_t forkattr;    /* fork attribute group */
+    ...;
+};
+
+typedef struct attribute_set {
+    ...;
+} attribute_set_t;
+
+typedef struct attrreference {
+    int attr_dataoffset;
+    int attr_length;
+    ...;
+} attrreference_t;
+
+typedef int ... off_t;
+
+typedef struct val_attrs {
+    uint32_t          length;
+    attribute_set_t   returned;
+    attrreference_t   name_info;
+    uint32_t          obj_type;
+    struct timespec   mtime;
+    uint32_t          accessmask;
+    off_t             datalength;
+    ...;
+} val_attrs_t;
+
+/* the exact layout of the above struct will be figured out at build time */
+
+typedef int ... time_t;
+
+typedef struct timespec {
+    time_t tv_sec;
+    ...;
+};
+
+int getattrlist(const char* path, struct attrlist * attrList, void * attrBuf,
+                size_t attrBufSize, unsigned int options);
+
+int getattrlistbulk(int dirfd, struct attrlist * attrList, void * attrBuf,
+                    size_t attrBufSize, uint64_t options);
+
+#define ATTR_BIT_MAP_COUNT ...
+#define ATTR_CMN_NAME ...
+#define ATTR_CMN_OBJTYPE ...
+#define ATTR_CMN_MODTIME ...
+#define ATTR_CMN_ACCESSMASK ...
+#define ATTR_CMN_ERROR ...
+#define ATTR_CMN_RETURNED_ATTRS ...
+#define ATTR_FILE_DATALENGTH ...
+
+#define VREG ...
+#define VDIR ...
+#define VLNK ...
+#define VBLK ...
+#define VCHR ...
+#define VFIFO ...
+#define VSOCK ...
+
+#define S_IFMT ...
+
+int open(const char *path, int oflag, int perm);
+int close(int);
+
+#define O_RDONLY ...
+''')
+
+if __name__ == '__main__':
+    ffi.compile()
--- a/mercurial/changegroup.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/changegroup.py	Wed Jan 18 11:43:36 2017 -0500
@@ -137,14 +137,16 @@
     _grouplistcount = 1 # One list of files after the manifests
 
     def __init__(self, fh, alg, extras=None):
-        if alg == 'UN':
-            alg = None # get more modern without breaking too much
-        if not alg in util.decompressors:
+        if alg is None:
+            alg = 'UN'
+        if alg not in util.compengines.supportedbundletypes:
             raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
         if alg == 'BZ':
             alg = '_truncatedBZ'
-        self._stream = util.decompressors[alg](fh)
+
+        compengine = util.compengines.forbundletype(alg)
+        self._stream = compengine.decompressorreader(fh)
         self._type = alg
         self.extras = extras or {}
         self.callback = None
@@ -152,7 +154,7 @@
     # These methods (compressed, read, seek, tell) all appear to only
     # be used by bundlerepo, but it's a little hard to tell.
     def compressed(self):
-        return self._type is not None
+        return self._type is not None and self._type != 'UN'
     def read(self, l):
         return self._stream.read(l)
     def seek(self, pos):
@@ -250,7 +252,7 @@
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         self.manifestheader()
-        repo.manifest.addgroup(self, revmap, trp)
+        repo.manifestlog._revlog.addgroup(self, revmap, trp)
         repo.ui.progress(_('manifests'), None)
         self.callback = None
 
@@ -330,11 +332,12 @@
 
                 needfiles = {}
                 if repo.ui.configbool('server', 'validate', default=False):
+                    cl = repo.changelog
+                    ml = repo.manifestlog
                     # validate incoming csets have their manifests
                     for cset in xrange(clstart, clend):
-                        mfnode = repo.changelog.read(
-                            repo.changelog.node(cset))[0]
-                        mfest = repo.manifestlog[mfnode].readdelta()
+                        mfnode = cl.changelogrevision(cset).manifest
+                        mfest = ml[mfnode].readdelta()
                         # store file nodes we must see
                         for f, n in mfest.iteritems():
                             needfiles.setdefault(f, set()).add(n)
@@ -479,7 +482,7 @@
             # If we get here, there are directory manifests in the changegroup
             d = chunkdata["filename"]
             repo.ui.debug("adding %s revisions\n" % d)
-            dirlog = repo.manifest.dirlog(d)
+            dirlog = repo.manifestlog._revlog.dirlog(d)
             if not dirlog.addgroup(self, revmap, trp):
                 raise error.Abort(_("received dir revlog group is empty"))
 
@@ -587,7 +590,7 @@
     def _packmanifests(self, dir, mfnodes, lookuplinknode):
         """Pack flat manifests into a changegroup stream."""
         assert not dir
-        for chunk in self.group(mfnodes, self._repo.manifest,
+        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                 lookuplinknode, units=_('manifests')):
             yield chunk
 
@@ -676,7 +679,8 @@
     def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                           fnodes):
         repo = self._repo
-        dirlog = repo.manifest.dirlog
+        mfl = repo.manifestlog
+        dirlog = mfl._revlog.dirlog
         tmfnodes = {'': mfs}
 
         # Callback for the manifest, used to collect linkrevs for filelog
@@ -704,7 +708,7 @@
                 treemanifests to send.
                 """
                 clnode = tmfnodes[dir][x]
-                mdata = dirlog(dir).readshallowfast(x)
+                mdata = mfl.get(dir, x).readfast(shallow=True)
                 for p, n, fl in mdata.iterentries():
                     if fl == 't': # subdirectory manifest
                         subdir = dir + p + '/'
@@ -779,7 +783,7 @@
         prefix = ''
         if revlog.iscensored(base) or revlog.iscensored(rev):
             try:
-                delta = revlog.revision(node)
+                delta = revlog.revision(node, raw=True)
             except error.CensoredNodeError as e:
                 delta = e.tombstone
             if base == nullrev:
@@ -788,7 +792,7 @@
                 baselen = revlog.rawsize(base)
                 prefix = mdiff.replacediffheader(baselen, len(delta))
         elif base == nullrev:
-            delta = revlog.revision(node)
+            delta = revlog.revision(node, raw=True)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = revlog.revdiff(base, rev)
@@ -850,8 +854,10 @@
     def _packmanifests(self, dir, mfnodes, lookuplinknode):
         if dir:
             yield self.fileheader(dir)
-        for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
-                                lookuplinknode, units=_('manifests')):
+
+        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
+        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
+                                units=_('manifests')):
             yield chunk
 
     def _manifestsdone(self):
@@ -868,24 +874,21 @@
              '03': (cg3packer, cg3unpacker),
 }
 
-def allsupportedversions(ui):
+def allsupportedversions(repo):
     versions = set(_packermap.keys())
-    versions.discard('03')
-    if (ui.configbool('experimental', 'changegroup3') or
-        ui.configbool('experimental', 'treemanifest')):
-        versions.add('03')
+    if not (repo.ui.configbool('experimental', 'changegroup3') or
+            repo.ui.configbool('experimental', 'treemanifest') or
+            'treemanifest' in repo.requirements):
+        versions.discard('03')
     return versions
 
 # Changegroup versions that can be applied to the repo
 def supportedincomingversions(repo):
-    versions = allsupportedversions(repo.ui)
-    if 'treemanifest' in repo.requirements:
-        versions.add('03')
-    return versions
+    return allsupportedversions(repo)
 
 # Changegroup versions that can be created from the repo
 def supportedoutgoingversions(repo):
-    versions = allsupportedversions(repo.ui)
+    versions = allsupportedversions(repo)
     if 'treemanifest' in repo.requirements:
         # Versions 01 and 02 support only flat manifests and it's just too
         # expensive to convert between the flat manifest and tree manifest on
@@ -894,7 +897,6 @@
         # support versions 01 and 02.
         versions.discard('01')
         versions.discard('02')
-        versions.add('03')
     return versions
 
 def safeversion(repo):
--- a/mercurial/changelog.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/changelog.py	Wed Jan 18 11:43:36 2017 -0500
@@ -79,9 +79,10 @@
         self.fp = fp
         self.offset = fp.tell()
         self.size = vfs.fstat(fp).st_size
+        self._end = self.size
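+        # cached end offset, updated incrementally by write() so end() does
+        # not have to re-join all buffered data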
 
     def end(self):
-        return self.size + len("".join(self.data))
+        return self._end
     def tell(self):
         return self.offset
     def flush(self):
@@ -121,6 +122,7 @@
     def write(self, s):
         self.data.append(str(s))
         self.offset += len(s)
+        self._end += len(s)
 
 def _divertopener(opener, target):
     """build an opener that writes in 'target.a' instead of 'target'"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/chgserver.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,588 @@
+# chgserver.py - command server extension for cHg
+#
+# Copyright 2011 Yuya Nishihara <yuya@tcha.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""command server extension for cHg
+
+'S' channel (read/write)
+    propagate ui.system() request to client
+
+'attachio' command
+    attach client's stdio passed by sendmsg()
+
+'chdir' command
+    change current directory
+
+'setenv' command
+    replace os.environ completely
+
+'setumask' command
+    set umask
+
+'validate' command
+    reload the config and check if the server is up to date
+
+Config
+------
+
+::
+
+  [chgserver]
+  idletimeout = 3600 # seconds, after which an idle server will exit
+  skiphash = False   # whether to skip config or env change checks
+"""
+
+from __future__ import absolute_import
+
+import errno
+import hashlib
+import inspect
+import os
+import re
+import struct
+import time
+
+from .i18n import _
+
+from . import (
+    commandserver,
+    encoding,
+    error,
+    extensions,
+    osutil,
+    pycompat,
+    util,
+)
+
+_log = commandserver.log
+
+def _hashlist(items):
+    """return sha1 hexdigest for a list"""
+    return hashlib.sha1(str(items)).hexdigest()
+
+# sensitive config sections affecting confighash
+_configsections = [
+    'alias',  # affects global state commands.table
+    'extdiff',  # uisetup will register new commands
+    'extensions',
+]
+
+# sensitive environment variables affecting confighash
+_envre = re.compile(r'''\A(?:
+                    CHGHG
+                    |HG(?:[A-Z].*)?
+                    |LANG(?:UAGE)?
+                    |LC_.*
+                    |LD_.*
+                    |PATH
+                    |PYTHON.*
+                    |TERM(?:INFO)?
+                    |TZ
+                    )\Z''', re.X)
+
+def _confighash(ui):
+    """return a quick hash for detecting config/env changes
+
+    confighash is the hash of sensitive config items and environment variables.
+
+    for chgserver, the design is that once confighash changes, the server is
+    no longer qualified to serve its client and should redirect the client to
+    a new server. unlike mtimehash, a confighash change does not mark the
+    server as outdated and make it exit, since the user can have different
+    configs at the same time.
+    """
+    sectionitems = []
+    for section in _configsections:
+        sectionitems.append(ui.configitems(section))
+    sectionhash = _hashlist(sectionitems)
+    envitems = [(k, v) for k, v in encoding.environ.iteritems()
+                if _envre.match(k)]
+    envhash = _hashlist(sorted(envitems))
+    return sectionhash[:6] + envhash[:6]
+
+def _getmtimepaths(ui):
+    """get a list of paths that should be checked to detect change
+
+    The list will include:
+    - extensions (will not cover all files for complex extensions)
+    - mercurial/__version__.py
+    - python binary
+    """
+    modules = [m for n, m in extensions.extensions(ui)]
+    try:
+        from . import __version__
+        modules.append(__version__)
+    except ImportError:
+        pass
+    files = [pycompat.sysexecutable]
+    for m in modules:
+        try:
+            files.append(inspect.getabsfile(m))
+        except TypeError:
+            pass
+    return sorted(set(files))
+
+def _mtimehash(paths):
+    """return a quick hash for detecting file changes
+
+    mtimehash calls stat on the given paths and calculates a hash based on the
+    size and mtime of each file. mtimehash does not read file content because
+    reading is expensive. therefore it's not 100% reliable for detecting
+    content changes: it's possible to return different hashes for the same
+    file contents, and it's also possible to return the same hash for
+    different file contents in some carefully crafted situations.
+
+    for chgserver, the design is that once mtimehash changes, the server is
+    considered outdated immediately and should no longer provide service.
+
+    mtimehash is not included in confighash because we only know the paths of
+    extensions after importing them (there is imp.find_module but that faces
+    race conditions). We need to calculate confighash without importing.
+    """
+    def trystat(path):
+        try:
+            st = os.stat(path)
+            return (st.st_mtime, st.st_size)
+        except OSError:
+            # could be ENOENT, EPERM etc. not fatal in any case
+            pass
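+    # a failed stat contributes None to the list, so a missing path still
+    # yields a stable digest instead of raising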
+    return _hashlist(map(trystat, paths))[:12]
+
+class hashstate(object):
+    """a structure storing confighash, mtimehash, paths used for mtimehash"""
+    def __init__(self, confighash, mtimehash, mtimepaths):
+        self.confighash = confighash
+        self.mtimehash = mtimehash
+        self.mtimepaths = mtimepaths
+
+    @staticmethod
+    def fromui(ui, mtimepaths=None):
+        if mtimepaths is None:
+            mtimepaths = _getmtimepaths(ui)
+        confighash = _confighash(ui)
+        mtimehash = _mtimehash(mtimepaths)
+        _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
+        return hashstate(confighash, mtimehash, mtimepaths)
+
+def _newchgui(srcui, csystem, attachio):
+    class chgui(srcui.__class__):
+        def __init__(self, src=None):
+            super(chgui, self).__init__(src)
+            if src:
+                self._csystem = getattr(src, '_csystem', csystem)
+            else:
+                self._csystem = csystem
+
+        def system(self, cmd, environ=None, cwd=None, onerr=None,
+                   errprefix=None):
+            # fallback to the original system method if the output needs to be
+            # captured (to self._buffers), or the output stream is not stdout
+            # (e.g. stderr, cStringIO), because the chg client is not aware of
+            # these situations and will behave differently (write to stdout).
+            if (any(s[1] for s in self._bufferstates)
+                or not util.safehasattr(self.fout, 'fileno')
+                or self.fout.fileno() != util.stdout.fileno()):
+                return super(chgui, self).system(cmd, environ, cwd, onerr,
+                                                 errprefix)
+            self.flush()
+            rc = self._csystem(cmd, util.shellenviron(environ), cwd)
+            if rc and onerr:
+                errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
+                                    util.explainexit(rc)[0])
+                if errprefix:
+                    errmsg = '%s: %s' % (errprefix, errmsg)
+                raise onerr(errmsg)
+            return rc
+
+        def _runpager(self, cmd):
+            self._csystem(cmd, util.shellenviron(), type='pager',
+                          cmdtable={'attachio': attachio})
+
+    return chgui(srcui)
+
+def _loadnewui(srcui, args):
+    from . import dispatch  # avoid cycle
+
+    newui = srcui.__class__.load()
+    for a in ['fin', 'fout', 'ferr', 'environ']:
+        setattr(newui, a, getattr(srcui, a))
+    if util.safehasattr(srcui, '_csystem'):
+        newui._csystem = srcui._csystem
+
+    # command line args
+    args = args[:]
+    dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))
+
+    # stolen from tortoisehg.util.copydynamicconfig()
+    for section, name, value in srcui.walkconfig():
+        source = srcui.configsource(section, name)
+        if ':' in source or source == '--config':
+            # path:line or command line
+            continue
+        newui.setconfig(section, name, value, source)
+
+    # load wd and repo config, copied from dispatch.py
+    cwds = dispatch._earlygetopt(['--cwd'], args)
+    cwd = cwds and os.path.realpath(cwds[-1]) or None
+    rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
+    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
+
+    return (newui, newlui)
+
+class channeledsystem(object):
+    """Propagate ui.system() request in the following format:
+
+    payload length (unsigned int),
+    type, '\0',
+    cmd, '\0',
+    cwd, '\0',
+    envkey, '=', val, '\0',
+    ...
+    envkey, '=', val
+
+    if type == 'system', waits for:
+
+    exitcode length (unsigned int),
+    exitcode (int)
+
+    if type == 'pager', repeatedly waits for a command name ending with '\n'
+    and executes the command defined in cmdtable, or exits the loop if the
+    command name is empty.
+    """
+    def __init__(self, in_, out, channel):
+        self.in_ = in_
+        self.out = out
+        self.channel = channel
+
+    def __call__(self, cmd, environ, cwd=None, type='system', cmdtable=None):
+        args = [type, util.quotecommand(cmd), os.path.abspath(cwd or '.')]
+        args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
+        data = '\0'.join(args)
+        self.out.write(struct.pack('>cI', self.channel, len(data)))
+        self.out.write(data)
+        self.out.flush()
+
+        if type == 'system':
+            length = self.in_.read(4)
+            length, = struct.unpack('>I', length)
+            if length != 4:
+                raise error.Abort(_('invalid response'))
+            rc, = struct.unpack('>i', self.in_.read(4))
+            return rc
+        elif type == 'pager':
+            while True:
+                cmd = self.in_.readline()[:-1]
+                if not cmd:
+                    break
+                if cmdtable and cmd in cmdtable:
+                    _log('pager subcommand: %s' % cmd)
+                    cmdtable[cmd]()
+                else:
+                    raise error.Abort(_('unexpected command: %s') % cmd)
+        else:
+            raise error.ProgrammingError('invalid S channel type: %s' % type)
+
+_iochannels = [
+    # server.ch, ui.fp, mode
+    ('cin', 'fin', 'rb'),
+    ('cout', 'fout', 'wb'),
+    ('cerr', 'ferr', 'wb'),
+]
+
+class chgcmdserver(commandserver.server):
+    def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
+        super(chgcmdserver, self).__init__(
+            _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
+            repo, fin, fout)
+        self.clientsock = sock
+        self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
+        self.hashstate = hashstate
+        self.baseaddress = baseaddress
+        if hashstate is not None:
+            self.capabilities = self.capabilities.copy()
+            self.capabilities['validate'] = chgcmdserver.validate
+
+    def cleanup(self):
+        super(chgcmdserver, self).cleanup()
+        # dispatch._runcatch() does not flush outputs if exception is not
+        # handled by dispatch._dispatch()
+        self.ui.flush()
+        self._restoreio()
+
+    def attachio(self):
+        """Attach to client's stdio passed via unix domain socket; all
+        channels except cresult will no longer be used
+        """
+        # tell the client to sendmsg() with a 1-byte payload, which makes it
+        # distinguishable from the "attachio\n" command consumed by client.read()
+        self.clientsock.sendall(struct.pack('>cI', 'I', 1))
+        clientfds = osutil.recvfds(self.clientsock.fileno())
+        _log('received fds: %r\n' % clientfds)
+
+        ui = self.ui
+        ui.flush()
+        first = self._saveio()
+        for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
+            assert fd > 0
+            fp = getattr(ui, fn)
+            os.dup2(fd, fp.fileno())
+            os.close(fd)
+            if not first:
+                continue
+            # reset the buffering mode when the client is first attached. as
+            # we want to see output immediately on the pager, the mode stays
+            # unchanged when the client is re-attached. ferr is unchanged
+            # because it should be unbuffered regardless of being a tty.
+            if fn == 'ferr':
+                newfp = fp
+            else:
+                # make it line buffered explicitly because the default is
+                # decided on first write(), where fout could be a pager.
+                if fp.isatty():
+                    bufsize = 1  # line buffered
+                else:
+                    bufsize = -1  # system default
+                newfp = os.fdopen(fp.fileno(), mode, bufsize)
+                setattr(ui, fn, newfp)
+            setattr(self, cn, newfp)
+
+        self.cresult.write(struct.pack('>i', len(clientfds)))
+
+    def _saveio(self):
+        if self._oldios:
+            return False
+        ui = self.ui
+        for cn, fn, _mode in _iochannels:
+            ch = getattr(self, cn)
+            fp = getattr(ui, fn)
+            fd = os.dup(fp.fileno())
+            self._oldios.append((ch, fp, fd))
+        return True
+
+    def _restoreio(self):
+        ui = self.ui
+        for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
+            newfp = getattr(ui, fn)
+            # close newfp while it's associated with client; otherwise it
+            # would be closed when newfp is deleted
+            if newfp is not fp:
+                newfp.close()
+            # restore original fd: fp is open again
+            os.dup2(fd, fp.fileno())
+            os.close(fd)
+            setattr(self, cn, ch)
+            setattr(ui, fn, fp)
+        del self._oldios[:]
+
+    def validate(self):
+        """Reload the config and check if the server is up to date
+
+        Read a list of '\0'-separated arguments.
+        Write a non-empty list of '\0'-separated instruction strings, or '\0'
+        if the list is empty.
+        An instruction string can be one of the following:
+            - "unlink $path", the client should unlink the path to stop the
+              outdated server.
+            - "redirect $path", the client should attempt to connect to $path
+              first. If it does not work, start a new server. It implies
+              "reconnect".
+            - "exit $n", the client should exit directly with code n.
+              This may happen if we cannot parse the config.
+            - "reconnect", the client should close the connection and
+              reconnect.
+        If neither "reconnect" nor "redirect" is included in the instruction
+        list, the client can continue with this server after completing all
+        the instructions.
+        """
+        from . import dispatch  # avoid cycle
+
+        args = self._readlist()
+        try:
+            self.ui, lui = _loadnewui(self.ui, args)
+        except error.ParseError as inst:
+            dispatch._formatparse(self.ui.warn, inst)
+            self.ui.flush()
+            self.cresult.write('exit 255')
+            return
+        newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
+        insts = []
+        if newhash.mtimehash != self.hashstate.mtimehash:
+            addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
+            insts.append('unlink %s' % addr)
+            # mtimehash is empty if one or more extensions fail to load.
+            # to be compatible with hg, still serve the client this time.
+            if self.hashstate.mtimehash:
+                insts.append('reconnect')
+        if newhash.confighash != self.hashstate.confighash:
+            addr = _hashaddress(self.baseaddress, newhash.confighash)
+            insts.append('redirect %s' % addr)
+        _log('validate: %s\n' % insts)
+        self.cresult.write('\0'.join(insts) or '\0')
+
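The instruction list written by validate() is consumed on the client side. A hedged sketch of how a client might decode it (parseinstructions is a hypothetical helper, not part of this changeset):

    def parseinstructions(payload):
        # payload is the validate() response: a single '\0' when there are
        # no instructions, otherwise a '\0'-separated instruction list
        actions = []
        for inst in (i for i in payload.split('\0') if i):
            if inst.startswith('unlink '):
                actions.append(('unlink', inst[len('unlink '):]))
            elif inst.startswith('redirect '):
                # implies reconnect: try the new address before respawning
                actions.append(('redirect', inst[len('redirect '):]))
            elif inst.startswith('exit '):
                actions.append(('exit', int(inst[len('exit '):])))
            elif inst == 'reconnect':
                actions.append(('reconnect', None))
            else:
                raise ValueError('unknown instruction: %s' % inst)
        return actions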
+    def chdir(self):
+        """Change current directory
+
+        Note that the behavior of the --cwd option is a bit different from
+        this. It does not affect the --config parameter.
+        """
+        path = self._readstr()
+        if not path:
+            return
+        _log('chdir to %r\n' % path)
+        os.chdir(path)
+
+    def setumask(self):
+        """Change umask"""
+        mask = struct.unpack('>I', self._read(4))[0]
+        _log('setumask %r\n' % mask)
+        os.umask(mask)
+
+    def runcommand(self):
+        return super(chgcmdserver, self).runcommand()
+
+    def setenv(self):
+        """Clear and update os.environ
+
+        Note that not all variables take effect on the running process.
+        """
+        l = self._readlist()
+        try:
+            newenv = dict(s.split('=', 1) for s in l)
+        except ValueError:
+            raise ValueError('unexpected value in setenv request')
+        _log('setenv: %r\n' % sorted(newenv.keys()))
+        encoding.environ.clear()
+        encoding.environ.update(newenv)
+
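The dict(s.split('=', 1) for s in l) expression above tolerates empty values and '=' characters inside values, but rejects entries with no '=' at all, which is what the ValueError branch catches. For illustration:

    >>> dict(s.split('=', 1) for s in ['PATH=/usr/bin', 'EMPTY=', 'X=a=b'])
    {'PATH': '/usr/bin', 'EMPTY': '', 'X': 'a=b'}
    >>> dict(s.split('=', 1) for s in ['NOEQUALS'])  # raises ValueError

(The exact ValueError message varies across Python versions, hence the re-raise with a clearer one.)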
+    capabilities = commandserver.server.capabilities.copy()
+    capabilities.update({'attachio': attachio,
+                         'chdir': chdir,
+                         'runcommand': runcommand,
+                         'setenv': setenv,
+                         'setumask': setumask})
+
+    if util.safehasattr(osutil, 'setprocname'):
+        def setprocname(self):
+            """Change process title"""
+            name = self._readstr()
+            _log('setprocname: %r\n' % name)
+            osutil.setprocname(name)
+        capabilities['setprocname'] = setprocname
+
+def _tempaddress(address):
+    return '%s.%d.tmp' % (address, os.getpid())
+
+def _hashaddress(address, hashstr):
+    # if the basename of address contains '.', use only the left part. this
+    # makes it possible for the client to pass 'server.tmp$PID', followed by
+    # an atomic rename, to avoid locking when spawning new servers.
+    dirname, basename = os.path.split(address)
+    basename = basename.split('.', 1)[0]
+    return '%s-%s' % (os.path.join(dirname, basename), hashstr)
+
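To make the naming scheme concrete, _hashaddress maps addresses like this (paths and hash below are made up):

    >>> _hashaddress('/tmp/chg/server', 'deadbeef')
    '/tmp/chg/server-deadbeef'
    >>> # anything after the first '.' is dropped, so a temp name like the
    >>> # one produced by _tempaddress hashes to the same final address
    >>> _hashaddress('/tmp/chg/server.1234.tmp', 'deadbeef')
    '/tmp/chg/server-deadbeef'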
+class chgunixservicehandler(object):
+    """Set of operations for chg services"""
+
+    pollinterval = 1  # [sec]
+
+    def __init__(self, ui):
+        self.ui = ui
+        self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
+        self._lastactive = time.time()
+
+    def bindsocket(self, sock, address):
+        self._inithashstate(address)
+        self._checkextensions()
+        self._bind(sock)
+        self._createsymlink()
+
+    def _inithashstate(self, address):
+        self._baseaddress = address
+        if self.ui.configbool('chgserver', 'skiphash', False):
+            self._hashstate = None
+            self._realaddress = address
+            return
+        self._hashstate = hashstate.fromui(self.ui)
+        self._realaddress = _hashaddress(address, self._hashstate.confighash)
+
+    def _checkextensions(self):
+        if not self._hashstate:
+            return
+        if extensions.notloaded():
+            # one or more extensions failed to load. mtimehash becomes
+            # meaningless because we do not know the paths of those extensions.
+            # set mtimehash to an illegal hash value to invalidate the server.
+            self._hashstate.mtimehash = ''
+
+    def _bind(self, sock):
+        # use a unique temp address so we can stat the file and do an
+        # ownership check later
+        tempaddress = _tempaddress(self._realaddress)
+        util.bindunixsocket(sock, tempaddress)
+        self._socketstat = os.stat(tempaddress)
+        # rename will atomically replace the old socket file if it exists.
+        # the old server will detect the ownership change and exit.
+        util.rename(tempaddress, self._realaddress)
+
+    def _createsymlink(self):
+        if self._baseaddress == self._realaddress:
+            return
+        tempaddress = _tempaddress(self._baseaddress)
+        os.symlink(os.path.basename(self._realaddress), tempaddress)
+        util.rename(tempaddress, self._baseaddress)
+
+    def _issocketowner(self):
+        try:
+            stat = os.stat(self._realaddress)
+            return (stat.st_ino == self._socketstat.st_ino and
+                    stat.st_mtime == self._socketstat.st_mtime)
+        except OSError:
+            return False
+
+    def unlinksocket(self, address):
+        if not self._issocketowner():
+            return
+        # there is a race condition here: we may end up removing
+        # another server's socket file. but that's okay, since that
+        # server will detect the change and exit automatically and
+        # the client will start a new server on demand.
+        try:
+            os.unlink(self._realaddress)
+        except OSError as exc:
+            if exc.errno != errno.ENOENT:
+                raise
+
+    def printbanner(self, address):
+        # no "listening at" message should be printed to simulate hg behavior
+        pass
+
+    def shouldexit(self):
+        if not self._issocketowner():
+            self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
+            return True
+        if time.time() - self._lastactive > self._idletimeout:
+            self.ui.debug('being idle too long. exiting.\n')
+            return True
+        return False
+
+    def newconnection(self):
+        self._lastactive = time.time()
+
+    def createcmdserver(self, repo, conn, fin, fout):
+        return chgcmdserver(self.ui, repo, fin, fout, conn,
+                            self._hashstate, self._baseaddress)
+
+def chgunixservice(ui, repo, opts):
+    # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
+    # start another chg. drop it to avoid possible side effects.
+    if 'CHGINTERNALMARK' in encoding.environ:
+        del encoding.environ['CHGINTERNALMARK']
+
+    if repo:
+        # one chgserver can serve multiple repos. drop repo information
+        ui.setconfig('bundle', 'mainreporoot', '', 'repo')
+    h = chgunixservicehandler(ui)
+    return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
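The _bind/_issocketowner pair above implements a bind-to-temp-then-rename pattern, so a new server can atomically take over a socket path while the displaced server notices and exits. A standalone sketch of the pattern (atomicbind is a hypothetical name; it ignores the long-path and Windows handling that util.bindunixsocket and util.rename provide):

    import os
    import socket

    def atomicbind(sock, realaddress):
        # bind to a pid-unique temp path, then rename over the real path;
        # on POSIX rename is atomic, so clients never see a half-created
        # socket and the old server can detect the inode change via stat
        tempaddress = '%s.%d.tmp' % (realaddress, os.getpid())
        sock.bind(tempaddress)
        stat = os.stat(tempaddress)  # kept for later ownership checks
        os.rename(tempaddress, realaddress)
        return stat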
--- a/mercurial/cmdutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/cmdutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -10,7 +10,6 @@
 import errno
 import os
 import re
-import sys
 import tempfile
 
 from .i18n import _
@@ -27,16 +26,19 @@
     changelog,
     copies,
     crecord as crecordmod,
+    dirstateguard as dirstateguardmod,
     encoding,
     error,
     formatter,
     graphmod,
     lock as lockmod,
     match as matchmod,
+    mergeutil,
     obsolete,
     patch,
     pathutil,
     phases,
+    pycompat,
     repair,
     revlog,
     revset,
@@ -47,6 +49,10 @@
 )
 stringio = util.stringio
 
+# special string such that everything below this line will be ignored in the
+# editor text
+_linebelow = "^HG: ------------------------ >8 ------------------------$"
+
 def ishunk(x):
     hunkclasses = (crecordmod.uihunk, patch.recordhunk)
     return isinstance(x, hunkclasses)
@@ -83,7 +89,7 @@
         else:
             recordfn = crecordmod.chunkselector
 
-        return crecordmod.filterpatch(ui, originalhunks, recordfn)
+        return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
 
     else:
         return patch.filterpatch(ui, originalhunks, operation)
@@ -225,7 +231,8 @@
                              + crecordmod.patchhelptext
                              + fp.read())
                 reviewedpatch = ui.edit(patchtext, "",
-                                        extra={"suffix": ".diff"})
+                                        extra={"suffix": ".diff"},
+                                        tmpdir=repo.path)
                 fp.truncate(0)
                 fp.write(reviewedpatch)
                 fp.seek(0)
@@ -349,15 +356,23 @@
 
     return p
 
-def bailifchanged(repo, merge=True):
+def bailifchanged(repo, merge=True, hint=None):
+    """ enforce the precondition that working directory must be clean.
+
+    'merge' can be set to false if a pending uncommitted merge should be
+    ignored (such as when 'update --check' runs).
+
+    'hint' is the usual hint given to the Abort exception.
+    """
+
     if merge and repo.dirstate.p2() != nullid:
-        raise error.Abort(_('outstanding uncommitted merge'))
+        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
     modified, added, removed, deleted = repo.status()[:4]
     if modified or added or removed or deleted:
-        raise error.Abort(_('uncommitted changes'))
+        raise error.Abort(_('uncommitted changes'), hint=hint)
     ctx = repo[None]
     for s in sorted(ctx.substate):
-        ctx.sub(s).bailifchanged()
+        ctx.sub(s).bailifchanged(hint=hint)
 
 def logmessage(ui, opts):
     """ get the log message according to -m and -l option """
@@ -555,11 +570,11 @@
             if 'treemanifest' not in repo.requirements:
                 raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
-            dirlog = repo.manifest.dirlog(dir)
+            dirlog = repo.manifestlog._revlog.dirlog(dir)
             if len(dirlog):
                 r = dirlog
         elif mf:
-            r = repo.manifest
+            r = repo.manifestlog._revlog
         elif file_:
             filelog = repo.file(file_)
             if len(filelog):
@@ -569,7 +584,7 @@
             raise error.CommandError(cmd, _('invalid arguments'))
         if not os.path.isfile(file_):
             raise error.Abort(_("revlog '%s' not found") % file_)
-        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
+        r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False),
                           file_[:-2] + ".i")
     return r
 
@@ -729,7 +744,7 @@
             else:
                 striplen = len(abspfx)
             if striplen:
-                striplen += len(os.sep)
+                striplen += len(pycompat.ossep)
             res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
         elif destdirexists:
             res = lambda p: os.path.join(dest,
@@ -763,12 +778,12 @@
                 abspfx = util.localpath(abspfx)
                 striplen = len(abspfx)
                 if striplen:
-                    striplen += len(os.sep)
+                    striplen += len(pycompat.ossep)
                 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                     score = evalpath(striplen)
                     striplen1 = len(os.path.split(abspfx)[0])
                     if striplen1:
-                        striplen1 += len(os.sep)
+                        striplen1 += len(pycompat.ossep)
                     if evalpath(striplen1) > score:
                         striplen = striplen1
                 res = lambda p: os.path.join(dest,
@@ -819,93 +834,6 @@
 
     return errors != 0
 
-def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
-    runargs=None, appendpid=False):
-    '''Run a command as a service.'''
-
-    def writepid(pid):
-        if opts['pid_file']:
-            if appendpid:
-                mode = 'a'
-            else:
-                mode = 'w'
-            fp = open(opts['pid_file'], mode)
-            fp.write(str(pid) + '\n')
-            fp.close()
-
-    if opts['daemon'] and not opts['daemon_postexec']:
-        # Signal child process startup with file removal
-        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
-        os.close(lockfd)
-        try:
-            if not runargs:
-                runargs = util.hgcmd() + sys.argv[1:]
-            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
-            # Don't pass --cwd to the child process, because we've already
-            # changed directory.
-            for i in xrange(1, len(runargs)):
-                if runargs[i].startswith('--cwd='):
-                    del runargs[i]
-                    break
-                elif runargs[i].startswith('--cwd'):
-                    del runargs[i:i + 2]
-                    break
-            def condfn():
-                return not os.path.exists(lockpath)
-            pid = util.rundetached(runargs, condfn)
-            if pid < 0:
-                raise error.Abort(_('child process failed to start'))
-            writepid(pid)
-        finally:
-            try:
-                os.unlink(lockpath)
-            except OSError as e:
-                if e.errno != errno.ENOENT:
-                    raise
-        if parentfn:
-            return parentfn(pid)
-        else:
-            return
-
-    if initfn:
-        initfn()
-
-    if not opts['daemon']:
-        writepid(util.getpid())
-
-    if opts['daemon_postexec']:
-        try:
-            os.setsid()
-        except AttributeError:
-            pass
-        for inst in opts['daemon_postexec']:
-            if inst.startswith('unlink:'):
-                lockpath = inst[7:]
-                os.unlink(lockpath)
-            elif inst.startswith('chdir:'):
-                os.chdir(inst[6:])
-            elif inst != 'none':
-                raise error.Abort(_('invalid value for --daemon-postexec: %s')
-                                  % inst)
-        util.hidewindow()
-        sys.stdout.flush()
-        sys.stderr.flush()
-
-        nullfd = os.open(os.devnull, os.O_RDWR)
-        logfilefd = nullfd
-        if logfile:
-            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
-        os.dup2(nullfd, 0)
-        os.dup2(logfilefd, 1)
-        os.dup2(logfilefd, 2)
-        if nullfd not in (0, 1, 2):
-            os.close(nullfd)
-        if logfile and logfilefd not in (0, 1, 2):
-            os.close(logfilefd)
-
-    if runfn:
-        return runfn()
-
 ## facility to let extension process additional data into an import patch
 # list of identifier to be executed in order
 extrapreimport = []  # run before commit
@@ -1202,8 +1130,7 @@
         chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                             prefix=prefix, relroot=relroot)
         for chunk, label in patch.diffstatui(util.iterlines(chunks),
-                                             width=width,
-                                             git=diffopts.git):
+                                             width=width):
             write(chunk, label=label)
     else:
         for chunk, label in patch.diffui(repo, node1, node2, match,
@@ -1228,6 +1155,14 @@
             sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                      stat=stat, fp=fp, prefix=prefix)
 
+def _changesetlabels(ctx):
+    labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
+    if ctx.troubled():
+        labels.append('changeset.troubled')
+        for trouble in ctx.troubles():
+            labels.append('trouble.%s' % trouble)
+    return ' '.join(labels)
+
 class changeset_printer(object):
     '''show changeset information when templating not requested.'''
 
@@ -1288,7 +1223,7 @@
 
         # i18n: column positioning for "hg log"
         self.ui.write(_("changeset:   %d:%s\n") % revnode,
-                      label='log.changeset changeset.%s' % ctx.phasestr())
+                      label=_changesetlabels(ctx))
 
         # branches are shown first before any other names due to backwards
         # compatibility
@@ -1324,7 +1259,8 @@
             mnode = ctx.manifestnode()
             # i18n: column positioning for "hg log"
             self.ui.write(_("manifest:    %d:%s\n") %
-                          (self.repo.manifest.rev(mnode), hex(mnode)),
+                          (self.repo.manifestlog._revlog.rev(mnode),
+                           hex(mnode)),
                           label='ui.debug log.manifest')
         # i18n: column positioning for "hg log"
         self.ui.write(_("user:        %s\n") % ctx.user(),
@@ -1333,6 +1269,11 @@
         self.ui.write(_("date:        %s\n") % date,
                       label='log.date')
 
+        if ctx.troubled():
+            # i18n: column positioning for "hg log"
+            self.ui.write(_("trouble:     %s\n") % ', '.join(ctx.troubles()),
+                          label='log.trouble')
+
         if self.ui.debugflag:
             files = ctx.p1().status(ctx)[:3]
             for key, value in zip([# i18n: column positioning for "hg log"
@@ -1508,6 +1449,7 @@
             'parent': '{rev}:{node|formatnode} ',
             'manifest': '{rev}:{node|formatnode}',
             'file_copy': '{name} ({source})',
+            'envvar': '{key}={value}',
             'extra': '{key}={value|stringescape}'
             }
         # filecopy is preserved for compatibility reasons
@@ -2566,11 +2508,14 @@
     # for performance to avoid the cost of parsing the manifest.
     if len(matcher.files()) == 1 and not matcher.anypats():
         file = matcher.files()[0]
-        mf = repo.manifest
+        mfl = repo.manifestlog
         mfnode = ctx.manifestnode()
-        if mfnode and mf.find(mfnode, file)[0]:
-            write(file)
-            return 0
+        try:
+            if mfnode and mfl[mfnode].find(file)[0]:
+                write(file)
+                return 0
+        except KeyError:
+            pass
 
     for abs in ctx.walk(matcher):
         write(abs)
@@ -2827,7 +2772,7 @@
         committext = buildcommittext(repo, ctx, subs, extramsg)
 
     # run editor in the repository root
-    olddir = os.getcwd()
+    olddir = pycompat.getcwd()
     os.chdir(repo.root)
 
     # make in-memory changes visible to external process
@@ -2836,8 +2781,17 @@
     pending = tr and tr.writepending() and repo.root
 
     editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
-                        editform=editform, pending=pending)
-    text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
+                              editform=editform, pending=pending,
+                              tmpdir=repo.path)
+    text = editortext
+
+    # strip away anything below this special string (used for editors that want
+    # to display the diff)
+    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
+    if stripbelow:
+        text = text[:stripbelow.start()]
+
+    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
     os.chdir(olddir)
 
     if finishdesc:
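The scissors handling added above can be exercised in isolation. A self-contained sketch (stripscissors is a hypothetical helper mirroring the changeset's logic, not its actual code):

    import re

    _linebelow = "^HG: ------------------------ >8 ------------------------$"

    def stripscissors(editortext):
        # drop the scissors line and everything below it, then remove the
        # remaining HG: comment lines, as commitforceeditor does
        m = re.search(_linebelow, editortext, flags=re.MULTILINE)
        if m:
            editortext = editortext[:m.start()]
        return re.sub("(?m)^HG:.*(\n|$)", "", editortext)

This lets an editor append the full diff below the scissors line for review without any of it ending up in the commit message.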
@@ -3249,13 +3203,18 @@
         fc = ctx[f]
         repo.wwrite(f, fc.data(), fc.flags())
 
+    def doremove(f):
+        try:
+            util.unlinkpath(repo.wjoin(f))
+        except OSError:
+            pass
+        repo.dirstate.remove(f)
+
     audit_path = pathutil.pathauditor(repo.root)
     for f in actions['forget'][0]:
         if interactive:
-            choice = \
-                repo.ui.promptchoice(
-                    _("forget added file %s (yn)?$$ &Yes $$ &No")
-                    % f)
+            choice = repo.ui.promptchoice(
+                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
             if choice == 0:
                 repo.dirstate.drop(f)
             else:
@@ -3264,11 +3223,15 @@
             repo.dirstate.drop(f)
     for f in actions['remove'][0]:
         audit_path(f)
-        try:
-            util.unlinkpath(repo.wjoin(f))
-        except OSError:
-            pass
-        repo.dirstate.remove(f)
+        if interactive:
+            choice = repo.ui.promptchoice(
+                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
+            if choice == 0:
+                doremove(f)
+            else:
+                excluded_files.append(repo.wjoin(f))
+        else:
+            doremove(f)
     for f in actions['drop'][0]:
         audit_path(f)
         repo.dirstate.remove(f)
@@ -3403,6 +3366,11 @@
 
     return cmd
 
+def checkunresolved(ms):
+    ms._repo.ui.deprecwarn('checkunresolved moved from cmdutil to mergeutil',
+                           '4.1')
+    return mergeutil.checkunresolved(ms)
+
 # a list of (ui, repo, otherpeer, opts, missing) functions called by
 # commands.outgoing.  "missing" is "missing" of the result of
 # "findcommonoutgoing()"
@@ -3463,7 +3431,7 @@
     '''Check for an unfinished operation and return the command to finish
     it.
 
-    afterresolvedstates tupples define a .hg/{file} and the corresponding
+    afterresolvedstates tuples define a .hg/{file} and the corresponding
     command needed to finish it.
 
     Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
@@ -3510,57 +3478,9 @@
         hint = after[0]
     raise error.Abort(_('no %s in progress') % task, hint=hint)
 
-class dirstateguard(object):
-    '''Restore dirstate at unexpected failure.
-
-    At the construction, this class does:
-
-    - write current ``repo.dirstate`` out, and
-    - save ``.hg/dirstate`` into the backup file
-
-    This restores ``.hg/dirstate`` from backup file, if ``release()``
-    is invoked before ``close()``.
-
-    This just removes the backup file at ``close()`` before ``release()``.
-    '''
-
+class dirstateguard(dirstateguardmod.dirstateguard):
     def __init__(self, repo, name):
-        self._repo = repo
-        self._active = False
-        self._closed = False
-        self._suffix = '.backup.%s.%d' % (name, id(self))
-        repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
-        self._active = True
-
-    def __del__(self):
-        if self._active: # still active
-            # this may occur, even if this class is used correctly:
-            # for example, releasing other resources like transaction
-            # may raise exception before ``dirstateguard.release`` in
-            # ``release(tr, ....)``.
-            self._abort()
-
-    def close(self):
-        if not self._active: # already inactivated
-            msg = (_("can't close already inactivated backup: dirstate%s")
-                   % self._suffix)
-            raise error.Abort(msg)
-
-        self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
-                                         self._suffix)
-        self._active = False
-        self._closed = True
-
-    def _abort(self):
-        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
-                                           self._suffix)
-        self._active = False
-
-    def release(self):
-        if not self._closed:
-            if not self._active: # already inactivated
-                msg = (_("can't release already inactivated backup:"
-                         " dirstate%s")
-                       % self._suffix)
-                raise error.Abort(msg)
-            self._abort()
+        dirstateguardmod.dirstateguard.__init__(self, repo, name)
+        repo.ui.deprecwarn(
+            'dirstateguard has moved from cmdutil to dirstateguard',
+            '4.1')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/color.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,65 @@
+# utility for color output for Mercurial commands
+#
+# Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com> and other
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+_styles = {'grep.match': 'red bold',
+           'grep.linenumber': 'green',
+           'grep.rev': 'green',
+           'grep.change': 'green',
+           'grep.sep': 'cyan',
+           'grep.filename': 'magenta',
+           'grep.user': 'magenta',
+           'grep.date': 'magenta',
+           'bookmarks.active': 'green',
+           'branches.active': 'none',
+           'branches.closed': 'black bold',
+           'branches.current': 'green',
+           'branches.inactive': 'none',
+           'diff.changed': 'white',
+           'diff.deleted': 'red',
+           'diff.diffline': 'bold',
+           'diff.extended': 'cyan bold',
+           'diff.file_a': 'red bold',
+           'diff.file_b': 'green bold',
+           'diff.hunk': 'magenta',
+           'diff.inserted': 'green',
+           'diff.tab': '',
+           'diff.trailingwhitespace': 'bold red_background',
+           'changeset.public' : '',
+           'changeset.draft' : '',
+           'changeset.secret' : '',
+           'diffstat.deleted': 'red',
+           'diffstat.inserted': 'green',
+           'histedit.remaining': 'red bold',
+           'ui.prompt': 'yellow',
+           'log.changeset': 'yellow',
+           'patchbomb.finalsummary': '',
+           'patchbomb.from': 'magenta',
+           'patchbomb.to': 'cyan',
+           'patchbomb.subject': 'green',
+           'patchbomb.diffstats': '',
+           'rebase.rebased': 'blue',
+           'rebase.remaining': 'red bold',
+           'resolve.resolved': 'green bold',
+           'resolve.unresolved': 'red bold',
+           'shelve.age': 'cyan',
+           'shelve.newest': 'green bold',
+           'shelve.name': 'blue bold',
+           'status.added': 'green bold',
+           'status.clean': 'none',
+           'status.copied': 'none',
+           'status.deleted': 'cyan bold underline',
+           'status.ignored': 'black bold',
+           'status.modified': 'blue bold',
+           'status.removed': 'red bold',
+           'status.unknown': 'magenta bold underline',
+           'tags.normal': 'green',
+           'tags.local': 'black bold'}
+
+def loadcolortable(ui, extname, colortable):
+    _styles.update(colortable)
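loadcolortable is the hook through which extensions merge their own labels into _styles. A hedged sketch of what a participating extension might declare (the myext names are hypothetical):

    # in a hypothetical extension module
    colortable = {'myext.important': 'red bold',
                  'myext.note': 'cyan'}

    # when the extension is loaded, the color machinery is expected to call
    # color.loadcolortable(ui, 'myext', colortable), after which
    # ui.write(..., label='myext.important') renders red and bold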
--- a/mercurial/commands.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/commands.py	Wed Jan 18 11:43:36 2017 -0500
@@ -9,12 +9,10 @@
 
 import difflib
 import errno
-import operator
 import os
-import random
 import re
-import shlex
 import socket
+import string
 import sys
 import tempfile
 import time
@@ -34,25 +32,19 @@
     bundle2,
     changegroup,
     cmdutil,
-    commandserver,
-    context,
     copies,
-    dagparser,
-    dagutil,
     destutil,
+    dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
-    fileset,
     formatter,
     graphmod,
     hbisect,
     help,
     hg,
-    hgweb,
-    localrepo,
     lock as lockmod,
     merge as mergemod,
     minirst,
@@ -61,18 +53,17 @@
     phases,
     policy,
     pvec,
+    pycompat,
     repair,
     revlog,
     revset,
     scmutil,
-    setdiscovery,
-    simplemerge,
+    server,
     sshserver,
     sslutil,
     streamclone,
     templatekw,
     templater,
-    treediscovery,
     ui as uimod,
     util,
 )
@@ -686,7 +677,7 @@
     bheads = repo.branchheads(branch)
     rctx = scmutil.revsingle(repo, hex(parent))
     if not opts.get('merge') and op1 != node:
-        dsguard = cmdutil.dirstateguard(repo, 'backout')
+        dsguard = dirstateguard.dirstateguard(repo, 'backout')
         try:
             ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                          'backout')
@@ -832,7 +823,7 @@
 
           hg log --graph -r "bisect(range)"
 
-      See :hg:`help revsets` for more about the `bisect()` keyword.
+      See :hg:`help revisions.bisect` for more about the `bisect()` predicate.
 
     Returns 0 on success.
     """
@@ -1392,7 +1383,17 @@
         assert cgversion == '02'
         bversion = 'HG20'
 
-    bundle2.writebundle(ui, cg, fname, bversion, compression=bcompression)
+    # TODO compression options should be derived from bundlespec parsing.
+    # This is a temporary hack to allow adjusting bundle compression
+    # level without a) formalizing the bundlespec changes to declare it, or
+    # b) introducing a command flag.
+    compopts = {}
+    complevel = ui.configint('experimental', 'bundlecomplevel')
+    if complevel is not None:
+        compopts['level'] = complevel
+
+    bundle2.writebundle(ui, cg, fname, bversion, compression=bcompression,
+                        compopts=compopts)
 
 @command('cat',
     [('o', 'output', '',
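Per the configint call above, the compression level is read from the experimental section, so an hgrc entry like:

    [experimental]
    bundlecomplevel = 9

would pass level=9 through compopts to bundle2.writebundle; which levels are meaningful depends on the compression engine implied by the bundle type.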
@@ -1812,29 +1813,28 @@
             raise error.Abort(_('only one config item permitted'))
     matched = False
     for section, name, value in ui.walkconfig(untrusted=untrusted):
+        source = ui.configsource(section, name, untrusted)
         value = str(value)
         if fm.isplain():
+            source = source or 'none'
             value = value.replace('\n', '\\n')
         entryname = section + '.' + name
         if values:
             for v in values:
                 if v == section:
                     fm.startitem()
-                    fm.condwrite(ui.debugflag, 'source', '%s: ',
-                                 ui.configsource(section, name, untrusted))
+                    fm.condwrite(ui.debugflag, 'source', '%s: ', source)
                     fm.write('name value', '%s=%s\n', entryname, value)
                     matched = True
                 elif v == entryname:
                     fm.startitem()
-                    fm.condwrite(ui.debugflag, 'source', '%s: ',
-                                 ui.configsource(section, name, untrusted))
+                    fm.condwrite(ui.debugflag, 'source', '%s: ', source)
                     fm.write('value', '%s\n', value)
                     fm.data(name=entryname)
                     matched = True
         else:
             fm.startitem()
-            fm.condwrite(ui.debugflag, 'source', '%s: ',
-                         ui.configsource(section, name, untrusted))
+            fm.condwrite(ui.debugflag, 'source', '%s: ', source)
             fm.write('name value', '%s=%s\n', entryname, value)
             matched = True
     fm.end()
@@ -1866,806 +1866,6 @@
     with repo.wlock(False):
         return cmdutil.copy(ui, repo, pats, opts)
 
-@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
-def debugancestor(ui, repo, *args):
-    """find the ancestor revision of two revisions in a given index"""
-    if len(args) == 3:
-        index, rev1, rev2 = args
-        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
-        lookup = r.lookup
-    elif len(args) == 2:
-        if not repo:
-            raise error.Abort(_("there is no Mercurial repository here "
-                               "(.hg not found)"))
-        rev1, rev2 = args
-        r = repo.changelog
-        lookup = repo.lookup
-    else:
-        raise error.Abort(_('either two or three arguments required'))
-    a = r.ancestor(lookup(rev1), lookup(rev2))
-    ui.write("%d:%s\n" % (r.rev(a), hex(a)))
-
-@command('debugbuilddag',
-    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
-    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
-    ('n', 'new-file', None, _('add new file at each rev'))],
-    _('[OPTION]... [TEXT]'))
-def debugbuilddag(ui, repo, text=None,
-                  mergeable_file=False,
-                  overwritten_file=False,
-                  new_file=False):
-    """builds a repo with a given DAG from scratch in the current empty repo
-
-    The description of the DAG is read from stdin if not given on the
-    command line.
-
-    Elements:
-
-     - "+n" is a linear run of n nodes based on the current default parent
-     - "." is a single node based on the current default parent
-     - "$" resets the default parent to null (implied at the start);
-           otherwise the default parent is always the last node created
-     - "<p" sets the default parent to the backref p
-     - "*p" is a fork at parent p, which is a backref
-     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
-     - "/p2" is a merge of the preceding node and p2
-     - ":tag" defines a local tag for the preceding node
-     - "@branch" sets the named branch for subsequent nodes
-     - "#...\\n" is a comment up to the end of the line
-
-    Whitespace between the above elements is ignored.
-
-    A backref is either
-
-     - a number n, which references the node curr-n, where curr is the current
-       node, or
-     - the name of a local tag you placed earlier using ":tag", or
-     - empty to denote the default parent.
-
-    All string valued-elements are either strictly alphanumeric, or must
-    be enclosed in double quotes ("..."), with "\\" as escape character.
-    """
-
-    if text is None:
-        ui.status(_("reading DAG from stdin\n"))
-        text = ui.fin.read()
-
-    cl = repo.changelog
-    if len(cl) > 0:
-        raise error.Abort(_('repository is not empty'))
-
-    # determine number of revs in DAG
-    total = 0
-    for type, data in dagparser.parsedag(text):
-        if type == 'n':
-            total += 1
-
-    if mergeable_file:
-        linesperrev = 2
-        # make a file with k lines per rev
-        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
-        initialmergedlines.append("")
-
-    tags = []
-
-    wlock = lock = tr = None
-    try:
-        wlock = repo.wlock()
-        lock = repo.lock()
-        tr = repo.transaction("builddag")
-
-        at = -1
-        atbranch = 'default'
-        nodeids = []
-        id = 0
-        ui.progress(_('building'), id, unit=_('revisions'), total=total)
-        for type, data in dagparser.parsedag(text):
-            if type == 'n':
-                ui.note(('node %s\n' % str(data)))
-                id, ps = data
-
-                files = []
-                fctxs = {}
-
-                p2 = None
-                if mergeable_file:
-                    fn = "mf"
-                    p1 = repo[ps[0]]
-                    if len(ps) > 1:
-                        p2 = repo[ps[1]]
-                        pa = p1.ancestor(p2)
-                        base, local, other = [x[fn].data() for x in (pa, p1,
-                                                                     p2)]
-                        m3 = simplemerge.Merge3Text(base, local, other)
-                        ml = [l.strip() for l in m3.merge_lines()]
-                        ml.append("")
-                    elif at > 0:
-                        ml = p1[fn].data().split("\n")
-                    else:
-                        ml = initialmergedlines
-                    ml[id * linesperrev] += " r%i" % id
-                    mergedtext = "\n".join(ml)
-                    files.append(fn)
-                    fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
-
-                if overwritten_file:
-                    fn = "of"
-                    files.append(fn)
-                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
-
-                if new_file:
-                    fn = "nf%i" % id
-                    files.append(fn)
-                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
-                    if len(ps) > 1:
-                        if not p2:
-                            p2 = repo[ps[1]]
-                        for fn in p2:
-                            if fn.startswith("nf"):
-                                files.append(fn)
-                                fctxs[fn] = p2[fn]
-
-                def fctxfn(repo, cx, path):
-                    return fctxs.get(path)
-
-                if len(ps) == 0 or ps[0] < 0:
-                    pars = [None, None]
-                elif len(ps) == 1:
-                    pars = [nodeids[ps[0]], None]
-                else:
-                    pars = [nodeids[p] for p in ps]
-                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
-                                    date=(id, 0),
-                                    user="debugbuilddag",
-                                    extra={'branch': atbranch})
-                nodeid = repo.commitctx(cx)
-                nodeids.append(nodeid)
-                at = id
-            elif type == 'l':
-                id, name = data
-                ui.note(('tag %s\n' % name))
-                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
-            elif type == 'a':
-                ui.note(('branch %s\n' % data))
-                atbranch = data
-            ui.progress(_('building'), id, unit=_('revisions'), total=total)
-        tr.close()
-
-        if tags:
-            repo.vfs.write("localtags", "".join(tags))
-    finally:
-        ui.progress(_('building'), None)
-        release(tr, lock, wlock)
-
-@command('debugbundle',
-        [('a', 'all', None, _('show all details')),
-         ('', 'spec', None, _('print the bundlespec of the bundle'))],
-        _('FILE'),
-        norepo=True)
-def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
-    """lists the contents of a bundle"""
-    with hg.openpath(ui, bundlepath) as f:
-        if spec:
-            spec = exchange.getbundlespec(ui, f)
-            ui.write('%s\n' % spec)
-            return
-
-        gen = exchange.readbundle(ui, f, bundlepath)
-        if isinstance(gen, bundle2.unbundle20):
-            return _debugbundle2(ui, gen, all=all, **opts)
-        _debugchangegroup(ui, gen, all=all, **opts)
-
-def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
-    indent_string = ' ' * indent
-    if all:
-        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
-                 % indent_string)
-
-        def showchunks(named):
-            ui.write("\n%s%s\n" % (indent_string, named))
-            chain = None
-            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
-                node = chunkdata['node']
-                p1 = chunkdata['p1']
-                p2 = chunkdata['p2']
-                cs = chunkdata['cs']
-                deltabase = chunkdata['deltabase']
-                delta = chunkdata['delta']
-                ui.write("%s%s %s %s %s %s %s\n" %
-                         (indent_string, hex(node), hex(p1), hex(p2),
-                          hex(cs), hex(deltabase), len(delta)))
-                chain = node
-
-        chunkdata = gen.changelogheader()
-        showchunks("changelog")
-        chunkdata = gen.manifestheader()
-        showchunks("manifest")
-        for chunkdata in iter(gen.filelogheader, {}):
-            fname = chunkdata['filename']
-            showchunks(fname)
-    else:
-        if isinstance(gen, bundle2.unbundle20):
-            raise error.Abort(_('use debugbundle2 for this file'))
-        chunkdata = gen.changelogheader()
-        chain = None
-        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
-            node = chunkdata['node']
-            ui.write("%s%s\n" % (indent_string, hex(node)))
-            chain = node
-
-def _debugbundle2(ui, gen, all=None, **opts):
-    """lists the contents of a bundle2"""
-    if not isinstance(gen, bundle2.unbundle20):
-        raise error.Abort(_('not a bundle2 file'))
-    ui.write(('Stream params: %s\n' % repr(gen.params)))
-    for part in gen.iterparts():
-        ui.write('%s -- %r\n' % (part.type, repr(part.params)))
-        if part.type == 'changegroup':
-            version = part.params.get('version', '01')
-            cg = changegroup.getunbundler(version, part, 'UN')
-            _debugchangegroup(ui, cg, all=all, indent=4, **opts)
-
-@command('debugcreatestreamclonebundle', [], 'FILE')
-def debugcreatestreamclonebundle(ui, repo, fname):
-    """create a stream clone bundle file
-
-    Stream bundles are special bundles that are essentially archives of
-    revlog files. They are commonly used for cloning very quickly.
-    """
-    requirements, gen = streamclone.generatebundlev1(repo)
-    changegroup.writechunks(ui, gen, fname)
-
-    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
-
-@command('debugapplystreamclonebundle', [], 'FILE')
-def debugapplystreamclonebundle(ui, repo, fname):
-    """apply a stream clone bundle file"""
-    f = hg.openpath(ui, fname)
-    gen = exchange.readbundle(ui, f, fname)
-    gen.apply(repo)
-
-@command('debugcheckstate', [], '')
-def debugcheckstate(ui, repo):
-    """validate the correctness of the current dirstate"""
-    parent1, parent2 = repo.dirstate.parents()
-    m1 = repo[parent1].manifest()
-    m2 = repo[parent2].manifest()
-    errors = 0
-    for f in repo.dirstate:
-        state = repo.dirstate[f]
-        if state in "nr" and f not in m1:
-            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
-            errors += 1
-        if state in "a" and f in m1:
-            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
-            errors += 1
-        if state in "m" and f not in m1 and f not in m2:
-            ui.warn(_("%s in state %s, but not in either manifest\n") %
-                    (f, state))
-            errors += 1
-    for f in m1:
-        state = repo.dirstate[f]
-        if state not in "nrm":
-            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
-            errors += 1
-    if errors:
-        error = _(".hg/dirstate inconsistent with current parent's manifest")
-        raise error.Abort(error)
-
-@command('debugcommands', [], _('[COMMAND]'), norepo=True)
-def debugcommands(ui, cmd='', *args):
-    """list all available commands and options"""
-    for cmd, vals in sorted(table.iteritems()):
-        cmd = cmd.split('|')[0].strip('^')
-        opts = ', '.join([i[1] for i in vals[1]])
-        ui.write('%s: %s\n' % (cmd, opts))
-
-@command('debugcomplete',
-    [('o', 'options', None, _('show the command options'))],
-    _('[-o] CMD'),
-    norepo=True)
-def debugcomplete(ui, cmd='', **opts):
-    """returns the completion list associated with the given command"""
-
-    if opts.get('options'):
-        options = []
-        otables = [globalopts]
-        if cmd:
-            aliases, entry = cmdutil.findcmd(cmd, table, False)
-            otables.append(entry[1])
-        for t in otables:
-            for o in t:
-                if "(DEPRECATED)" in o[3]:
-                    continue
-                if o[0]:
-                    options.append('-%s' % o[0])
-                options.append('--%s' % o[1])
-        ui.write("%s\n" % "\n".join(options))
-        return
-
-    cmdlist, unused_allcmds = cmdutil.findpossible(cmd, table)
-    if ui.verbose:
-        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
-    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
-
-@command('debugdag',
-    [('t', 'tags', None, _('use tags as labels')),
-    ('b', 'branches', None, _('annotate with branch names')),
-    ('', 'dots', None, _('use dots for runs')),
-    ('s', 'spaces', None, _('separate elements by spaces'))],
-    _('[OPTION]... [FILE [REV]...]'),
-    optionalrepo=True)
-def debugdag(ui, repo, file_=None, *revs, **opts):
-    """format the changelog or an index DAG as a concise textual description
-
-    If you pass a revlog index, the revlog's DAG is emitted. If you list
-    revision numbers, they get labeled in the output as rN.
-
-    Otherwise, the changelog DAG of the current repo is emitted.
-    """
-    spaces = opts.get('spaces')
-    dots = opts.get('dots')
-    if file_:
-        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
-        revs = set((int(r) for r in revs))
-        def events():
-            for r in rlog:
-                yield 'n', (r, list(p for p in rlog.parentrevs(r)
-                                        if p != -1))
-                if r in revs:
-                    yield 'l', (r, "r%i" % r)
-    elif repo:
-        cl = repo.changelog
-        tags = opts.get('tags')
-        branches = opts.get('branches')
-        if tags:
-            labels = {}
-            for l, n in repo.tags().items():
-                labels.setdefault(cl.rev(n), []).append(l)
-        def events():
-            b = "default"
-            for r in cl:
-                if branches:
-                    newb = cl.read(cl.node(r))[5]['branch']
-                    if newb != b:
-                        yield 'a', newb
-                        b = newb
-                yield 'n', (r, list(p for p in cl.parentrevs(r)
-                                        if p != -1))
-                if tags:
-                    ls = labels.get(r)
-                    if ls:
-                        for l in ls:
-                            yield 'l', (r, l)
-    else:
-        raise error.Abort(_('need repo for changelog dag'))
-
-    for line in dagparser.dagtextlines(events(),
-                                       addspaces=spaces,
-                                       wraplabels=True,
-                                       wrapannotations=True,
-                                       wrapnonlinear=dots,
-                                       usedots=dots,
-                                       maxlinewidth=70):
-        ui.write(line)
-        ui.write("\n")
-
-@command('debugdata', debugrevlogopts, _('-c|-m|FILE REV'))
-def debugdata(ui, repo, file_, rev=None, **opts):
-    """dump the contents of a data file revision"""
-    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
-        if rev is not None:
-            raise error.CommandError('debugdata', _('invalid arguments'))
-        file_, rev = None, file_
-    elif rev is None:
-        raise error.CommandError('debugdata', _('invalid arguments'))
-    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
-    try:
-        ui.write(r.revision(r.lookup(rev)))
-    except KeyError:
-        raise error.Abort(_('invalid revision identifier %s') % rev)
-
-@command('debugdate',
-    [('e', 'extended', None, _('try extended date formats'))],
-    _('[-e] DATE [RANGE]'),
-    norepo=True, optionalrepo=True)
-def debugdate(ui, date, range=None, **opts):
-    """parse and display a date"""
-    if opts["extended"]:
-        d = util.parsedate(date, util.extendeddateformats)
-    else:
-        d = util.parsedate(date)
-    ui.write(("internal: %s %s\n") % d)
-    ui.write(("standard: %s\n") % util.datestr(d))
-    if range:
-        m = util.matchdate(range)
-        ui.write(("match: %s\n") % m(d[0]))
-
-@command('debugdiscovery',
-    [('', 'old', None, _('use old-style discovery')),
-    ('', 'nonheads', None,
-     _('use old-style discovery with non-heads included')),
-    ] + remoteopts,
-    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
-def debugdiscovery(ui, repo, remoteurl="default", **opts):
-    """runs the changeset discovery protocol in isolation"""
-    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
-                                      opts.get('branch'))
-    remote = hg.peer(repo, opts, remoteurl)
-    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
-
-    # make sure tests are repeatable
-    random.seed(12323)
-
-    def doit(localheads, remoteheads, remote=remote):
-        if opts.get('old'):
-            if localheads:
-                raise error.Abort('cannot use localheads with old style '
-                                 'discovery')
-            if not util.safehasattr(remote, 'branches'):
-                # enable in-client legacy support
-                remote = localrepo.locallegacypeer(remote.local())
-            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
-                                                                force=True)
-            common = set(common)
-            if not opts.get('nonheads'):
-                ui.write(("unpruned common: %s\n") %
-                         " ".join(sorted(short(n) for n in common)))
-                dag = dagutil.revlogdag(repo.changelog)
-                all = dag.ancestorset(dag.internalizeall(common))
-                common = dag.externalizeall(dag.headsetofconnecteds(all))
-        else:
-            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
-        common = set(common)
-        rheads = set(hds)
-        lheads = set(repo.heads())
-        ui.write(("common heads: %s\n") %
-                 " ".join(sorted(short(n) for n in common)))
-        if lheads <= common:
-            ui.write(("local is subset\n"))
-        elif rheads <= common:
-            ui.write(("remote is subset\n"))
-
-    serverlogs = opts.get('serverlog')
-    if serverlogs:
-        for filename in serverlogs:
-            with open(filename, 'r') as logfile:
-                line = logfile.readline()
-                while line:
-                    parts = line.strip().split(';')
-                    op = parts[1]
-                    if op == 'cg':
-                        pass
-                    elif op == 'cgss':
-                        doit(parts[2].split(' '), parts[3].split(' '))
-                    elif op == 'unb':
-                        doit(parts[3].split(' '), parts[2].split(' '))
-                    line = logfile.readline()
-    else:
-        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
-                                                 opts.get('remote_head'))
-        localrevs = opts.get('local_head')
-        doit(localrevs, remoterevs)
-
-@command('debugextensions', formatteropts, [], norepo=True)
-def debugextensions(ui, **opts):
-    '''show information about active extensions'''
-    exts = extensions.extensions(ui)
-    hgver = util.version()
-    fm = ui.formatter('debugextensions', opts)
-    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
-        isinternal = extensions.ismoduleinternal(extmod)
-        extsource = extmod.__file__
-        if isinternal:
-            exttestedwith = []  # never expose magic string to users
-        else:
-            exttestedwith = getattr(extmod, 'testedwith', '').split()
-        extbuglink = getattr(extmod, 'buglink', None)
-
-        fm.startitem()
-
-        if ui.quiet or ui.verbose:
-            fm.write('name', '%s\n', extname)
-        else:
-            fm.write('name', '%s', extname)
-            if isinternal or hgver in exttestedwith:
-                fm.plain('\n')
-            elif not exttestedwith:
-                fm.plain(_(' (untested!)\n'))
-            else:
-                lasttestedversion = exttestedwith[-1]
-                fm.plain(' (%s!)\n' % lasttestedversion)
-
-        fm.condwrite(ui.verbose and extsource, 'source',
-                 _('  location: %s\n'), extsource or "")
-
-        if ui.verbose:
-            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
-        fm.data(bundled=isinternal)
-
-        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
-                     _('  tested with: %s\n'),
-                     fm.formatlist(exttestedwith, name='ver'))
-
-        fm.condwrite(ui.verbose and extbuglink, 'buglink',
-                 _('  bug reporting: %s\n'), extbuglink or "")
-
-    fm.end()
-
-@command('debugfileset',
-    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
-    _('[-r REV] FILESPEC'))
-def debugfileset(ui, repo, expr, **opts):
-    '''parse and apply a fileset specification'''
-    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
-    if ui.verbose:
-        tree = fileset.parse(expr)
-        ui.note(fileset.prettyformat(tree), "\n")
-
-    for f in ctx.getfileset(expr):
-        ui.write("%s\n" % f)
-
-@command('debugfsinfo', [], _('[PATH]'), norepo=True)
-def debugfsinfo(ui, path="."):
-    """show information detected about current filesystem"""
-    util.writefile('.debugfsinfo', '')
-    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
-    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
-    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
-    ui.write(('case-sensitive: %s\n') % (util.fscasesensitive('.debugfsinfo')
-                                and 'yes' or 'no'))
-    os.unlink('.debugfsinfo')
-
-@command('debuggetbundle',
-    [('H', 'head', [], _('id of head node'), _('ID')),
-    ('C', 'common', [], _('id of common node'), _('ID')),
-    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
-    _('REPO FILE [-H|-C ID]...'),
-    norepo=True)
-def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
-    """retrieves a bundle from a repo
-
-    Every ID must be a full-length hex node id string. Saves the bundle to the
-    given file.
-    """
-    repo = hg.peer(ui, opts, repopath)
-    if not repo.capable('getbundle'):
-        raise error.Abort("getbundle() not supported by target repository")
-    args = {}
-    if common:
-        args['common'] = [bin(s) for s in common]
-    if head:
-        args['heads'] = [bin(s) for s in head]
-    # TODO: get desired bundlecaps from command line.
-    args['bundlecaps'] = None
-    bundle = repo.getbundle('debug', **args)
-
-    bundletype = opts.get('type', 'bzip2').lower()
-    btypes = {'none': 'HG10UN',
-              'bzip2': 'HG10BZ',
-              'gzip': 'HG10GZ',
-              'bundle2': 'HG20'}
-    bundletype = btypes.get(bundletype)
-    if bundletype not in bundle2.bundletypes:
-        raise error.Abort(_('unknown bundle type specified with --type'))
-    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
-
-@command('debugignore', [], '[FILE]')
-def debugignore(ui, repo, *files, **opts):
-    """display the combined ignore pattern and information about ignored files
-
-    With no argument display the combined ignore pattern.
-
-    Given space separated file names, shows if the given file is ignored and
-    if so, show the ignore rule (file and line number) that matched it.
-    """
-    ignore = repo.dirstate._ignore
-    if not files:
-        # Show all the patterns
-        includepat = getattr(ignore, 'includepat', None)
-        if includepat is not None:
-            ui.write("%s\n" % includepat)
-        else:
-            raise error.Abort(_("no ignore patterns found"))
-    else:
-        for f in files:
-            nf = util.normpath(f)
-            ignored = None
-            ignoredata = None
-            if nf != '.':
-                if ignore(nf):
-                    ignored = nf
-                    ignoredata = repo.dirstate._ignorefileandline(nf)
-                else:
-                    for p in util.finddirs(nf):
-                        if ignore(p):
-                            ignored = p
-                            ignoredata = repo.dirstate._ignorefileandline(p)
-                            break
-            if ignored:
-                if ignored == nf:
-                    ui.write(_("%s is ignored\n") % f)
-                else:
-                    ui.write(_("%s is ignored because of "
-                               "containing folder %s\n")
-                             % (f, ignored))
-                ignorefile, lineno, line = ignoredata
-                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
-                         % (ignorefile, lineno, line))
-            else:
-                ui.write(_("%s is not ignored\n") % f)
-
-@command('debugindex', debugrevlogopts +
-    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
-    _('[-f FORMAT] -c|-m|FILE'),
-    optionalrepo=True)
-def debugindex(ui, repo, file_=None, **opts):
-    """dump the contents of an index file"""
-    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
-    format = opts.get('format', 0)
-    if format not in (0, 1):
-        raise error.Abort(_("unknown format %d") % format)
-
-    generaldelta = r.version & revlog.REVLOGGENERALDELTA
-    if generaldelta:
-        basehdr = ' delta'
-    else:
-        basehdr = '  base'
-
-    if ui.debugflag:
-        shortfn = hex
-    else:
-        shortfn = short
-
-    # There might not be anything in r, so have a sane default
-    idlen = 12
-    for i in r:
-        idlen = len(shortfn(r.node(i)))
-        break
-
-    if format == 0:
-        ui.write(("   rev    offset  length " + basehdr + " linkrev"
-                 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
-    elif format == 1:
-        ui.write(("   rev flag   offset   length"
-                 "     size " + basehdr + "   link     p1     p2"
-                 " %s\n") % "nodeid".rjust(idlen))
-
-    for i in r:
-        node = r.node(i)
-        if generaldelta:
-            base = r.deltaparent(i)
-        else:
-            base = r.chainbase(i)
-        if format == 0:
-            try:
-                pp = r.parents(node)
-            except Exception:
-                pp = [nullid, nullid]
-            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
-                    i, r.start(i), r.length(i), base, r.linkrev(i),
-                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
-        elif format == 1:
-            pr = r.parentrevs(i)
-            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
-                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
-                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
-
-@command('debugindexdot', debugrevlogopts,
-    _('-c|-m|FILE'), optionalrepo=True)
-def debugindexdot(ui, repo, file_=None, **opts):
-    """dump an index DAG as a graphviz dot file"""
-    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
-    ui.write(("digraph G {\n"))
-    for i in r:
-        node = r.node(i)
-        pp = r.parents(node)
-        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
-        if pp[1] != nullid:
-            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
-    ui.write("}\n")
-
-@command('debugdeltachain',
-    debugrevlogopts + formatteropts,
-    _('-c|-m|FILE'),
-    optionalrepo=True)
-def debugdeltachain(ui, repo, file_=None, **opts):
-    """dump information about delta chains in a revlog
-
-    Output can be templatized. Available template keywords are:
-
-    :``rev``:       revision number
-    :``chainid``:   delta chain identifier (numbered by unique base)
-    :``chainlen``:  delta chain length to this revision
-    :``prevrev``:   previous revision in delta chain
-    :``deltatype``: role of delta / how it was computed
-    :``compsize``:  compressed size of revision
-    :``uncompsize``: uncompressed size of revision
-    :``chainsize``: total size of compressed revisions in chain
-    :``chainratio``: total chain size divided by uncompressed revision size
-                    (new delta chains typically start at ratio 2.00)
-    :``lindist``:   linear distance from base revision in delta chain to end
-                    of this revision
-    :``extradist``: total size of revisions not part of this delta chain from
-                    base of delta chain to end of this revision; a measurement
-                    of how much extra data we need to read/seek across to read
-                    the delta chain for this revision
-    :``extraratio``: extradist divided by chainsize; another representation of
-                    how much unrelated data is needed to load this delta chain
-    """
-    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
-    index = r.index
-    generaldelta = r.version & revlog.REVLOGGENERALDELTA
-
-    def revinfo(rev):
-        e = index[rev]
-        compsize = e[1]
-        uncompsize = e[2]
-        chainsize = 0
-
-        if generaldelta:
-            if e[3] == e[5]:
-                deltatype = 'p1'
-            elif e[3] == e[6]:
-                deltatype = 'p2'
-            elif e[3] == rev - 1:
-                deltatype = 'prev'
-            elif e[3] == rev:
-                deltatype = 'base'
-            else:
-                deltatype = 'other'
-        else:
-            if e[3] == rev:
-                deltatype = 'base'
-            else:
-                deltatype = 'prev'
-
-        chain = r._deltachain(rev)[0]
-        for iterrev in chain:
-            e = index[iterrev]
-            chainsize += e[1]
-
-        return compsize, uncompsize, deltatype, chain, chainsize
-
-    fm = ui.formatter('debugdeltachain', opts)
-
-    fm.plain('    rev  chain# chainlen     prev   delta       '
-             'size    rawsize  chainsize     ratio   lindist extradist '
-             'extraratio\n')
-
-    chainbases = {}
-    for rev in r:
-        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
-        chainbase = chain[0]
-        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
-        basestart = r.start(chainbase)
-        revstart = r.start(rev)
-        lineardist = revstart + comp - basestart
-        extradist = lineardist - chainsize
-        try:
-            prevrev = chain[-2]
-        except IndexError:
-            prevrev = -1
-
-        chainratio = float(chainsize) / float(uncomp)
-        extraratio = float(extradist) / float(chainsize)
-
-        fm.startitem()
-        fm.write('rev chainid chainlen prevrev deltatype compsize '
-                 'uncompsize chainsize chainratio lindist extradist '
-                 'extraratio',
-                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
-                 rev, chainid, len(chain), prevrev, deltatype, comp,
-                 uncomp, chainsize, chainratio, lineardist, extradist,
-                 extraratio,
-                 rev=rev, chainid=chainid, chainlen=len(chain),
-                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
-                 uncompsize=uncomp, chainsize=chainsize,
-                 chainratio=chainratio, lindist=lineardist,
-                 extradist=extradist, extraratio=extraratio)
-
-    fm.end()
-
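To make the ratio columns above concrete, here is a small worked example; the byte counts are invented and only the arithmetic mirrors the command:

  # worked example of the debugdeltachain ratios (all sizes invented)
  chainsize = 1200    # total compressed size of revisions in the chain
  uncompsize = 600    # uncompressed size of this revision
  lineardist = 1350   # bytes from the chain base to the end of this revision

  extradist = lineardist - chainsize                   # 150 unrelated bytes
  chainratio = float(chainsize) / float(uncompsize)    # 2.00000
  extraratio = float(extradist) / float(chainsize)     # 0.12500
  print('%9.5f %10.5f' % (chainratio, extraratio))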
 @command('debuginstall', [] + formatteropts, '', norepo=True)
 def debuginstall(ui, **opts):
     '''test Mercurial installation
@@ -2698,9 +1898,9 @@
 
     # Python
     fm.write('pythonexe', _("checking Python executable (%s)\n"),
-             sys.executable)
+             pycompat.sysexecutable)
     fm.write('pythonver', _("checking Python version (%s)\n"),
-             ("%s.%s.%s" % sys.version_info[:3]))
+             ("%d.%d.%d" % sys.version_info[:3]))
     fm.write('pythonlib', _("checking Python lib (%s)...\n"),
              os.path.dirname(os.__file__))
 
@@ -2750,6 +1950,22 @@
         problems += 1
     fm.condwrite(err, 'extensionserror', " %s\n", err)
 
+    compengines = util.compengines._engines.values()
+    fm.write('compengines', _('checking registered compression engines (%s)\n'),
+             fm.formatlist(sorted(e.name() for e in compengines),
+                           name='compengine', fmt='%s', sep=', '))
+    fm.write('compenginesavail', _('checking available compression engines '
+                                   '(%s)\n'),
+             fm.formatlist(sorted(e.name() for e in compengines
+                                  if e.available()),
+                           name='compengine', fmt='%s', sep=', '))
+    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
+    fm.write('compenginesserver', _('checking available compression engines '
+                                    'for wire protocol (%s)\n'),
+             fm.formatlist([e.name() for e in wirecompengines
+                            if e.wireprotosupport()],
+                           name='compengine', fmt='%s', sep=', '))
+
     # templates
     p = templater.templatepaths()
     fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
@@ -2780,7 +1996,7 @@
     editor = ui.geteditor()
     editor = util.expandpath(editor)
     fm.write('editor', _("checking commit editor... (%s)\n"), editor)
-    cmdpath = util.findexe(shlex.split(editor)[0])
+    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
     fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                  _(" No commit editor set and can't find %s in PATH\n"
                    " (specify a commit editor in your configuration"
@@ -3067,7 +2283,7 @@
 
         with repo.lock():
             n = repair.deleteobsmarkers(repo.obsstore, indices)
-            ui.write(_('deleted %i obsolescense markers\n') % n)
+            ui.write(_('deleted %i obsolescence markers\n') % n)
 
         return
 
@@ -3153,16 +2369,16 @@
 
     def complete(path, acceptable):
         dirstate = repo.dirstate
-        spec = os.path.normpath(os.path.join(os.getcwd(), path))
-        rootdir = repo.root + os.sep
+        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
+        rootdir = repo.root + pycompat.ossep
         if spec != repo.root and not spec.startswith(rootdir):
             return [], []
         if os.path.isdir(spec):
             spec += '/'
         spec = spec[len(rootdir):]
-        fixpaths = os.sep != '/'
+        fixpaths = pycompat.ossep != '/'
         if fixpaths:
-            spec = spec.replace(os.sep, '/')
+            spec = spec.replace(pycompat.ossep, '/')
         speclen = len(spec)
         fullpaths = opts['full']
         files, dirs = set(), set()
@@ -3170,11 +2386,11 @@
         for f, st in dirstate.iteritems():
             if f.startswith(spec) and st[0] in acceptable:
                 if fixpaths:
-                    f = f.replace('/', os.sep)
+                    f = f.replace('/', pycompat.ossep)
                 if fullpaths:
                     addfile(f)
                     continue
-                s = f.find(os.sep, speclen)
+                s = f.find(pycompat.ossep, speclen)
                 if s >= 0:
                     adddir(f[:s])
                 else:
@@ -3366,6 +2582,8 @@
     datasize = [None, 0, 0]
     fullsize = [None, 0, 0]
     deltasize = [None, 0, 0]
+    chunktypecounts = {}
+    chunktypesizes = {}
 
     def addsize(size, l):
         if l[0] is None or size < l[0]:
@@ -3403,6 +2621,20 @@
             elif delta != nullrev:
                 numother += 1
 
+        # Obtain data on the raw chunks in the revlog.
+        chunk = r._chunkraw(rev, rev)[1]
+        if chunk:
+            chunktype = chunk[0]
+        else:
+            chunktype = 'empty'
+
+        if chunktype not in chunktypecounts:
+            chunktypecounts[chunktype] = 0
+            chunktypesizes[chunktype] = 0
+
+        chunktypecounts[chunktype] += 1
+        chunktypesizes[chunktype] += size
+
     # Adjust size min value for empty cases
     for size in (datasize, fullsize, deltasize):
         if size[0] is None:
@@ -3454,6 +2686,24 @@
     ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
     ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
 
+    def fmtchunktype(chunktype):
+        if chunktype == 'empty':
+            return '    %s     : ' % chunktype
+        elif chunktype in string.ascii_letters:
+            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
+        else:
+            return '    0x%s      : ' % hex(chunktype)
+
+    ui.write('\n')
+    ui.write(('chunks        : ') + fmt2 % numrevs)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
+    ui.write(('chunks size   : ') + fmt2 % totalsize)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
+
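The chunk type tallied here is simply the first byte of each raw revlog chunk; in Mercurial's revlog format 'u' prefixes literal (uncompressed) data and 'x' is the first byte of a zlib stream, while zero-length chunks are grouped under 'empty'. A sketch of how fmtchunktype's labels come out, with binascii standing in for node.hex:

  from binascii import hexlify

  # reproduce the "0x78 (x)" style labels printed by fmtchunktype above
  for chunktype in (b'x', b'u'):
      print('0x%s (%s)' % (hexlify(chunktype).decode(), chunktype.decode()))
  # 0x78 (x)
  # 0x75 (u)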
     ui.write('\n')
     fmt = dfmtstr(max(avgchainlen, compratio))
     ui.write(('avg chain length  : ') + fmt % avgchainlen)
@@ -3755,7 +3005,7 @@
     if not items:
         return
     f = lambda fn: fn
-    if ui.configbool('ui', 'slash') and os.sep != '/':
+    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
         f = lambda fn: util.normpath(fn)
     fmt = 'f  %%-%ds  %%-%ds  %%s' % (
         max([len(abs) for abs in items]),
@@ -4120,8 +3370,7 @@
 
           hg log -r "sort(all(), date)"
 
-    See :hg:`help revisions` and :hg:`help revsets` for more about
-    specifying revisions.
+    See :hg:`help revisions` for more about specifying revisions.
 
     Returns 0 on successful completion.
     '''
@@ -4655,15 +3904,15 @@
 
     keep = opts.get('system') or []
     if len(keep) == 0:
-        if sys.platform.startswith('win'):
+        if pycompat.sysplatform.startswith('win'):
             keep.append('windows')
-        elif sys.platform == 'OpenVMS':
+        elif pycompat.sysplatform == 'OpenVMS':
             keep.append('vms')
-        elif sys.platform == 'plan9':
+        elif pycompat.sysplatform == 'plan9':
             keep.append('plan9')
         else:
             keep.append('unix')
-            keep.append(sys.platform.lower())
+            keep.append(pycompat.sysplatform.lower())
     if ui.verbose:
         keep.append('verbose')
 
@@ -5011,7 +4260,7 @@
             lock = repo.lock()
             tr = repo.transaction('import')
         else:
-            dsguard = cmdutil.dirstateguard(repo, 'import')
+            dsguard = dirstateguard.dirstateguard(repo, 'import')
         parents = repo[None].parents()
         for patchurl in patches:
             if patchurl == '-':
@@ -5330,8 +4579,8 @@
 
     See :hg:`help dates` for a list of formats valid for -d/--date.
 
-    See :hg:`help revisions` and :hg:`help revsets` for more about
-    specifying and ordering revisions.
+    See :hg:`help revisions` for more about specifying and ordering
+    revisions.
 
     See :hg:`help templates` for more about pre-packaged styles and
     specifying custom templates.
@@ -6574,11 +5823,8 @@
         s = sshserver.sshserver(ui, repo)
         s.serve_forever()
 
-    if opts["cmdserver"]:
-        service = commandserver.createservice(ui, repo, opts)
-    else:
-        service = hgweb.createservice(ui, repo, opts)
-    return cmdutil.service(opts, initfn=service.init, runfn=service.run)
+    service = server.createservice(ui, repo, opts)
+    return server.runservice(opts, initfn=service.init, runfn=service.run)
 
 @command('^status|st',
     [('A', 'all', None, _('show status of all files')),
@@ -6751,7 +5997,7 @@
         # shows a working directory parent *changeset*:
         # i18n: column positioning for "hg summary"
         ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
-                 label='log.changeset changeset.%s' % p.phasestr())
+                 label=cmdutil._changesetlabels(p))
         ui.write(' '.join(p.tags()), label='log.tag')
         if p.bookmarks():
             marks.extend(p.bookmarks())
@@ -6760,6 +6006,11 @@
                 ui.write(_(' (empty repository)'))
             else:
                 ui.write(_(' (no revision checked out)'))
+        if p.troubled():
+            ui.write(' ('
+                     + ', '.join(ui.label(trouble, 'trouble.%s' % trouble)
+                                 for trouble in p.troubles())
+                     + ')')
         ui.write('\n')
         if p.description():
             ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
@@ -7230,10 +6481,10 @@
     changeset (see :hg:`help parents`).
 
     If the changeset is not a descendant or ancestor of the working
-    directory's parent, the update is aborted. With the -c/--check
-    option, the working directory is checked for uncommitted changes; if
-    none are found, the working directory is updated to the specified
-    changeset.
+    directory's parent and there are uncommitted changes, the update is
+    aborted. With the -c/--check option, the working directory is checked
+    for uncommitted changes; if none are found, the working directory is
+    updated to the specified changeset.
 
     .. container:: verbose
 
--- a/mercurial/commandserver.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/commandserver.py	Wed Jan 18 11:43:36 2017 -0500
@@ -15,13 +15,13 @@
 import signal
 import socket
 import struct
-import sys
 import traceback
 
 from .i18n import _
 from . import (
     encoding,
     error,
+    pycompat,
     util,
 )
 
@@ -54,8 +54,8 @@
     def write(self, data):
         if not data:
             return
-        self.out.write(struct.pack('>cI', self.channel, len(data)))
-        self.out.write(data)
+        # single write() to guarantee the same atomicity as the underlying file
+        self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
         self.out.flush()
 
     def __getattr__(self, attr):
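The single-write change above matters because each channel frame is a five-byte header ('>cI': one channel byte plus a big-endian 32-bit payload length) immediately followed by the payload; writing header and payload separately could interleave with another writer on the same descriptor. A minimal self-contained sketch of the framing:

  import struct

  def frame(channel, data):
      # one write()-sized blob: channel byte, big-endian length, payload
      return struct.pack('>cI', channel, len(data)) + data

  buf = frame(b'o', b'hello')
  channel, length = struct.unpack('>cI', buf[:5])
  assert (channel, length, buf[5:]) == (b'o', 5, b'hello')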
@@ -153,7 +153,7 @@
     based stream to fout.
     """
     def __init__(self, ui, repo, fin, fout):
-        self.cwd = os.getcwd()
+        self.cwd = pycompat.getcwd()
 
         # developer config: cmdserver.log
         logpath = ui.config("cmdserver", "log", None)
@@ -304,8 +304,8 @@
     ui.flush()
     newfiles = []
     nullfd = os.open(os.devnull, os.O_RDWR)
-    for f, sysf, mode in [(ui.fin, sys.stdin, 'rb'),
-                          (ui.fout, sys.stdout, 'wb')]:
+    for f, sysf, mode in [(ui.fin, util.stdin, 'rb'),
+                          (ui.fout, util.stdout, 'wb')]:
         if f is sysf:
             newfd = os.dup(f.fileno())
             os.dup2(nullfd, f.fileno())
@@ -530,15 +530,3 @@
             _serverequest(self.ui, self.repo, conn, h.createcmdserver)
         finally:
             gc.collect()  # trigger __del__ since worker process uses os._exit
-
-_servicemap = {
-    'pipe': pipeservice,
-    'unix': unixforkingservice,
-    }
-
-def createservice(ui, repo, opts):
-    mode = opts['cmdserver']
-    try:
-        return _servicemap[mode](ui, repo, opts)
-    except KeyError:
-        raise error.Abort(_('unknown mode %s') % mode)
--- a/mercurial/config.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/config.py	Wed Jan 18 11:43:36 2017 -0500
@@ -90,13 +90,13 @@
             self._source.pop((section, item), None)
 
     def parse(self, src, data, sections=None, remap=None, include=None):
-        sectionre = util.re.compile(r'\[([^\[]+)\]')
-        itemre = util.re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
-        contre = util.re.compile(r'\s+(\S|\S.*\S)\s*$')
-        emptyre = util.re.compile(r'(;|#|\s*$)')
-        commentre = util.re.compile(r'(;|#)')
-        unsetre = util.re.compile(r'%unset\s+(\S+)')
-        includere = util.re.compile(r'%include\s+(\S|\S.*\S)\s*$')
+        sectionre = util.re.compile(br'\[([^\[]+)\]')
+        itemre = util.re.compile(br'([^=\s][^=]*?)\s*=\s*(.*\S|)')
+        contre = util.re.compile(br'\s+(\S|\S.*\S)\s*$')
+        emptyre = util.re.compile(br'(;|#|\s*$)')
+        commentre = util.re.compile(br'(;|#)')
+        unsetre = util.re.compile(br'%unset\s+(\S+)')
+        includere = util.re.compile(br'%include\s+(\S|\S.*\S)\s*$')
         section = ""
         item = None
         line = 0
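Switching these patterns to br'' literals keeps config parsing byte-oriented under Python 3. A quick sketch of what two of them accept, with the stdlib re standing in for util.re (which merely prefers the re2 backend when available):

  import re

  sectionre = re.compile(br'\[([^\[]+)\]')
  itemre = re.compile(br'([^=\s][^=]*?)\s*=\s*(.*\S|)')

  assert sectionre.match(b'[ui]').group(1) == b'ui'
  assert itemre.match(b'username = alice').groups() == (b'username', b'alice')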
--- a/mercurial/context.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/context.py	Wed Jan 18 11:43:36 2017 -0500
@@ -14,8 +14,11 @@
 
 from .i18n import _
 from .node import (
+    addednodeid,
     bin,
     hex,
+    modifiednodeid,
+    newnodeid,
     nullid,
     nullrev,
     short,
@@ -39,11 +42,6 @@
 
 propertycache = util.propertycache
 
-# Phony node value to stand-in for new files in some uses of
-# manifests. Manifests support 21-byte hashes for nodes which are
-# dirty in the working copy.
-_newnode = '!' * 21
-
 nonascii = re.compile(r'[^\x21-\x7f]').search
 
 class basectx(object):
@@ -142,7 +140,7 @@
                 removed.append(fn)
             elif flag1 != flag2:
                 modified.append(fn)
-            elif node2 != _newnode:
+            elif node2 != newnodeid:
                 # When comparing files between two commits, we save time by
                 # not comparing the file contents when the nodeids differ.
                 # Note that this means we incorrectly report a reverted change
@@ -178,6 +176,8 @@
         return hex(self.node())
     def manifest(self):
         return self._manifest
+    def manifestctx(self):
+        return self._manifestctx
     def repo(self):
         return self._repo
     def phasestr(self):
@@ -259,8 +259,10 @@
             if path in self._manifestdelta:
                 return (self._manifestdelta[path],
                         self._manifestdelta.flags(path))
-        node, flag = self._repo.manifest.find(self._changeset.manifest, path)
-        if not node:
+        mfl = self._repo.manifestlog
+        try:
+            node, flag = mfl[self._changeset.manifest].find(path)
+        except KeyError:
             raise error.ManifestLookupError(self._node, path,
                                             _('not found in manifest'))
 
@@ -528,12 +530,15 @@
 
     @propertycache
     def _manifest(self):
-        return self._repo.manifestlog[self._changeset.manifest].read()
+        return self._manifestctx.read()
+
+    @propertycache
+    def _manifestctx(self):
+        return self._repo.manifestlog[self._changeset.manifest]
 
     @propertycache
     def _manifestdelta(self):
-        mfnode = self._changeset.manifest
-        return self._repo.manifestlog[mfnode].readdelta()
+        return self._manifestctx.readdelta()
 
     @propertycache
     def _parents(self):
@@ -680,8 +685,7 @@
         elif '_descendantrev' in self.__dict__:
             # this file context was created from a revision with a known
             # descendant, we can (lazily) correct for linkrev aliases
-            return self._adjustlinkrev(self._path, self._filelog,
-                                       self._filenode, self._descendantrev)
+            return self._adjustlinkrev(self._descendantrev)
         else:
             return self._filelog.linkrev(self._filerev)
 
@@ -709,7 +713,10 @@
             return False
 
     def __str__(self):
-        return "%s@%s" % (self.path(), self._changectx)
+        try:
+            return "%s@%s" % (self.path(), self._changectx)
+        except error.LookupError:
+            return "%s@???" % self.path()
 
     def __repr__(self):
         return "<%s %s>" % (type(self).__name__, str(self))
@@ -808,17 +815,13 @@
 
         return True
 
-    def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
+    def _adjustlinkrev(self, srcrev, inclusive=False):
         """return the first ancestor of <srcrev> introducing <fnode>
 
         If the linkrev of the file revision does not point to an ancestor of
         srcrev, we'll walk down the ancestors until we find one introducing
         this file revision.
 
-        :repo: a localrepository object (used to access changelog and manifest)
-        :path: the file path
-        :fnode: the nodeid of the file revision
-        :filelog: the filelog of this path
         :srcrev: the changeset revision we search ancestors from
         :inclusive: if true, the src revision will also be checked
         """
@@ -826,8 +829,7 @@
         cl = repo.unfiltered().changelog
         mfl = repo.manifestlog
         # fetch the linkrev
-        fr = filelog.rev(fnode)
-        lkr = filelog.linkrev(fr)
+        lkr = self.linkrev()
         # hack to reuse ancestor computation when searching for renames
         memberanc = getattr(self, '_ancestrycontext', None)
         iteranc = None
@@ -844,6 +846,8 @@
         if lkr not in memberanc:
             if iteranc is None:
                 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
+            fnode = self._filenode
+            path = self._path
             for a in iteranc:
                 ac = cl.read(a) # get changeset data (we avoid object creation)
                 if path in ac[3]: # checking the 'files' field.
@@ -871,8 +875,7 @@
         noctx = not ('_changeid' in attrs or '_changectx' in attrs)
         if noctx or self.rev() == lkr:
             return self.linkrev()
-        return self._adjustlinkrev(self._path, self._filelog, self._filenode,
-                                   self.rev(), inclusive=True)
+        return self._adjustlinkrev(self.rev(), inclusive=True)
 
     def _parentfilectx(self, path, fileid, filelog):
         """create parent filectx keeping ancestry info for _adjustlinkrev()"""
@@ -1107,6 +1110,9 @@
         return filectx(self._repo, self._path, fileid=fileid,
                        filelog=self._filelog, changeid=changeid)
 
+    def rawdata(self):
+        return self._filelog.revision(self._filenode, raw=True)
+
     def data(self):
         try:
             return self._filelog.read(self._filenode)
@@ -1150,6 +1156,42 @@
         return [filectx(self._repo, self._path, fileid=x,
                         filelog=self._filelog) for x in c]
 
+def _changesrange(fctx1, fctx2, linerange2, diffopts):
+    """Return `(diffinrange, linerange1)` where `diffinrange` is True
+    if diff from fctx2 to fctx1 has changes in linerange2 and
+    `linerange1` is the new line range for fctx1.
+    """
+    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
+    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
+    diffinrange = any(stype == '!' for _, stype in filteredblocks)
+    return diffinrange, linerange1
+
+def blockancestors(fctx, fromline, toline):
+    """Yield ancestors of `fctx` with respect to the block of lines within
+    `fromline`-`toline` range.
+    """
+    diffopts = patch.diffopts(fctx._repo.ui)
+    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
+    while visit:
+        c, linerange2 = visit.pop(max(visit))
+        pl = c.parents()
+        if not pl:
+            # The block originates from the initial revision.
+            yield c
+            continue
+        inrange = False
+        for p in pl:
+            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
+            inrange = inrange or inrangep
+            if linerange1[0] == linerange1[1]:
+                # Parent's linerange is empty, meaning that the block got
+                # introduced in this revision; no need to go further in this
+                # branch.
+                continue
+            visit[p.linkrev(), p.filenode()] = p, linerange1
+        if inrange:
+            yield c
+
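blockancestors keeps its pending ancestors in a plain dict keyed by (linkrev, filenode) and always pops the maximum key, so the most recently linked candidate is examined first. A reduced model of that scheduling trick, with hypothetical keys and values:

  # pop-highest-key scheduling as used by blockancestors above
  visit = {(5, 'f5'): 'ctx5', (3, 'f3'): 'ctx3', (8, 'f8'): 'ctx8'}
  order = []
  while visit:
      order.append(visit.pop(max(visit)))
  print(order)    # ['ctx8', 'ctx5', 'ctx3']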
 class committablectx(basectx):
     """A committablectx object provides common functionality for a context that
     wants the ability to commit, e.g. workingctx or memctx."""
@@ -1231,23 +1273,13 @@
         """
         parents = self.parents()
 
-        man1 = parents[0].manifest()
-        man = man1.copy()
-        if len(parents) > 1:
-            man2 = self.p2().manifest()
-            def getman(f):
-                if f in man1:
-                    return man1
-                return man2
-        else:
-            getman = lambda f: man1
+        man = parents[0].manifest().copy()
 
-        copied = self._repo.dirstate.copies()
         ff = self._flagfunc
-        for i, l in (("a", self._status.added), ("m", self._status.modified)):
+        for i, l in ((addednodeid, self._status.added),
+                     (modifiednodeid, self._status.modified)):
             for f in l:
-                orig = copied.get(f, f)
-                man[f] = getman(orig).get(orig, nullid) + i
+                man[f] = i
                 try:
                     man.setflag(f, ff(f))
                 except OSError:
@@ -1582,7 +1614,7 @@
         """
         mf = self._repo['.']._manifestmatches(match, s)
         for f in s.modified + s.added:
-            mf[f] = _newnode
+            mf[f] = newnodeid
             mf.setflag(f, self.flags(f))
         for f in s.removed:
             if f in mf:
@@ -1982,3 +2014,101 @@
     def write(self, data, flags):
         """wraps repo.wwrite"""
         self._data = data
+
+class metadataonlyctx(committablectx):
+    """Like memctx but it's reusing the manifest of different commit.
+    Intended to be used by lightweight operations that are creating
+    metadata-only changes.
+
+    Revision information is supplied at initialization time.  'repo' is the
+    current localrepo, 'ctx' is the original revision whose manifest we're
+    reusing, 'parents' is a sequence of two parent revision identifiers (pass
+    None for every missing parent), and 'text' is the commit message.
+
+    user receives the committer name and defaults to current repository
+    username, date is the commit date in any format supported by
+    util.parsedate() and defaults to current date, extra is a dictionary of
+    metadata or is left empty.
+    """
+    def __new__(cls, repo, originalctx, *args, **kwargs):
+        return super(metadataonlyctx, cls).__new__(cls, repo)
+
+    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
+                 extra=None, editor=False):
+        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
+        self._rev = None
+        self._node = None
+        self._originalctx = originalctx
+        self._manifestnode = originalctx.manifestnode()
+        parents = [(p or nullid) for p in parents]
+        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
+
+        # sanity check to ensure that the reused manifest parents are
+        # manifests of our commit parents
+        mp1, mp2 = self.manifestctx().parents
+        if p1.node() != nullid and p1.manifestctx().node() != mp1:
+            raise RuntimeError('can\'t reuse the manifest: '
+                               'its p1 doesn\'t match the new ctx p1')
+        if p2.node() != nullid and p2.manifestctx().node() != mp2:
+            raise RuntimeError('can\'t reuse the manifest: '
+                               'its p2 doesn\'t match the new ctx p2')
+
+        self._files = originalctx.files()
+        self.substate = {}
+
+        if extra:
+            self._extra = extra.copy()
+        else:
+            self._extra = {}
+
+        if self._extra.get('branch', '') == '':
+            self._extra['branch'] = 'default'
+
+        if editor:
+            self._text = editor(self._repo, self, [])
+            self._repo.savecommitmessage(self._text)
+
+    def manifestnode(self):
+        return self._manifestnode
+
+    @propertycache
+    def _manifestctx(self):
+        return self._repo.manifestlog[self._manifestnode]
+
+    def filectx(self, path, filelog=None):
+        return self._originalctx.filectx(path, filelog=filelog)
+
+    def commit(self):
+        """commit context to the repo"""
+        return self._repo.commitctx(self)
+
+    @property
+    def _manifest(self):
+        return self._originalctx.manifest()
+
+    @propertycache
+    def _status(self):
+        """Calculate exact status from ``files`` specified in the ``origctx``
+        and parents manifests.
+        """
+        man1 = self.p1().manifest()
+        p2 = self._parents[1]
+        # "1 < len(self._parents)" can't be used for checking
+        # existence of the 2nd parent, because "metadataonlyctx._parents" is
+        # explicitly initialized as a list whose length is always 2.
+        if p2.node() != nullid:
+            man2 = p2.manifest()
+            managing = lambda f: f in man1 or f in man2
+        else:
+            managing = lambda f: f in man1
+
+        modified, added, removed = [], [], []
+        for f in self._files:
+            if not managing(f):
+                added.append(f)
+            elif self[f]:
+                modified.append(f)
+            else:
+                removed.append(f)
+
+        return scmutil.status(modified, added, removed, [], [], [], [])
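As a usage sketch (not part of this changeset), metadataonlyctx lets an operation reword a commit without rebuilding its manifest; this assumes an already-open localrepository bound to repo:

  from mercurial import context

  # hypothetical: recreate '.' with a new message, reusing its manifest
  old = repo['.']    # assumes `repo` is an open localrepository
  new = context.metadataonlyctx(repo, old,
                                parents=(old.p1().node(), old.p2().node()),
                                text='reworded commit message',
                                user=old.user(), date=old.date(),
                                extra=old.extra())
  newnode = new.commit()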
--- a/mercurial/copies.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/copies.py	Wed Jan 18 11:43:36 2017 -0500
@@ -278,7 +278,7 @@
         ac = repo.changelog.ancestors(revs, inclusive=True)
         ctx._ancestrycontext = ac
     def makectx(f, n):
-        if len(n) != 20:  # in a working context?
+        if n in node.wdirnodes:  # in a working context?
             if ctx.rev() is None:
                 return ctx.filectx(f)
             return repo[None][f]
@@ -310,8 +310,8 @@
     Find moves and copies between context c1 and c2 that are relevant
     for merging. 'base' will be used as the merge base.
 
-    Returns four dicts: "copy", "movewithdir", "diverge", and
-    "renamedelete".
+    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
+    "dirmove".
 
     "copy" is a mapping from destination name -> source name,
     where source is in c1 and destination is in c2 or vice-versa.
@@ -326,20 +326,24 @@
 
     "renamedelete" is a mapping of source name -> list of destination
     names for files deleted in c1 that were renamed in c2 or vice-versa.
+
+    "dirmove" is a mapping of detected source dir -> destination dir renames.
+    This is needed for handling changes to new files previously grafted into
+    renamed directories.
     """
     # avoid silly behavior for update from empty dir
     if not c1 or not c2 or c1 == c2:
-        return {}, {}, {}, {}
+        return {}, {}, {}, {}, {}
 
     # avoid silly behavior for parent -> working dir
     if c2.node() is None and c1.node() == repo.dirstate.p1():
-        return repo.dirstate.copies(), {}, {}, {}
+        return repo.dirstate.copies(), {}, {}, {}, {}
 
     # Copy trace disabling is explicitly below the node == p1 logic above
     # because the logic above is required for a simple copy to be kept across a
     # rebase.
     if repo.ui.configbool('experimental', 'disablecopytrace'):
-        return {}, {}, {}, {}
+        return {}, {}, {}, {}, {}
 
     # In certain scenarios (e.g. graft, update or rebase), base can be
     # overridden We still need to know a real common ancestor in this case We
@@ -365,7 +369,7 @@
     limit = _findlimit(repo, c1.rev(), c2.rev())
     if limit is None:
         # no common ancestor, no copies
-        return {}, {}, {}, {}
+        return {}, {}, {}, {}, {}
     repo.ui.debug("  searching for copies back to rev %d\n" % limit)
 
     m1 = c1.manifest()
@@ -503,7 +507,7 @@
     del divergeset
 
     if not fullcopy:
-        return copy, {}, diverge, renamedelete
+        return copy, {}, diverge, renamedelete, {}
 
     repo.ui.debug("  checking for directory renames\n")
 
@@ -541,7 +545,7 @@
     del d1, d2, invalid
 
     if not dirmove:
-        return copy, {}, diverge, renamedelete
+        return copy, {}, diverge, renamedelete, {}
 
     for d in dirmove:
         repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
@@ -561,7 +565,7 @@
                                        "dst: '%s'\n") % (f, df))
                     break
 
-    return copy, movewithdir, diverge, renamedelete
+    return copy, movewithdir, diverge, renamedelete, dirmove
 
 def _related(f1, f2, limit):
     """return True if f1 and f2 filectx have a common ancestor
--- a/mercurial/crecord.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/crecord.py	Wed Jan 18 11:43:36 2017 -0500
@@ -14,14 +14,13 @@
 import os
 import re
 import signal
-import struct
-import sys
 
 from .i18n import _
 from . import (
     encoding,
     error,
     patch as patchmod,
+    scmutil,
     util,
 )
 stringio = util.stringio
@@ -52,11 +51,7 @@
 
 try:
     import curses
-    import fcntl
-    import termios
     curses.error
-    fcntl.ioctl
-    termios.TIOCGWINSZ
 except ImportError:
     # I have no idea if wcurses works with crecord...
     try:
@@ -75,8 +70,6 @@
     """
     return curses and ui.interface("chunkselector") == "curses"
 
-_origstdout = sys.__stdout__ # used by gethw()
-
 class patchnode(object):
     """abstract class for patch graph nodes
     (i.e. patchroot, header, hunk, hunkline)
@@ -440,7 +433,7 @@
     def __repr__(self):
         return '<hunk %r@%d>' % (self.filename(), self.fromline)
 
-def filterpatch(ui, chunks, chunkselector):
+def filterpatch(ui, chunks, chunkselector, operation=None):
     """interactively filter patch chunks into applied-only chunks"""
     chunks = list(chunks)
     # convert chunks list into structure suitable for displaying/modifying
@@ -453,7 +446,7 @@
     uiheaders = [uiheader(h) for h in headers]
     # let user choose headers/hunks/lines, and mark their applied flags
     # accordingly
-    ret = chunkselector(ui, uiheaders)
+    ret = chunkselector(ui, uiheaders, operation=operation)
     appliedhunklist = []
     for hdr in uiheaders:
         if (hdr.applied and
@@ -473,25 +466,13 @@
 
     return (appliedhunklist, ret)
 
-def gethw():
-    """
-    magically get the current height and width of the window (without initscr)
-
-    this is a rip-off of a rip-off - taken from the bpython code.  it is
-    useful / necessary because otherwise curses.initscr() must be called,
-    which can leave the terminal in a nasty state after exiting.
-    """
-    h, w = struct.unpack(
-        "hhhh", fcntl.ioctl(_origstdout, termios.TIOCGWINSZ, "\000"*8))[0:2]
-    return h, w
-
-def chunkselector(ui, headerlist):
+def chunkselector(ui, headerlist, operation=None):
     """
     curses interface to get selection of chunks, and mark the applied flags
     of the chosen chunks.
     """
     ui.write(_('starting interactive selection\n'))
-    chunkselector = curseschunkselector(headerlist, ui)
+    chunkselector = curseschunkselector(headerlist, ui, operation)
     f = signal.getsignal(signal.SIGTSTP)
     curses.wrapper(chunkselector.main)
     if chunkselector.initerr is not None:
@@ -505,12 +486,12 @@
         return f(testfn, *args, **kwargs)
     return u
 
-def testchunkselector(testfn, ui, headerlist):
+def testchunkselector(testfn, ui, headerlist, operation=None):
     """
     test interface to get selection of chunks, and mark the applied flags
     of the chosen chunks.
     """
-    chunkselector = curseschunkselector(headerlist, ui)
+    chunkselector = curseschunkselector(headerlist, ui, operation)
     if testfn and os.path.exists(testfn):
         testf = open(testfn)
         testcommands = map(lambda x: x.rstrip('\n'), testf.readlines())
@@ -520,8 +501,14 @@
                 break
     return chunkselector.opts
 
+_headermessages = { # {operation: text}
+    'revert': _('Select hunks to revert'),
+    'discard': _('Select hunks to discard'),
+    None: _('Select hunks to record'),
+}
+
 class curseschunkselector(object):
-    def __init__(self, headerlist, ui):
+    def __init__(self, headerlist, ui, operation=None):
         # put the headers into a patch object
         self.headerlist = patch(headerlist)
 
@@ -560,7 +547,7 @@
         # keeps track of the number of lines in the pad
         self.numpadlines = None
 
-        self.numstatuslines = 2
+        self.numstatuslines = 1
 
         # keep a running count of the number of lines printed to the pad
         # (used for determining when the selected item begins/ends)
@@ -575,6 +562,11 @@
         # if the last 'toggle all' command caused all changes to be applied
         self.waslasttoggleallapplied = True
 
+        # affects some ui text
+        if operation not in _headermessages:
+            raise RuntimeError('unexpected operation: %s' % operation)
+        self.operation = operation
+
     def uparrowevent(self):
         """
         try to select the previous item to the current item that has the
@@ -962,6 +954,45 @@
         self.linesprintedtopadsofar += linesprinted
         return t
 
+    def _getstatuslinesegments(self):
+        """-> [str]. return segments"""
+        selected = self.currentselecteditem.applied
+        segments = [
+            _headermessages[self.operation],
+            '-',
+            _('[x]=selected **=collapsed'),
+            _('c: confirm'),
+            _('q: abort'),
+            _('arrow keys: move/expand/collapse'),
+            _('space: deselect') if selected else _('space: select'),
+            _('?: help'),
+        ]
+        return segments
+
+    def _getstatuslines(self):
+        """() -> [str]. return short help used in the top status window"""
+        if self.errorstr is not None:
+            lines = [self.errorstr, _('Press any key to continue')]
+        else:
+            # wrap segments to lines
+            segments = self._getstatuslinesegments()
+            width = self.xscreensize
+            lines = []
+            lastwidth = width
+            for s in segments:
+                w = encoding.colwidth(s)
+                sep = ' ' * (1 + (s and s[0] not in '-['))
+                if lastwidth + w + len(sep) >= width:
+                    lines.append(s)
+                    lastwidth = w
+                else:
+                    lines[-1] += sep + s
+                    lastwidth += w + len(sep)
+        if len(lines) != self.numstatuslines:
+            self.numstatuslines = len(lines)
+            self.statuswin.resize(self.numstatuslines, self.xscreensize)
+        return [util.ellipsis(l, self.xscreensize - 1) for l in lines]
+
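The loop above packs help segments greedily into status lines, opening a new line whenever the next segment plus its separator would overflow the screen width. A reduced, self-contained model of the same loop, with len() standing in for encoding.colwidth and the separator rule simplified to a single space:

  def wrap(segments, width):
      # lastwidth starts at `width` so the first segment always opens a
      # new line, mirroring _getstatuslines above
      lines, lastwidth = [], width
      for s in segments:
          sep = ' ' if lines else ''
          if lastwidth + len(s) + len(sep) >= width:
              lines.append(s)
              lastwidth = len(s)
          else:
              lines[-1] += sep + s
              lastwidth += len(s) + len(sep)
      return lines

  print(wrap(['Select hunks to record', '-', 'c: confirm', 'q: abort'], 30))
  # ['Select hunks to record -', 'c: confirm q: abort']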
     def updatescreen(self):
         self.statuswin.erase()
         self.chunkpad.erase()
@@ -970,25 +1001,13 @@
 
         # print out the status lines at the top
         try:
-            if self.errorstr is not None:
-                printstring(self.statuswin, self.errorstr, pairname='legend')
-                printstring(self.statuswin, 'Press any key to continue',
-                            pairname='legend')
-                self.statuswin.refresh()
-                return
-            line1 = ("SELECT CHUNKS: (j/k/up/dn/pgup/pgdn) move cursor; "
-                   "(space/A) toggle hunk/all; (e)dit hunk;")
-            line2 = (" (f)old/unfold; (c)onfirm applied; (q)uit; (?) help "
-                   "| [X]=hunk applied **=folded, toggle [a]mend mode")
-
-            printstring(self.statuswin,
-                        util.ellipsis(line1, self.xscreensize - 1),
-                        pairname="legend")
-            printstring(self.statuswin,
-                        util.ellipsis(line2, self.xscreensize - 1),
-                        pairname="legend")
+            for line in self._getstatuslines():
+                printstring(self.statuswin, line, pairname="legend")
+            self.statuswin.refresh()
         except curses.error:
             pass
+        if self.errorstr is not None:
+            return
 
         # print out the patch in the remaining part of the window
         try:
@@ -996,14 +1015,11 @@
             self.updatescroll()
             self.chunkpad.refresh(self.firstlineofpadtoprint, 0,
                                   self.numstatuslines, 0,
-                                  self.yscreensize + 1 - self.numstatuslines,
+                                  self.yscreensize - self.numstatuslines,
                                   self.xscreensize)
         except curses.error:
             pass
 
-        # refresh([pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol])
-        self.statuswin.refresh()
-
     def getstatusprefixstring(self, item):
         """
         create a string to prefix a line with which indicates whether 'item'
@@ -1259,7 +1275,7 @@
         "handle window resizing"
         try:
             curses.endwin()
-            self.yscreensize, self.xscreensize = gethw()
+            self.xscreensize, self.yscreensize = scmutil.termsize(self.ui)
             self.statuswin.resize(self.numstatuslines, self.xscreensize)
             self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
             self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
@@ -1321,7 +1337,8 @@
 
     def helpwindow(self):
         "print a help window to the screen.  exit after any keypress."
-        helptext = """            [press any key to return to the patch-display]
+        helptext = _(
+            """            [press any key to return to the patch-display]
 
 crecord allows you to interactively choose among the changes you have made,
 and confirm only those changes you select for further processing by the command
@@ -1345,7 +1362,7 @@
                       c : confirm selected changes
                       r : review/edit and confirm selected changes
                       q : quit without confirming (no changes will be made)
-                      ? : help (what you're currently reading)"""
+                      ? : help (what you're currently reading)""")
 
         helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
         helplines = helptext.split("\n")
@@ -1384,7 +1401,7 @@
     def reviewcommit(self):
         """ask for 'y' to be pressed to confirm selected. return True if
         confirmed."""
-        confirmtext = (
+        confirmtext = _(
 """if you answer yes to the following, the your currently chosen patch chunks
 will be loaded into an editor.  you may modify the patch from the editor, and
 save the changes if you wish to change the patch.  otherwise, you can just
@@ -1416,19 +1433,19 @@
         except ValueError:
             ver = 1
         if ver < 2.19:
-            msg = ("The amend option is unavailable with hg versions < 2.2\n\n"
-                   "Press any key to continue.")
+            msg = _("The amend option is unavailable with hg versions < 2.2\n\n"
+                    "Press any key to continue.")
         elif opts.get('amend') is None:
             opts['amend'] = True
-            msg = ("Amend option is turned on -- commiting the currently "
-                   "selected changes will not create a new changeset, but "
-                   "instead update the most recently committed changeset.\n\n"
-                   "Press any key to continue.")
+            msg = _("Amend option is turned on -- committing the currently "
+                    "selected changes will not create a new changeset, but "
+                    "instead update the most recently committed changeset.\n\n"
+                    "Press any key to continue.")
         elif opts.get('amend') is True:
             opts['amend'] = None
-            msg = ("Amend option is turned off -- commiting the currently "
-                   "selected changes will create a new changeset.\n\n"
-                   "Press any key to continue.")
+            msg = _("Amend option is turned off -- committing the currently "
+                    "selected changes will create a new changeset.\n\n"
+                    "Press any key to continue.")
         if not test:
             self.confirmationwindow(msg)
 
@@ -1571,6 +1588,8 @@
             return True
         elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
             self.toggleapply()
+            if self.ui.configbool('experimental', 'spacemovesdown'):
+                self.downarrowevent()
         elif keypressed in ['A']:
             self.toggleall()
         elif keypressed in ['e']:
@@ -1629,7 +1648,7 @@
         except curses.error:
             self.initerr = _('this diff is too large to be displayed')
             return
-        # initialize selecteitemendline (initial start-line is 0)
+        # initialize selecteditemendline (initial start-line is 0)
         self.selecteditemendline = self.getnumlinesdisplayed(
             self.currentselecteditem, recursechildren=False)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/debugcommands.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,877 @@
+# debugcommands.py - command processing for debug* commands
+#
+# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import operator
+import os
+import random
+
+from .i18n import _
+from .node import (
+    bin,
+    hex,
+    nullid,
+    short,
+)
+from . import (
+    bundle2,
+    changegroup,
+    cmdutil,
+    commands,
+    context,
+    dagparser,
+    dagutil,
+    error,
+    exchange,
+    extensions,
+    fileset,
+    hg,
+    localrepo,
+    lock as lockmod,
+    pycompat,
+    repair,
+    revlog,
+    scmutil,
+    setdiscovery,
+    simplemerge,
+    streamclone,
+    treediscovery,
+    util,
+)
+
+release = lockmod.release
+
+# We reuse the command table from commands because it is easier than
+# teaching dispatch about multiple tables.
+command = cmdutil.command(commands.table)
+
+@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
+def debugancestor(ui, repo, *args):
+    """find the ancestor revision of two revisions in a given index"""
+    if len(args) == 3:
+        index, rev1, rev2 = args
+        r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False), index)
+        lookup = r.lookup
+    elif len(args) == 2:
+        if not repo:
+            raise error.Abort(_('there is no Mercurial repository here '
+                                '(.hg not found)'))
+        rev1, rev2 = args
+        r = repo.changelog
+        lookup = repo.lookup
+    else:
+        raise error.Abort(_('either two or three arguments required'))
+    a = r.ancestor(lookup(rev1), lookup(rev2))
+    ui.write('%d:%s\n' % (r.rev(a), hex(a)))
+
+@command('debugapplystreamclonebundle', [], 'FILE')
+def debugapplystreamclonebundle(ui, repo, fname):
+    """apply a stream clone bundle file"""
+    f = hg.openpath(ui, fname)
+    gen = exchange.readbundle(ui, f, fname)
+    gen.apply(repo)
+
+@command('debugbuilddag',
+    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
+    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
+    ('n', 'new-file', None, _('add new file at each rev'))],
+    _('[OPTION]... [TEXT]'))
+def debugbuilddag(ui, repo, text=None,
+                  mergeable_file=False,
+                  overwritten_file=False,
+                  new_file=False):
+    """builds a repo with a given DAG from scratch in the current empty repo
+
+    The description of the DAG is read from stdin if not given on the
+    command line.
+
+    Elements:
+
+     - "+n" is a linear run of n nodes based on the current default parent
+     - "." is a single node based on the current default parent
+     - "$" resets the default parent to null (implied at the start);
+           otherwise the default parent is always the last node created
+     - "<p" sets the default parent to the backref p
+     - "*p" is a fork at parent p, which is a backref
+     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
+     - "/p2" is a merge of the preceding node and p2
+     - ":tag" defines a local tag for the preceding node
+     - "@branch" sets the named branch for subsequent nodes
+     - "#...\\n" is a comment up to the end of the line
+
+    Whitespace between the above elements is ignored.
+
+    A backref is either
+
+     - a number n, which references the node curr-n, where curr is the current
+       node, or
+     - the name of a local tag you placed earlier using ":tag", or
+     - empty to denote the default parent.
+
+    All string-valued elements are either strictly alphanumeric, or must
+    be enclosed in double quotes ("..."), with "\\" as escape character.
+    """
+
+    if text is None:
+        ui.status(_("reading DAG from stdin\n"))
+        text = ui.fin.read()
+
+    cl = repo.changelog
+    if len(cl) > 0:
+        raise error.Abort(_('repository is not empty'))
+
+    # determine number of revs in DAG
+    total = 0
+    for type, data in dagparser.parsedag(text):
+        if type == 'n':
+            total += 1
+
+    if mergeable_file:
+        linesperrev = 2
+        # make a file with k lines per rev
+        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
+        initialmergedlines.append("")
+
+    tags = []
+
+    wlock = lock = tr = None
+    try:
+        wlock = repo.wlock()
+        lock = repo.lock()
+        tr = repo.transaction("builddag")
+
+        at = -1
+        atbranch = 'default'
+        nodeids = []
+        id = 0
+        ui.progress(_('building'), id, unit=_('revisions'), total=total)
+        for type, data in dagparser.parsedag(text):
+            if type == 'n':
+                ui.note(('node %s\n' % str(data)))
+                id, ps = data
+
+                files = []
+                fctxs = {}
+
+                p2 = None
+                if mergeable_file:
+                    fn = "mf"
+                    p1 = repo[ps[0]]
+                    if len(ps) > 1:
+                        p2 = repo[ps[1]]
+                        pa = p1.ancestor(p2)
+                        base, local, other = [x[fn].data() for x in (pa, p1,
+                                                                     p2)]
+                        m3 = simplemerge.Merge3Text(base, local, other)
+                        ml = [l.strip() for l in m3.merge_lines()]
+                        ml.append("")
+                    elif at > 0:
+                        ml = p1[fn].data().split("\n")
+                    else:
+                        ml = initialmergedlines
+                    ml[id * linesperrev] += " r%i" % id
+                    mergedtext = "\n".join(ml)
+                    files.append(fn)
+                    fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
+
+                if overwritten_file:
+                    fn = "of"
+                    files.append(fn)
+                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
+
+                if new_file:
+                    fn = "nf%i" % id
+                    files.append(fn)
+                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
+                    if len(ps) > 1:
+                        if not p2:
+                            p2 = repo[ps[1]]
+                        for fn in p2:
+                            if fn.startswith("nf"):
+                                files.append(fn)
+                                fctxs[fn] = p2[fn]
+
+                def fctxfn(repo, cx, path):
+                    return fctxs.get(path)
+
+                if len(ps) == 0 or ps[0] < 0:
+                    pars = [None, None]
+                elif len(ps) == 1:
+                    pars = [nodeids[ps[0]], None]
+                else:
+                    pars = [nodeids[p] for p in ps]
+                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
+                                    date=(id, 0),
+                                    user="debugbuilddag",
+                                    extra={'branch': atbranch})
+                nodeid = repo.commitctx(cx)
+                nodeids.append(nodeid)
+                at = id
+            elif type == 'l':
+                id, name = data
+                ui.note(('tag %s\n' % name))
+                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
+            elif type == 'a':
+                ui.note(('branch %s\n' % data))
+                atbranch = data
+            ui.progress(_('building'), id, unit=_('revisions'), total=total)
+        tr.close()
+
+        if tags:
+            repo.vfs.write("localtags", "".join(tags))
+    finally:
+        ui.progress(_('building'), None)
+        release(tr, lock, wlock)
+
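Since debugbuilddag feeds its TEXT argument straight to dagparser.parsedag, the mini-language described in the docstring can be exercised directly; an illustrative run with an invented input string:

  from mercurial import dagparser

  # '+2' = two linear nodes, '*1' = fork at backref 1, '/2' = merge of the
  # preceding node with backref 2, ':mytag' = local tag on that node
  for kind, data in dagparser.parsedag('+2 *1 /2 :mytag'):
      print(kind, data)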
+def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
+    indent_string = ' ' * indent
+    if all:
+        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
+                 % indent_string)
+
+        def showchunks(named):
+            ui.write("\n%s%s\n" % (indent_string, named))
+            chain = None
+            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
+                node = chunkdata['node']
+                p1 = chunkdata['p1']
+                p2 = chunkdata['p2']
+                cs = chunkdata['cs']
+                deltabase = chunkdata['deltabase']
+                delta = chunkdata['delta']
+                ui.write("%s%s %s %s %s %s %s\n" %
+                         (indent_string, hex(node), hex(p1), hex(p2),
+                          hex(cs), hex(deltabase), len(delta)))
+                chain = node
+
+        chunkdata = gen.changelogheader()
+        showchunks("changelog")
+        chunkdata = gen.manifestheader()
+        showchunks("manifest")
+        for chunkdata in iter(gen.filelogheader, {}):
+            fname = chunkdata['filename']
+            showchunks(fname)
+    else:
+        if isinstance(gen, bundle2.unbundle20):
+            raise error.Abort(_('use debugbundle2 for this file'))
+        chunkdata = gen.changelogheader()
+        chain = None
+        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
+            node = chunkdata['node']
+            ui.write("%s%s\n" % (indent_string, hex(node)))
+            chain = node
+
+def _debugbundle2(ui, gen, all=None, **opts):
+    """lists the contents of a bundle2"""
+    if not isinstance(gen, bundle2.unbundle20):
+        raise error.Abort(_('not a bundle2 file'))
+    ui.write(('Stream params: %s\n' % repr(gen.params)))
+    for part in gen.iterparts():
+        ui.write('%s -- %r\n' % (part.type, part.params))
+        if part.type == 'changegroup':
+            version = part.params.get('version', '01')
+            cg = changegroup.getunbundler(version, part, 'UN')
+            _debugchangegroup(ui, cg, all=all, indent=4, **opts)
+
+@command('debugbundle',
+        [('a', 'all', None, _('show all details')),
+         ('', 'spec', None, _('print the bundlespec of the bundle'))],
+        _('FILE'),
+        norepo=True)
+def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
+    """lists the contents of a bundle"""
+    with hg.openpath(ui, bundlepath) as f:
+        if spec:
+            spec = exchange.getbundlespec(ui, f)
+            ui.write('%s\n' % spec)
+            return
+
+        gen = exchange.readbundle(ui, f, bundlepath)
+        if isinstance(gen, bundle2.unbundle20):
+            return _debugbundle2(ui, gen, all=all, **opts)
+        _debugchangegroup(ui, gen, all=all, **opts)
+
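As a hedged usage sketch of the reading path above, the same helpers can be
driven programmatically; 'bundle.hg' is assumed to have been produced
beforehand, e.g. by ``hg bundle``::

    from mercurial import exchange, hg
    from mercurial import ui as uimod

    ui = uimod.ui()
    with hg.openpath(ui, 'bundle.hg') as f:
        gen = exchange.readbundle(ui, f, 'bundle.hg')
        # iterparts() exists only on bundle2 (unbundle20) objects
        for part in gen.iterparts():
            ui.write('%s -- %r\n' % (part.type, part.params))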
+@command('debugcheckstate', [], '')
+def debugcheckstate(ui, repo):
+    """validate the correctness of the current dirstate"""
+    parent1, parent2 = repo.dirstate.parents()
+    m1 = repo[parent1].manifest()
+    m2 = repo[parent2].manifest()
+    errors = 0
+    for f in repo.dirstate:
+        state = repo.dirstate[f]
+        if state in "nr" and f not in m1:
+            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
+            errors += 1
+        if state in "a" and f in m1:
+            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
+            errors += 1
+        if state in "m" and f not in m1 and f not in m2:
+            ui.warn(_("%s in state %s, but not in either manifest\n") %
+                    (f, state))
+            errors += 1
+    for f in m1:
+        state = repo.dirstate[f]
+        if state not in "nrm":
+            ui.warn(_("%s in manifest1, but listed as state %s\n") % (f, state))
+            errors += 1
+    if errors:
+        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
+        raise error.Abort(errstr)
+
+@command('debugcommands', [], _('[COMMAND]'), norepo=True)
+def debugcommands(ui, cmd='', *args):
+    """list all available commands and options"""
+    for cmd, vals in sorted(commands.table.iteritems()):
+        cmd = cmd.split('|')[0].strip('^')
+        opts = ', '.join([i[1] for i in vals[1]])
+        ui.write('%s: %s\n' % (cmd, opts))
+
+@command('debugcomplete',
+    [('o', 'options', None, _('show the command options'))],
+    _('[-o] CMD'),
+    norepo=True)
+def debugcomplete(ui, cmd='', **opts):
+    """returns the completion list associated with the given command"""
+
+    if opts.get('options'):
+        options = []
+        otables = [commands.globalopts]
+        if cmd:
+            aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
+            otables.append(entry[1])
+        for t in otables:
+            for o in t:
+                if "(DEPRECATED)" in o[3]:
+                    continue
+                if o[0]:
+                    options.append('-%s' % o[0])
+                options.append('--%s' % o[1])
+        ui.write("%s\n" % "\n".join(options))
+        return
+
+    cmdlist, unused_allcmds = cmdutil.findpossible(cmd, commands.table)
+    if ui.verbose:
+        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
+    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
+
+@command('debugcreatestreamclonebundle', [], 'FILE')
+def debugcreatestreamclonebundle(ui, repo, fname):
+    """create a stream clone bundle file
+
+    Stream bundles are special bundles that are essentially archives of
+    revlog files. They are commonly used for cloning very quickly.
+    """
+    requirements, gen = streamclone.generatebundlev1(repo)
+    changegroup.writechunks(ui, gen, fname)
+
+    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
+
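The command body above is already a two-line recipe; as a sketch, the same
stream clone bundle can be written from any code that holds ``ui`` and
``repo`` objects (both assumed here)::

    from mercurial import changegroup, streamclone

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, 'stream.hg')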
+@command('debugdag',
+    [('t', 'tags', None, _('use tags as labels')),
+    ('b', 'branches', None, _('annotate with branch names')),
+    ('', 'dots', None, _('use dots for runs')),
+    ('s', 'spaces', None, _('separate elements by spaces'))],
+    _('[OPTION]... [FILE [REV]...]'),
+    optionalrepo=True)
+def debugdag(ui, repo, file_=None, *revs, **opts):
+    """format the changelog or an index DAG as a concise textual description
+
+    If you pass a revlog index, the revlog's DAG is emitted. If you list
+    revision numbers, they get labeled in the output as rN.
+
+    Otherwise, the changelog DAG of the current repo is emitted.
+    """
+    spaces = opts.get('spaces')
+    dots = opts.get('dots')
+    if file_:
+        rlog = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False),
+                             file_)
+        revs = set(int(r) for r in revs)
+        def events():
+            for r in rlog:
+                yield 'n', (r, list(p for p in rlog.parentrevs(r)
+                                        if p != -1))
+                if r in revs:
+                    yield 'l', (r, "r%i" % r)
+    elif repo:
+        cl = repo.changelog
+        tags = opts.get('tags')
+        branches = opts.get('branches')
+        if tags:
+            labels = {}
+            for l, n in repo.tags().items():
+                labels.setdefault(cl.rev(n), []).append(l)
+        def events():
+            b = "default"
+            for r in cl:
+                if branches:
+                    newb = cl.read(cl.node(r))[5]['branch']
+                    if newb != b:
+                        yield 'a', newb
+                        b = newb
+                yield 'n', (r, list(p for p in cl.parentrevs(r)
+                                        if p != -1))
+                if tags:
+                    ls = labels.get(r)
+                    if ls:
+                        for l in ls:
+                            yield 'l', (r, l)
+    else:
+        raise error.Abort(_('need repo for changelog dag'))
+
+    for line in dagparser.dagtextlines(events(),
+                                       addspaces=spaces,
+                                       wraplabels=True,
+                                       wrapannotations=True,
+                                       wrapnonlinear=dots,
+                                       usedots=dots,
+                                       maxlinewidth=70):
+        ui.write(line)
+        ui.write("\n")
+
+@command('debugdata', commands.debugrevlogopts, _('-c|-m|FILE REV'))
+def debugdata(ui, repo, file_, rev=None, **opts):
+    """dump the contents of a data file revision"""
+    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
+        if rev is not None:
+            raise error.CommandError('debugdata', _('invalid arguments'))
+        file_, rev = None, file_
+    elif rev is None:
+        raise error.CommandError('debugdata', _('invalid arguments'))
+    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
+    try:
+        ui.write(r.revision(r.lookup(rev), raw=True))
+    except KeyError:
+        raise error.Abort(_('invalid revision identifier %s') % rev)
+
+@command('debugdate',
+    [('e', 'extended', None, _('try extended date formats'))],
+    _('[-e] DATE [RANGE]'),
+    norepo=True)
+def debugdate(ui, date, range=None, **opts):
+    """parse and display a date"""
+    if opts["extended"]:
+        d = util.parsedate(date, util.extendeddateformats)
+    else:
+        d = util.parsedate(date)
+    ui.write(("internal: %s %s\n") % d)
+    ui.write(("standard: %s\n") % util.datestr(d))
+    if range:
+        m = util.matchdate(range)
+        ui.write(("match: %s\n") % m(d[0]))
+
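A minimal sketch of the date helpers exercised above; the input string is
assumed to be one of the formats util.parsedate() accepts::

    from mercurial import util

    d = util.parsedate('2017-01-18 11:43:36 -0500')
    print d                # internal (unixtime, tzoffset) pair
    print util.datestr(d)  # formatted back to a date string
    print util.matchdate('>2017-01-01')(d[0])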
+@command('debugdeltachain',
+    commands.debugrevlogopts + commands.formatteropts,
+    _('-c|-m|FILE'),
+    optionalrepo=True)
+def debugdeltachain(ui, repo, file_=None, **opts):
+    """dump information about delta chains in a revlog
+
+    Output can be templatized. Available template keywords are:
+
+    :``rev``:       revision number
+    :``chainid``:   delta chain identifier (numbered by unique base)
+    :``chainlen``:  delta chain length to this revision
+    :``prevrev``:   previous revision in delta chain
+    :``deltatype``: role of delta / how it was computed
+    :``compsize``:  compressed size of revision
+    :``uncompsize``: uncompressed size of revision
+    :``chainsize``: total size of compressed revisions in chain
+    :``chainratio``: total chain size divided by uncompressed revision size
+                    (new delta chains typically start at ratio 2.00)
+    :``lindist``:   linear distance from base revision in delta chain to end
+                    of this revision
+    :``extradist``: total size of revisions not part of this delta chain from
+                    base of delta chain to end of this revision; a measurement
+                    of how much extra data we need to read/seek across to read
+                    the delta chain for this revision
+    :``extraratio``: extradist divided by chainsize; another representation of
+                    how much unrelated data is needed to load this delta chain
+    """
+    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
+    index = r.index
+    generaldelta = r.version & revlog.REVLOGGENERALDELTA
+
+    def revinfo(rev):
+        e = index[rev]
+        compsize = e[1]
+        uncompsize = e[2]
+        chainsize = 0
+
+        if generaldelta:
+            if e[3] == e[5]:
+                deltatype = 'p1'
+            elif e[3] == e[6]:
+                deltatype = 'p2'
+            elif e[3] == rev - 1:
+                deltatype = 'prev'
+            elif e[3] == rev:
+                deltatype = 'base'
+            else:
+                deltatype = 'other'
+        else:
+            if e[3] == rev:
+                deltatype = 'base'
+            else:
+                deltatype = 'prev'
+
+        chain = r._deltachain(rev)[0]
+        for iterrev in chain:
+            e = index[iterrev]
+            chainsize += e[1]
+
+        return compsize, uncompsize, deltatype, chain, chainsize
+
+    fm = ui.formatter('debugdeltachain', opts)
+
+    fm.plain('    rev  chain# chainlen     prev   delta       '
+             'size    rawsize  chainsize     ratio   lindist extradist '
+             'extraratio\n')
+
+    chainbases = {}
+    for rev in r:
+        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
+        chainbase = chain[0]
+        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
+        basestart = r.start(chainbase)
+        revstart = r.start(rev)
+        lineardist = revstart + comp - basestart
+        extradist = lineardist - chainsize
+        try:
+            prevrev = chain[-2]
+        except IndexError:
+            prevrev = -1
+
+        chainratio = float(chainsize) / float(uncomp)
+        extraratio = float(extradist) / float(chainsize)
+
+        fm.startitem()
+        fm.write('rev chainid chainlen prevrev deltatype compsize '
+                 'uncompsize chainsize chainratio lindist extradist '
+                 'extraratio',
+                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
+                 rev, chainid, len(chain), prevrev, deltatype, comp,
+                 uncomp, chainsize, chainratio, lineardist, extradist,
+                 extraratio,
+                 rev=rev, chainid=chainid, chainlen=len(chain),
+                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
+                 uncompsize=uncomp, chainsize=chainsize,
+                 chainratio=chainratio, lindist=lineardist,
+                 extradist=extradist, extraratio=extraratio)
+
+    fm.end()
+
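The fm.startitem()/fm.write()/fm.end() calls above follow the generic
formatter API; a minimal sketch with an assumed template option::

    from mercurial import ui as uimod

    ui = uimod.ui()
    fm = ui.formatter('sketch', {'template': '{rev}:{chainlen}\n'})
    fm.startitem()
    fm.write('rev chainlen', '%d %d\n', 0, 1)
    fm.end()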
+@command('debugdiscovery',
+    [('', 'old', None, _('use old-style discovery')),
+    ('', 'nonheads', None,
+     _('use old-style discovery with non-heads included')),
+    ] + commands.remoteopts,
+    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
+def debugdiscovery(ui, repo, remoteurl="default", **opts):
+    """runs the changeset discovery protocol in isolation"""
+    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
+                                      opts.get('branch'))
+    remote = hg.peer(repo, opts, remoteurl)
+    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
+
+    # make sure tests are repeatable
+    random.seed(12323)
+
+    def doit(localheads, remoteheads, remote=remote):
+        if opts.get('old'):
+            if localheads:
+                raise error.Abort(_('cannot use localheads with old style '
+                                    'discovery'))
+            if not util.safehasattr(remote, 'branches'):
+                # enable in-client legacy support
+                remote = localrepo.locallegacypeer(remote.local())
+            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
+                                                                force=True)
+            common = set(common)
+            if not opts.get('nonheads'):
+                ui.write(("unpruned common: %s\n") %
+                         " ".join(sorted(short(n) for n in common)))
+                dag = dagutil.revlogdag(repo.changelog)
+                all = dag.ancestorset(dag.internalizeall(common))
+                common = dag.externalizeall(dag.headsetofconnecteds(all))
+        else:
+            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
+        common = set(common)
+        rheads = set(hds)
+        lheads = set(repo.heads())
+        ui.write(("common heads: %s\n") %
+                 " ".join(sorted(short(n) for n in common)))
+        if lheads <= common:
+            ui.write(("local is subset\n"))
+        elif rheads <= common:
+            ui.write(("remote is subset\n"))
+
+    serverlogs = opts.get('serverlog')
+    if serverlogs:
+        for filename in serverlogs:
+            with open(filename, 'r') as logfile:
+                line = logfile.readline()
+                while line:
+                    parts = line.strip().split(';')
+                    op = parts[1]
+                    if op == 'cg':
+                        pass
+                    elif op == 'cgss':
+                        doit(parts[2].split(' '), parts[3].split(' '))
+                    elif op == 'unb':
+                        doit(parts[3].split(' '), parts[2].split(' '))
+                    line = logfile.readline()
+    else:
+        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
+                                                 opts.get('remote_head'))
+        localrevs = opts.get('local_head')
+        doit(localrevs, remoterevs)
+
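A hedged sketch of the discovery entry point used above; the repository path
and remote URL are assumptions::

    from mercurial import hg, setdiscovery
    from mercurial import ui as uimod

    ui = uimod.ui()
    repo = hg.repository(ui, '/tmp/local')
    remote = hg.peer(repo, {}, 'http://example.com/repo')
    common, anyinc, rheads = setdiscovery.findcommonheads(ui, repo, remote)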
+@command('debugextensions', commands.formatteropts, '', norepo=True)
+def debugextensions(ui, **opts):
+    '''show information about active extensions'''
+    exts = extensions.extensions(ui)
+    hgver = util.version()
+    fm = ui.formatter('debugextensions', opts)
+    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
+        isinternal = extensions.ismoduleinternal(extmod)
+        extsource = extmod.__file__
+        if isinternal:
+            exttestedwith = []  # never expose magic string to users
+        else:
+            exttestedwith = getattr(extmod, 'testedwith', '').split()
+        extbuglink = getattr(extmod, 'buglink', None)
+
+        fm.startitem()
+
+        if ui.quiet or ui.verbose:
+            fm.write('name', '%s\n', extname)
+        else:
+            fm.write('name', '%s', extname)
+            if isinternal or hgver in exttestedwith:
+                fm.plain('\n')
+            elif not exttestedwith:
+                fm.plain(_(' (untested!)\n'))
+            else:
+                lasttestedversion = exttestedwith[-1]
+                fm.plain(' (%s!)\n' % lasttestedversion)
+
+        fm.condwrite(ui.verbose and extsource, 'source',
+                 _('  location: %s\n'), extsource or "")
+
+        if ui.verbose:
+            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
+        fm.data(bundled=isinternal)
+
+        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
+                     _('  tested with: %s\n'),
+                     fm.formatlist(exttestedwith, name='ver'))
+
+        fm.condwrite(ui.verbose and extbuglink, 'buglink',
+                 _('  bug reporting: %s\n'), extbuglink or "")
+
+    fm.end()
+
+@command('debugfileset',
+    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
+    _('[-r REV] FILESPEC'))
+def debugfileset(ui, repo, expr, **opts):
+    '''parse and apply a fileset specification'''
+    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
+    if ui.verbose:
+        tree = fileset.parse(expr)
+        ui.note(fileset.prettyformat(tree), "\n")
+
+    for f in ctx.getfileset(expr):
+        ui.write("%s\n" % f)
+
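A sketch of evaluating a fileset against a changectx, as the command above
does; the expression is an arbitrary example and the current directory is
assumed to be a repository::

    from mercurial import hg
    from mercurial import ui as uimod

    ui = uimod.ui()
    repo = hg.repository(ui, '.')
    for f in repo['.'].getfileset('*.py and size(">1k")'):
        print f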
+@command('debugfsinfo', [], _('[PATH]'), norepo=True)
+def debugfsinfo(ui, path="."):
+    """show information detected about current filesystem"""
+    util.writefile('.debugfsinfo', '')
+    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
+    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
+    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
+    ui.write(('case-sensitive: %s\n') % (util.fscasesensitive('.debugfsinfo')
+                                and 'yes' or 'no'))
+    os.unlink('.debugfsinfo')
+
+@command('debuggetbundle',
+    [('H', 'head', [], _('id of head node'), _('ID')),
+    ('C', 'common', [], _('id of common node'), _('ID')),
+    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
+    _('REPO FILE [-H|-C ID]...'),
+    norepo=True)
+def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
+    """retrieves a bundle from a repo
+
+    Every ID must be a full-length hex node id string. Saves the bundle to the
+    given file.
+    """
+    repo = hg.peer(ui, opts, repopath)
+    if not repo.capable('getbundle'):
+        raise error.Abort(_("getbundle() not supported by target repository"))
+    args = {}
+    if common:
+        args['common'] = [bin(s) for s in common]
+    if head:
+        args['heads'] = [bin(s) for s in head]
+    # TODO: get desired bundlecaps from command line.
+    args['bundlecaps'] = None
+    bundle = repo.getbundle('debug', **args)
+
+    bundletype = opts.get('type', 'bzip2').lower()
+    btypes = {'none': 'HG10UN',
+              'bzip2': 'HG10BZ',
+              'gzip': 'HG10GZ',
+              'bundle2': 'HG20'}
+    bundletype = btypes.get(bundletype)
+    if bundletype not in bundle2.bundletypes:
+        raise error.Abort(_('unknown bundle type specified with --type'))
+    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
+
+@command('debugignore', [], '[FILE]')
+def debugignore(ui, repo, *files, **opts):
+    """display the combined ignore pattern and information about ignored files
+
+    With no argument display the combined ignore pattern.
+
+    Given space separated file names, show whether each given file is
+    ignored and, if so, show the ignore rule (file and line number) that
+    matched it.
+    """
+    ignore = repo.dirstate._ignore
+    if not files:
+        # Show all the patterns
+        includepat = getattr(ignore, 'includepat', None)
+        if includepat is not None:
+            ui.write("%s\n" % includepat)
+        else:
+            raise error.Abort(_("no ignore patterns found"))
+    else:
+        for f in files:
+            nf = util.normpath(f)
+            ignored = None
+            ignoredata = None
+            if nf != '.':
+                if ignore(nf):
+                    ignored = nf
+                    ignoredata = repo.dirstate._ignorefileandline(nf)
+                else:
+                    for p in util.finddirs(nf):
+                        if ignore(p):
+                            ignored = p
+                            ignoredata = repo.dirstate._ignorefileandline(p)
+                            break
+            if ignored:
+                if ignored == nf:
+                    ui.write(_("%s is ignored\n") % f)
+                else:
+                    ui.write(_("%s is ignored because of "
+                               "containing folder %s\n")
+                             % (f, ignored))
+                ignorefile, lineno, line = ignoredata
+                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
+                         % (ignorefile, lineno, line))
+            else:
+                ui.write(_("%s is not ignored\n") % f)
+
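The matcher consulted above is private API (note the leading underscores),
but the lookup can be sketched directly for an assumed ``repo`` object::

    ignore = repo.dirstate._ignore
    if ignore('build/output.o'):
        # (ignore file, line number, rule text) of the matching rule
        print repo.dirstate._ignorefileandline('build/output.o')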
+@command('debugindex', commands.debugrevlogopts +
+    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
+    _('[-f FORMAT] -c|-m|FILE'),
+    optionalrepo=True)
+def debugindex(ui, repo, file_=None, **opts):
+    """dump the contents of an index file"""
+    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
+    format = opts.get('format', 0)
+    if format not in (0, 1):
+        raise error.Abort(_("unknown format %d") % format)
+
+    generaldelta = r.version & revlog.REVLOGGENERALDELTA
+    if generaldelta:
+        basehdr = ' delta'
+    else:
+        basehdr = '  base'
+
+    if ui.debugflag:
+        shortfn = hex
+    else:
+        shortfn = short
+
+    # There might not be anything in r, so have a sane default
+    idlen = 12
+    for i in r:
+        idlen = len(shortfn(r.node(i)))
+        break
+
+    if format == 0:
+        ui.write(("   rev    offset  length " + basehdr + " linkrev"
+                 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
+    elif format == 1:
+        ui.write(("   rev flag   offset   length"
+                 "     size " + basehdr + "   link     p1     p2"
+                 " %s\n") % "nodeid".rjust(idlen))
+
+    for i in r:
+        node = r.node(i)
+        if generaldelta:
+            base = r.deltaparent(i)
+        else:
+            base = r.chainbase(i)
+        if format == 0:
+            try:
+                pp = r.parents(node)
+            except Exception:
+                pp = [nullid, nullid]
+            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
+                    i, r.start(i), r.length(i), base, r.linkrev(i),
+                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
+        elif format == 1:
+            pr = r.parentrevs(i)
+            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
+                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
+                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
+
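A sketch of walking a revlog index directly, as debugindex does; the
changelog path is an assumption about the store layout::

    from mercurial import pycompat, revlog, scmutil

    opener = scmutil.opener(pycompat.getcwd(), audit=False)
    r = revlog.revlog(opener, '.hg/store/00changelog.i')
    for i in r:
        print i, r.start(i), r.length(i), r.linkrev(i)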
+@command('debugindexdot', commands.debugrevlogopts,
+    _('-c|-m|FILE'), optionalrepo=True)
+def debugindexdot(ui, repo, file_=None, **opts):
+    """dump an index DAG as a graphviz dot file"""
+    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
+    ui.write(("digraph G {\n"))
+    for i in r:
+        node = r.node(i)
+        pp = r.parents(node)
+        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
+        if pp[1] != nullid:
+            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
+    ui.write("}\n")
+
+@command('debugupgraderepo', [
+    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
+    ('', 'run', False, _('performs an upgrade')),
+])
+def debugupgraderepo(ui, repo, run=False, optimize=None):
+    """upgrade a repository to use different features
+
+    If no arguments are specified, the repository is evaluated for upgrade
+    and a list of problems and potential optimizations is printed.
+
+    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
+    can be influenced via additional arguments. More details will be provided
+    by the command output when run without ``--run``.
+
+    During the upgrade, the repository will be locked and no writes will be
+    allowed.
+
+    At the end of the upgrade, the repository may not be readable while new
+    repository data is swapped in. This window will be as long as it takes to
+    rename some directories inside the ``.hg`` directory. On most machines, this
+    should complete almost instantaneously and the chances of a consumer being
+    unable to access the repository should be low.
+    """
+    return repair.upgraderepo(ui, repo, run=run, optimize=optimize)
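As a usage note, the underlying entry point can also be called directly from
code that holds ``ui`` and ``repo`` objects (both assumed); with run=False
nothing is modified and only the report is printed::

    from mercurial import repair

    repair.upgraderepo(ui, repo, run=False)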
--- a/mercurial/destutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/destutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -133,7 +133,7 @@
         assert node is not None, "'tip' exists even in empty repository"
     return node, movemark, None
 
-# order in which each step should be evalutated
+# order in which each step should be evaluated
 # steps are run until one finds a destination
 destupdatesteps = ['evolution', 'bookmark', 'branch', 'branchfallback']
 # mapping to ease extension overriding steps.
--- a/mercurial/dirstate.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/dirstate.py	Wed Jan 18 11:43:36 2017 -0500
@@ -21,6 +21,7 @@
     osutil,
     parsers,
     pathutil,
+    pycompat,
     scmutil,
     util,
 )
@@ -66,7 +67,7 @@
 
     This returns '(fp, is_pending_opened)' tuple.
     '''
-    if root == os.environ.get('HG_PENDING'):
+    if root == encoding.environ.get('HG_PENDING'):
         try:
             return (vfs('%s.pending' % filename), True)
         except IOError as inst:
@@ -215,7 +216,7 @@
 
     @propertycache
     def _slash(self):
-        return self._ui.configbool('ui', 'slash') and os.sep != '/'
+        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
 
     @propertycache
     def _checklink(self):
@@ -270,7 +271,7 @@
 
     @propertycache
     def _cwd(self):
-        return os.getcwd()
+        return pycompat.getcwd()
 
     def getcwd(self):
         '''Return the path from which a canonical path is calculated.
@@ -285,7 +286,7 @@
         # self._root ends with a path separator if self._root is '/' or 'C:\'
         rootsep = self._root
         if not util.endswithsep(rootsep):
-            rootsep += os.sep
+            rootsep += pycompat.ossep
         if cwd.startswith(rootsep):
             return cwd[len(rootsep):]
         else:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/dirstateguard.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,69 @@
+# dirstateguard.py - class to allow restoring dirstate after failure
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .i18n import _
+
+from . import (
+    error,
+)
+
+class dirstateguard(object):
+    '''Restore dirstate on unexpected failure.
+
+    At construction, this class:
+
+    - writes the current ``repo.dirstate`` out, and
+    - saves ``.hg/dirstate`` into the backup file
+
+    If ``release()`` is invoked before ``close()``, ``.hg/dirstate`` is
+    restored from the backup file.
+
+    If ``close()`` is invoked first, the backup file is simply removed.
+    '''
+
+    def __init__(self, repo, name):
+        self._repo = repo
+        self._active = False
+        self._closed = False
+        self._suffix = '.backup.%s.%d' % (name, id(self))
+        repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
+        self._active = True
+
+    def __del__(self):
+        if self._active: # still active
+            # this may occur, even if this class is used correctly:
+            # for example, releasing other resources like transaction
+            # may raise exception before ``dirstateguard.release`` in
+            # ``release(tr, ....)``.
+            self._abort()
+
+    def close(self):
+        if not self._active: # already inactivated
+            msg = (_("can't close already inactivated backup: dirstate%s")
+                   % self._suffix)
+            raise error.Abort(msg)
+
+        self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
+                                         self._suffix)
+        self._active = False
+        self._closed = True
+
+    def _abort(self):
+        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
+                                           self._suffix)
+        self._active = False
+
+    def release(self):
+        if not self._closed:
+            if not self._active: # already inactivated
+                msg = (_("can't release already inactivated backup:"
+                         " dirstate%s")
+                       % self._suffix)
+                raise error.Abort(msg)
+            self._abort()
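A hedged usage sketch of the class above; ``repo`` is assumed, and the
try/finally shape follows the documented close()/release() contract::

    from mercurial import dirstateguard

    guard = dirstateguard.dirstateguard(repo, 'sketch')
    try:
        # ... mutate repo.dirstate here ...
        guard.close()    # success: the backup is discarded
    finally:
        guard.release()  # if close() was never reached, restore the backup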
--- a/mercurial/dispatch.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/dispatch.py	Wed Jan 18 11:43:36 2017 -0500
@@ -10,12 +10,11 @@
 import atexit
 import difflib
 import errno
+import getopt
 import os
 import pdb
 import re
-import shlex
 import signal
-import socket
 import sys
 import time
 import traceback
@@ -25,7 +24,9 @@
 
 from . import (
     cmdutil,
+    color,
     commands,
+    debugcommands,
     demandimport,
     encoding,
     error,
@@ -35,7 +36,9 @@
     hg,
     hook,
     profiling,
+    pycompat,
     revset,
+    scmutil,
     templatefilters,
     templatekw,
     templater,
@@ -57,7 +60,7 @@
 
 def run():
     "run the command in sys.argv"
-    sys.exit((dispatch(request(sys.argv[1:])) or 0) & 255)
+    sys.exit((dispatch(request(pycompat.sysargv[1:])) or 0) & 255)
 
 def _getsimilar(symbols, value):
     sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
@@ -95,11 +98,11 @@
     elif req.ui:
         ferr = req.ui.ferr
     else:
-        ferr = sys.stderr
+        ferr = util.stderr
 
     try:
         if not req.ui:
-            req.ui = uimod.ui()
+            req.ui = uimod.ui.load()
         if '--traceback' in req.args:
             req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
 
@@ -216,30 +219,15 @@
     return callcatch(ui, _runcatchfunc)
 
 def callcatch(ui, func):
-    """call func() with global exception handling
-
-    return func() if no exception happens. otherwise do some error handling
-    and return an exit code accordingly.
+    """Like scmutil.callcatch(), but handles more high-level exceptions
+    related to config parsing and commands, and uses handlecommandexception
+    to handle uncaught exceptions.
     """
     try:
-        return func()
-    # Global exception handling, alphabetically
-    # Mercurial-specific first, followed by built-in and library exceptions
+        return scmutil.callcatch(ui, func)
     except error.AmbiguousCommand as inst:
         ui.warn(_("hg: command '%s' is ambiguous:\n    %s\n") %
                 (inst.args[0], " ".join(inst.args[1])))
-    except error.ParseError as inst:
-        _formatparse(ui.warn, inst)
-        return -1
-    except error.LockHeld as inst:
-        if inst.errno == errno.ETIMEDOUT:
-            reason = _('timed out waiting for lock held by %s') % inst.locker
-        else:
-            reason = _('lock held by %s') % inst.locker
-        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
-    except error.LockUnavailable as inst:
-        ui.warn(_("abort: could not lock %s: %s\n") %
-               (inst.desc or inst.filename, inst.strerror))
     except error.CommandError as inst:
         if inst.args[0]:
             ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
@@ -247,34 +235,9 @@
         else:
             ui.warn(_("hg: %s\n") % inst.args[1])
             commands.help_(ui, 'shortlist')
-    except error.OutOfBandError as inst:
-        if inst.args:
-            msg = _("abort: remote error:\n")
-        else:
-            msg = _("abort: remote error\n")
-        ui.warn(msg)
-        if inst.args:
-            ui.warn(''.join(inst.args))
-        if inst.hint:
-            ui.warn('(%s)\n' % inst.hint)
-    except error.RepoError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
-        if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
-    except error.ResponseError as inst:
-        ui.warn(_("abort: %s") % inst.args[0])
-        if not isinstance(inst.args[1], basestring):
-            ui.warn(" %r\n" % (inst.args[1],))
-        elif not inst.args[1]:
-            ui.warn(_(" empty string\n"))
-        else:
-            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
-    except error.CensoredNodeError as inst:
-        ui.warn(_("abort: file censored %s!\n") % inst)
-    except error.RevlogError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
-    except error.SignalInterrupt:
-        ui.warn(_("killed!\n"))
+    except error.ParseError as inst:
+        _formatparse(ui.warn, inst)
+        return -1
     except error.UnknownCommand as inst:
         ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
         try:
@@ -290,61 +253,11 @@
                     suggested = True
             if not suggested:
                 commands.help_(ui, 'shortlist')
-    except error.InterventionRequired as inst:
-        ui.warn("%s\n" % inst)
-        if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
-        return 1
-    except error.Abort as inst:
-        ui.warn(_("abort: %s\n") % inst)
-        if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
-    except ImportError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
-        m = str(inst).split()[-1]
-        if m in "mpatch bdiff".split():
-            ui.warn(_("(did you forget to compile extensions?)\n"))
-        elif m in "zlib".split():
-            ui.warn(_("(is your Python install correct?)\n"))
-    except IOError as inst:
-        if util.safehasattr(inst, "code"):
-            ui.warn(_("abort: %s\n") % inst)
-        elif util.safehasattr(inst, "reason"):
-            try: # usually it is in the form (errno, strerror)
-                reason = inst.reason.args[1]
-            except (AttributeError, IndexError):
-                # it might be anything, for example a string
-                reason = inst.reason
-            if isinstance(reason, unicode):
-                # SSLError of Python 2.7.9 contains a unicode
-                reason = reason.encode(encoding.encoding, 'replace')
-            ui.warn(_("abort: error: %s\n") % reason)
-        elif (util.safehasattr(inst, "args")
-              and inst.args and inst.args[0] == errno.EPIPE):
-            pass
-        elif getattr(inst, "strerror", None):
-            if getattr(inst, "filename", None):
-                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
-            else:
-                ui.warn(_("abort: %s\n") % inst.strerror)
-        else:
-            raise
-    except OSError as inst:
-        if getattr(inst, "filename", None) is not None:
-            ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
-        else:
-            ui.warn(_("abort: %s\n") % inst.strerror)
+    except IOError:
+        raise
     except KeyboardInterrupt:
         raise
-    except MemoryError:
-        ui.warn(_("abort: out of memory\n"))
-    except SystemExit as inst:
-        # Commands shouldn't sys.exit directly, but give a return code.
-        # Just in case catch this and and pass exit code to caller.
-        return inst.code
-    except socket.error as inst:
-        ui.warn(_("abort: %s\n") % inst.args[-1])
-    except:  # perhaps re-raises
+    except:  # probably re-raises
         if not handlecommandexception(ui):
             raise
 
@@ -365,7 +278,7 @@
         cmd = re.sub(r'\$(\d+|\$)', replacer, cmd)
         givenargs = [x for i, x in enumerate(givenargs)
                      if i not in nums]
-        args = shlex.split(cmd)
+        args = pycompat.shlexsplit(cmd)
     return args + givenargs
 
 def aliasinterpolate(name, args, cmd):
@@ -437,7 +350,7 @@
             return
 
         try:
-            args = shlex.split(self.definition)
+            args = pycompat.shlexsplit(self.definition)
         except ValueError as inst:
             self.badalias = (_("error in definition for alias '%s': %s")
                              % (self.name, inst))
@@ -536,7 +449,7 @@
 
     try:
         args = fancyopts.fancyopts(args, commands.globalopts, options)
-    except fancyopts.getopt.GetoptError as inst:
+    except getopt.GetoptError as inst:
         raise error.CommandError(None, inst)
 
     if args:
@@ -547,7 +460,7 @@
         args = aliasargs(entry[0], args)
         defaults = ui.config("defaults", cmd)
         if defaults:
-            args = map(util.expandpath, shlex.split(defaults)) + args
+            args = map(util.expandpath, pycompat.shlexsplit(defaults)) + args
         c = list(entry[1])
     else:
         cmd = None
@@ -559,7 +472,7 @@
 
     try:
         args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
-    except fancyopts.getopt.GetoptError as inst:
+    except getopt.GetoptError as inst:
         raise error.CommandError(cmd, inst)
 
     # separate global options back out
@@ -665,7 +578,7 @@
     """
     if wd is None:
         try:
-            wd = os.getcwd()
+            wd = pycompat.getcwd()
         except OSError as e:
             raise error.Abort(_("error getting current working directory: %s") %
                               e.strerror)
@@ -689,7 +602,7 @@
 
     try:
         args = fancyopts.fancyopts(args, commands.globalopts, options)
-    except fancyopts.getopt.GetoptError:
+    except getopt.GetoptError:
         return
 
     if not args:
@@ -712,14 +625,6 @@
         return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
                                   [], {})
 
-def _cmdattr(ui, cmd, func, attr):
-    try:
-        return getattr(func, attr)
-    except AttributeError:
-        ui.deprecwarn("missing attribute '%s', use @command decorator "
-                      "to register '%s'" % (attr, cmd), '3.8')
-        return False
-
 _loaded = set()
 
 # list of (objname, loadermod, loadername) tuple:
@@ -730,6 +635,7 @@
 #   extraobj) arguments
 extraloaders = [
     ('cmdtable', commands, 'loadcmdtable'),
+    ('colortable', color, 'loadcolortable'),
     ('filesetpredicate', fileset, 'loadpredicate'),
     ('revsetpredicate', revset, 'loadpredicate'),
     ('templatefilter', templatefilters, 'loadfilter'),
@@ -768,6 +674,10 @@
 
     # (reposetup is handled in hg.repository)
 
+    # Accessing this attribute has the side effect of guaranteeing that the
+    # debugcommands module is imported and commands.table is populated.
+    debugcommands.command
+
     addaliases(lui, commands.table)
 
     # All aliases and commands are completely defined, now.
@@ -848,7 +758,7 @@
     with profiling.maybeprofile(lui):
         repo = None
         cmdpats = args[:]
-        if not _cmdattr(ui, cmd, func, 'norepo'):
+        if not func.norepo:
             # use the repo from the request only if we don't have -R
             if not rpath and not cwd:
                 repo = req.repo
@@ -871,9 +781,8 @@
                 except error.RepoError:
                     if rpath and rpath[-1]: # invalid -R path
                         raise
-                    if not _cmdattr(ui, cmd, func, 'optionalrepo'):
-                        if (_cmdattr(ui, cmd, func, 'inferrepo') and
-                            args and not path):
+                    if not func.optionalrepo:
+                        if func.inferrepo and args and not path:
                             # try to infer -R from command args
                             repos = map(cmdutil.findrepo, args)
                             guess = repos[0]
@@ -883,7 +792,7 @@
                         if not path:
                             raise error.RepoError(_("no repository found in"
                                                     " '%s' (.hg not found)")
-                                                  % os.getcwd())
+                                                  % pycompat.getcwd())
                         raise
             if repo:
                 ui = repo.ui
@@ -895,7 +804,8 @@
 
         msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
         ui.log("command", '%s\n', msg)
-        d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
+        strcmdopt = pycompat.strkwargs(cmdoptions)
+        d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
         try:
             return runcommand(lui, repo, cmd, fullargs, ui, options, d,
                               cmdpats, cmdoptions)
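A minimal sketch of driving a command through this module, as run() does at
the top of the file; the command name is an arbitrary example::

    from mercurial import dispatch

    req = dispatch.request(['version'])
    status = dispatch.dispatch(req) or 0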
--- a/mercurial/encoding.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/encoding.py	Wed Jan 18 11:43:36 2017 -0500
@@ -93,7 +93,7 @@
 try:
     encoding = environ.get("HGENCODING")
     if not encoding:
-        encoding = locale.getpreferredencoding() or 'ascii'
+        encoding = locale.getpreferredencoding().encode('ascii') or 'ascii'
         encoding = _encodingfixers.get(encoding, lambda: encoding)()
 except locale.Error:
     encoding = 'ascii'
--- a/mercurial/error.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/error.py	Wed Jan 18 11:43:36 2017 -0500
@@ -84,7 +84,7 @@
     """Raised when an update is aborted because there is nothing to merge"""
 
 class ManyMergeDestAbort(MergeDestAbort):
-    """Raised when an update is aborted because destination is ambigious"""
+    """Raised when an update is aborted because destination is ambiguous"""
 
 class ResponseExpected(Abort):
     """Raised when an EOF is received for a prompt"""
@@ -168,6 +168,9 @@
 class PushRaced(RuntimeError):
     """An exception raised during unbundling that indicate a push race"""
 
+class ProgrammingError(RuntimeError):
+    """Raised if a mercurial (core or extension) developer made a mistake"""
+
 # bundle2 related errors
 class BundleValueError(ValueError):
     """error raised when bundle2 cannot be processed"""
--- a/mercurial/exchange.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/exchange.py	Wed Jan 18 11:43:36 2017 -0500
@@ -37,12 +37,6 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-# Maps bundle compression human names to internal representation.
-_bundlespeccompressions = {'none': None,
-                           'bzip2': 'BZ',
-                           'gzip': 'GZ',
-                          }
-
 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {'v1': '01',
                          'v2': '02',
@@ -64,7 +58,7 @@
 
     Where <compression> is one of the supported compression formats
     and <type> is (currently) a version string. A ";" can follow the type and
-    all text afterwards is interpretted as URI encoded, ";" delimited key=value
+    all text afterwards is interpreted as URI encoded, ";" delimited key=value
     pairs.
 
     If ``strict`` is True (the default) <compression> is required. Otherwise,
@@ -114,7 +108,7 @@
     if '-' in spec:
         compression, version = spec.split('-', 1)
 
-        if compression not in _bundlespeccompressions:
+        if compression not in util.compengines.supportedbundlenames:
             raise error.UnsupportedBundleSpecification(
                     _('%s compression is not supported') % compression)
 
@@ -130,7 +124,7 @@
 
         spec, params = parseparams(spec)
 
-        if spec in _bundlespeccompressions:
+        if spec in util.compengines.supportedbundlenames:
             compression = spec
             version = 'v1'
             if 'generaldelta' in repo.requirements:
@@ -157,7 +151,8 @@
                       ', '.join(sorted(missingreqs)))
 
     if not externalnames:
-        compression = _bundlespeccompressions[compression]
+        engine = util.compengines.forbundlename(compression)
+        compression = engine.bundletype()[1]
         version = _bundlespeccgversions[version]
     return compression, version, params
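A sketch of calling the parser described above; the spec string is an
example and ``repo`` is assumed::

    from mercurial import exchange

    comp, version, params = exchange.parsebundlespec(
        repo, 'gzip-v2;requirements=generaldelta', strict=True)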
 
@@ -196,10 +191,10 @@
     restored.
     """
     def speccompression(alg):
-        for k, v in _bundlespeccompressions.items():
-            if v == alg:
-                return k
-        return None
+        try:
+            return util.compengines.forbundletype(alg).bundletype()[0]
+        except KeyError:
+            return None
 
     b = readbundle(ui, fh, None)
     if isinstance(b, changegroup.cg1unpacker):
@@ -282,7 +277,7 @@
     This function is used to allow testing of the older bundle version"""
     ui = op.repo.ui
     forcebundle1 = False
-    # The goal is this config is to allow developper to choose the bundle
-    # version used during exchanged. This is especially handy during test.
+    # The goal of this config is to allow the developer to choose the bundle
+    # version used during the exchange. This is especially handy during tests.
     # Value is a list of bundle version to be picked from, highest version
     # should be used.
@@ -608,8 +603,21 @@
     explicit = set([repo._bookmarks.expandname(bookmark)
                     for bookmark in pushop.bookmarks])
 
-    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
+    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
+    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
+
+    def safehex(x):
+        if x is None:
+            return x
+        return hex(x)
+
+    def hexifycompbookmarks(bookmarks):
+        for b, scid, dcid in bookmarks:
+            yield b, safehex(scid), safehex(dcid)
+
+    comp = [hexifycompbookmarks(marks) for marks in comp]
     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
+
     for b, scid, dcid in advsrc:
         if b in explicit:
             explicit.remove(b)
@@ -621,7 +629,7 @@
             explicit.remove(b)
             pushop.outbookmarks.append((b, '', scid))
     # search for overwritten bookmark
-    for b, scid, dcid in advdst + diverge + differ:
+    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
         if b in explicit:
             explicit.remove(b)
             pushop.outbookmarks.append((b, dcid, scid))
@@ -1425,7 +1433,7 @@
     pullop.stepsdone.add('phases')
     publishing = bool(remotephases.get('publishing', False))
     if remotephases and not publishing:
-        # remote is new and unpublishing
+        # remote is new and non-publishing
         pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                  pullop.pulledsubset,
                                                  remotephases)
@@ -1460,6 +1468,7 @@
     pullop.stepsdone.add('bookmarks')
     repo = pullop.repo
     remotebookmarks = pullop.remotebookmarks
+    remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
     bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                              pullop.remote.url(),
                              pullop.gettransaction,
@@ -1666,6 +1675,17 @@
     if chunks:
         bundler.newpart('hgtagsfnodes', data=''.join(chunks))
 
+def _getbookmarks(repo, **kwargs):
+    """Returns bookmark to node mapping.
+
+    This function is primarily used to generate `bookmarks` bundle2 part.
+    It is a separate function in order to make it easy to wrap it
+    in extensions. Passing `kwargs` to the function makes it easy to
+    add new parameters in extensions.
+    """
+
+    return dict(bookmod.listbinbookmarks(repo))
+
 def check_heads(repo, their_heads, context):
     """check if the heads of a repo have been modified
 
@@ -1882,18 +1902,22 @@
 
     return newentries
 
-def sortclonebundleentries(ui, entries):
-    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
-    if not prefers:
-        return list(entries)
+class clonebundleentry(object):
+    """Represents an item in a clone bundles manifest.
+
+    This rich class is needed to support sorting since sorted() in Python 3
+    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
+    won't work.
+    """
 
-    prefers = [p.split('=', 1) for p in prefers]
+    def __init__(self, value, prefers):
+        self.value = value
+        self.prefers = prefers
 
-    # Our sort function.
-    def compareentry(a, b):
-        for prefkey, prefvalue in prefers:
-            avalue = a.get(prefkey)
-            bvalue = b.get(prefkey)
+    def _cmp(self, other):
+        for prefkey, prefvalue in self.prefers:
+            avalue = self.value.get(prefkey)
+            bvalue = other.value.get(prefkey)
 
             # Special case for b missing attribute and a matches exactly.
             if avalue is not None and bvalue is None and avalue == prefvalue:
@@ -1924,7 +1948,33 @@
         # back to index order.
         return 0
 
-    return sorted(entries, cmp=compareentry)
+    def __lt__(self, other):
+        return self._cmp(other) < 0
+
+    def __gt__(self, other):
+        return self._cmp(other) > 0
+
+    def __eq__(self, other):
+        return self._cmp(other) == 0
+
+    def __le__(self, other):
+        return self._cmp(other) <= 0
+
+    def __ge__(self, other):
+        return self._cmp(other) >= 0
+
+    def __ne__(self, other):
+        return self._cmp(other) != 0
+
+def sortclonebundleentries(ui, entries):
+    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
+    if not prefers:
+        return list(entries)
+
+    prefers = [p.split('=', 1) for p in prefers]
+
+    items = sorted(clonebundleentry(v, prefers) for v in entries)
+    return [i.value for i in items]
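The same pattern in miniature: sorted() needs only rich comparisons such as
__lt__, so wrapping values in a small class replaces the removed ``cmp=``
argument. This is a generic illustration, not Mercurial API::

    class bymod3(object):
        def __init__(self, value):
            self.value = value

        def __lt__(self, other):
            return self.value % 3 < other.value % 3

    print [w.value for w in sorted(bymod3(v) for v in range(6))]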
 
 def trypullbundlefromurl(ui, repo, url):
     """Attempt to apply a bundle from a URL."""
--- a/mercurial/extensions.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/extensions.py	Wed Jan 18 11:43:36 2017 -0500
@@ -18,6 +18,7 @@
 from . import (
     cmdutil,
     error,
+    pycompat,
     util,
 )
 
@@ -59,6 +60,8 @@
 def loadpath(path, module_name):
     module_name = module_name.replace('.', '_')
     path = util.normpath(util.expandpath(path))
+    module_name = pycompat.fsdecode(module_name)
+    path = pycompat.fsdecode(path)
     if os.path.isdir(path):
         # module/__init__.py style
         d, f = os.path.split(path)
@@ -74,7 +77,7 @@
 
 def _importh(name):
     """import and return the <name> module"""
-    mod = __import__(name)
+    mod = __import__(pycompat.sysstr(name))
     components = name.split('.')
     for comp in components[1:]:
         mod = getattr(mod, comp)
@@ -426,7 +429,7 @@
         file.close()
 
     if doc: # extracting localized synopsis
-        return gettext(doc).splitlines()[0]
+        return gettext(doc)
     else:
         return _('(no help text available)')
 
@@ -448,7 +451,7 @@
     for name, path in paths.iteritems():
         doc = _disabledhelp(path)
         if doc:
-            exts[name] = doc
+            exts[name] = doc.splitlines()[0]
 
     return exts
 
--- a/mercurial/fancyopts.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/fancyopts.py	Wed Jan 18 11:43:36 2017 -0500
@@ -7,10 +7,11 @@
 
 from __future__ import absolute_import
 
-import getopt
-
 from .i18n import _
-from . import error
+from . import (
+    error,
+    pycompat,
+)
 
 # Set of flags to not apply boolean negation logic on
 nevernegate = set([
@@ -34,13 +35,14 @@
         stopindex = args.index('--')
         extraargs = args[stopindex + 1:]
         args = args[:stopindex]
-    opts, parseargs = getopt.getopt(args, options, longoptions)
+    opts, parseargs = pycompat.getoptb(args, options, longoptions)
     args = []
     while parseargs:
         arg = parseargs.pop(0)
         if arg and arg[0] == '-' and len(arg) > 1:
             parseargs.insert(0, arg)
-            topts, newparseargs = getopt.getopt(parseargs, options, longoptions)
+            topts, newparseargs = pycompat.getoptb(parseargs,
+                                                   options, longoptions)
             opts = opts + topts
             parseargs = newparseargs
         else:
@@ -125,7 +127,7 @@
     if gnu:
         parse = gnugetopt
     else:
-        parse = getopt.getopt
+        parse = pycompat.getoptb
     opts, args = parse(args, shortlist, namelist)
 
     # transfer result to state
--- a/mercurial/filelog.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/filelog.py	Wed Jan 18 11:43:36 2017 -0500
@@ -104,9 +104,9 @@
 
         return True
 
-    def checkhash(self, text, p1, p2, node, rev=None):
+    def checkhash(self, text, node, p1=None, p2=None, rev=None):
         try:
-            super(filelog, self).checkhash(text, p1, p2, node, rev=rev)
+            super(filelog, self).checkhash(text, node, p1=p1, p2=p2, rev=rev)
         except error.RevlogError:
             if _censoredtext(text):
                 raise error.CensoredNodeError(self.indexfile, node, text)
--- a/mercurial/filemerge.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/filemerge.py	Wed Jan 18 11:43:36 2017 -0500
@@ -16,6 +16,7 @@
 from .node import nullid, short
 
 from . import (
+    encoding,
     error,
     formatter,
     match,
@@ -165,7 +166,7 @@
                 return (force, force)
 
     # HGMERGE takes next precedence
-    hgmerge = os.environ.get("HGMERGE")
+    hgmerge = encoding.environ.get("HGMERGE")
     if hgmerge:
         if changedelete and not supportscd(hgmerge):
             return ":prompt", None
@@ -189,7 +190,8 @@
         if _toolbool(ui, t, "disabled", False):
             disabled.add(t)
     names = tools.keys()
-    tools = sorted([(-p, t) for t, p in tools.items() if t not in disabled])
+    tools = sorted([(-p, tool) for tool, p in tools.items()
+                    if tool not in disabled])
     uimerge = ui.config("ui", "merge")
     if uimerge:
         # external tools defined in uimerge won't be able to handle
@@ -517,7 +519,8 @@
     return util.ellipsis(mark, 80 - 8)
 
 _defaultconflictmarker = ('{node|short} '
-                          '{ifeq(tags, "tip", "", "{tags} ")}'
+                          '{ifeq(tags, "tip", "", '
+                           'ifeq(tags, "", "", "{tags} "))}'
                           '{if(bookmarks, "{bookmarks} ")}'
                           '{ifeq(branch, "default", "", "{branch} ")}'
                           '- {author|user}: {desc|firstline}')
@@ -575,8 +578,9 @@
     a boolean indicating whether the file was deleted from disk."""
 
     def temp(prefix, ctx):
-        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
-        (fd, name) = tempfile.mkstemp(prefix=pre)
+        fullbase, ext = os.path.splitext(ctx.path())
+        pre = "%s~%s." % (os.path.basename(fullbase), prefix)
+        (fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext)
         data = repo.wwritedata(ctx.path(), ctx.data())
         f = os.fdopen(fd, "wb")
         f.write(data)
--- a/mercurial/formatter.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/formatter.py	Wed Jan 18 11:43:36 2017 -0500
@@ -5,6 +5,101 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
+"""Generic output formatting for Mercurial
+
+The formatter provides an API to show data in various ways. The following
+functions should be used in place of ui.write():
+
+- fm.write() for unconditional output
+- fm.condwrite() to show some extra data conditionally in plain output
+- fm.data() to provide extra data to JSON or template output
+- fm.plain() to show raw text that isn't provided to JSON or template output
+
+To show structured data (e.g. date tuples, dicts, lists), apply fm.format*()
+beforehand so the data is converted to the appropriate data type. Use
+fm.isplain() if you need to convert or format data conditionally, in ways
+not supported by the formatter API.
+
+To build nested structure (i.e. a list of dicts), use fm.nested().
+
+See also https://www.mercurial-scm.org/wiki/GenericTemplatingPlan
+
+fm.condwrite() vs 'if cond:':
+
+In most cases, use fm.condwrite() so users can selectively show the data
+in template output. If it's costly to build data, use plain 'if cond:' with
+fm.write().
+
+fm.nested() vs fm.formatdict() (or fm.formatlist()):
+
+fm.nested() should be used to form a tree structure (a list of dicts of
+lists of dicts...) which can be accessed through template keywords, e.g.
+"{foo % "{bar % {...}} {baz % {...}}"}". On the other hand, fm.formatdict()
+exports a dict-type object to the template, which can be accessed by e.g.
+the "{get(foo, key)}" function.
+
+Doctest helper:
+
+>>> def show(fn, verbose=False, **opts):
+...     import sys
+...     from . import ui as uimod
+...     ui = uimod.ui()
+...     ui.fout = sys.stdout  # redirect to doctest
+...     ui.verbose = verbose
+...     return fn(ui, ui.formatter(fn.__name__, opts))
+
+Basic example:
+
+>>> def files(ui, fm):
+...     files = [('foo', 123, (0, 0)), ('bar', 456, (1, 0))]
+...     for f in files:
+...         fm.startitem()
+...         fm.write('path', '%s', f[0])
+...         fm.condwrite(ui.verbose, 'date', '  %s',
+...                      fm.formatdate(f[2], '%Y-%m-%d %H:%M:%S'))
+...         fm.data(size=f[1])
+...         fm.plain('\\n')
+...     fm.end()
+>>> show(files)
+foo
+bar
+>>> show(files, verbose=True)
+foo  1970-01-01 00:00:00
+bar  1970-01-01 00:00:01
+>>> show(files, template='json')
+[
+ {
+  "date": [0, 0],
+  "path": "foo",
+  "size": 123
+ },
+ {
+  "date": [1, 0],
+  "path": "bar",
+  "size": 456
+ }
+]
+>>> show(files, template='path: {path}\\ndate: {date|rfc3339date}\\n')
+path: foo
+date: 1970-01-01T00:00:00+00:00
+path: bar
+date: 1970-01-01T00:00:01+00:00
+
+Nested example:
+
+>>> def subrepos(ui, fm):
+...     fm.startitem()
+...     fm.write('repo', '[%s]\\n', 'baz')
+...     files(ui, fm.nested('files'))
+...     fm.end()
+>>> show(subrepos)
+[baz]
+foo
+bar
+>>> show(subrepos, template='{repo}: {join(files % "{path}", ", ")}\\n')
+baz: foo, bar
+"""
+
 from __future__ import absolute_import
 
 import os
--- a/mercurial/hbisect.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hbisect.py	Wed Jan 18 11:43:36 2017 -0500
@@ -98,7 +98,7 @@
     tot = len(candidates)
     unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
     if tot == 1 or not unskipped:
-        return ([changelog.node(rev) for rev in candidates], 0, good)
+        return ([changelog.node(c) for c in candidates], 0, good)
     perfect = tot // 2
 
     # find the best node to test
--- a/mercurial/help.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/help.py	Wed Jan 18 11:43:36 2017 -0500
@@ -210,14 +210,12 @@
     (["patterns"], _("File Name Patterns"), loaddoc('patterns')),
     (['environment', 'env'], _('Environment Variables'),
      loaddoc('environment')),
-    (['revisions', 'revs'], _('Specifying Single Revisions'),
-     loaddoc('revisions')),
-    (['multirevs', 'mrevs'], _('Specifying Multiple Revisions'),
-     loaddoc('multirevs')),
-    (['revsets', 'revset'], _("Specifying Revision Sets"), loaddoc('revsets')),
+    (['revisions', 'revs', 'revsets', 'revset', 'multirevs', 'mrevs'],
+     _('Specifying Revisions'), loaddoc('revisions')),
     (['filesets', 'fileset'], _("Specifying File Sets"), loaddoc('filesets')),
     (['diffs'], _('Diff Formats'), loaddoc('diffs')),
-    (['merge-tools', 'mergetools'], _('Merge Tools'), loaddoc('merge-tools')),
+    (['merge-tools', 'mergetools', 'mergetool'], _('Merge Tools'),
+     loaddoc('merge-tools')),
     (['templating', 'templates', 'template', 'style'], _('Template Usage'),
      loaddoc('templates')),
     (['urls'], _('URL Paths'), loaddoc('urls')),
@@ -281,7 +279,7 @@
 addtopicsymbols('filesets', '.. predicatesmarker', fileset.symbols)
 addtopicsymbols('merge-tools', '.. internaltoolsmarker',
                 filemerge.internalsdoc)
-addtopicsymbols('revsets', '.. predicatesmarker', revset.symbols)
+addtopicsymbols('revisions', '.. predicatesmarker', revset.symbols)
 addtopicsymbols('templates', '.. keywordsmarker', templatekw.keywords)
 addtopicsymbols('templates', '.. filtersmarker', templatefilters.filters)
 addtopicsymbols('templates', '.. functionsmarker', templater.funcs)
--- a/mercurial/help/config.txt	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/help/config.txt	Wed Jan 18 11:43:36 2017 -0500
@@ -418,6 +418,28 @@
        "HG: removed {file}\n"                 }{if(files, "",
        "HG: no files changed\n")}
 
+``diff()``
+    String: show the diff (see :hg:`help templates` for details)
+
+Sometimes it is helpful to show the diff of the changeset in the editor without
+prefixing 'HG: ' to each line, so that diff highlighting works correctly. For
+this, Mercurial provides a special string that makes it ignore everything below
+it::
+
+     HG: ------------------------ >8 ------------------------
+
+For example, the template configuration below will show the diff below the
+extra message::
+
+    [committemplate]
+    changeset = {desc}\n\n
+        HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+        HG: {extramsg}
+        HG: ------------------------ >8 ------------------------
+        HG: Do not touch the line above.
+        HG: Everything below will be removed.
+        {diff()}
+
 .. note::
 
    For some problematic encodings (see :hg:`help win32mbcs` for
@@ -1401,7 +1423,7 @@
 
 ``type``
     The type of profiler to use.
-    (default: ls)
+    (default: stat)
 
     ``ls``
       Use Python's built-in instrumenting profiler. This profiler
@@ -1409,9 +1431,9 @@
       first line of a function. This restriction makes it difficult to
       identify the expensive parts of a non-trivial function.
     ``stat``
-      Use a third-party statistical profiler, statprof. This profiler
-      currently runs only on Unix systems, and is most useful for
-      profiling commands that run for longer than about 0.1 seconds.
+      Use a statistical profiler, statprof. This profiler is most
+      useful for profiling commands that run for longer than about 0.1
+      seconds.
 
 ``format``
     Profiling format.  Specific to the ``ls`` instrumenting profiler.
@@ -1426,6 +1448,20 @@
       file, the generated file can directly be loaded into
       kcachegrind.
 
+``statformat``
+    Profiling format for the ``stat`` profiler.
+    (default: hotpath)
+
+    ``hotpath``
+      Show a tree-based display containing the hot path of execution (where
+      most time was spent).
+    ``bymethod``
+      Show a table of methods ordered by how frequently they are active.
+    ``byline``
+      Show a table of lines in files ordered by how frequently they are active.
+    ``json``
+      Render profiling data as JSON.
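+
+    For example, to see a per-method summary instead of the default hot
+    path display::
+
+      [profiling]
+      statformat = bymethod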
+
 ``frequency``
     Sampling frequency.  Specific to the ``stat`` sampling profiler.
     (default: 1000)
@@ -1509,6 +1545,21 @@
 
 Controls generic server settings.
 
+``compressionengines``
+    List of compression engines and their relative priority to advertise
+    to clients.
+
+    The order of compression engines determines their priority, with the
+    first having the highest priority. If a compression engine is not listed
+    here, it won't be advertised to clients.
+
+    If not set (the default), built-in defaults are used. Run
+    :hg:`debuginstall` to list available compression engines and their
+    default wire protocol priority.
+
+    Older Mercurial clients only support zlib compression; this setting
+    has no effect on them.
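+
+    For example, a server operator preferring zstd while keeping zlib for
+    older clients might set::
+
+      [server]
+      compressionengines = zstd, zlib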
+
 ``uncompressed``
     Whether to allow clients to clone a repository using the
     uncompressed streaming protocol. This transfers about 40% more
@@ -1578,6 +1629,18 @@
 
     This option only impacts the HTTP server.
 
+``zstdlevel``
+    Integer between ``1`` and ``22`` that controls the zstd compression level
+    for wire protocol commands. ``1`` is the minimal amount of compression and
+    ``22`` is the highest amount of compression.
+
+    The default (``3``) should be significantly faster than zlib while likely
+    delivering better compression ratios.
+
+    This option only impacts the HTTP server.
+
+    See also ``server.zliblevel``.
+
 ``smtp``
 --------
 
@@ -2048,6 +2111,20 @@
     Name or email address of the person in charge of the repository.
     (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
 
+``csp``
+    Send a ``Content-Security-Policy`` HTTP header with this value.
+
+    The value may contain a special string ``%nonce%``, which will be replaced
+    by a randomly-generated one-time use value. If the value contains
+    ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
+    one-time property of the nonce. This nonce will also be inserted into
+    ``<script>`` elements containing inline JavaScript.
+
+    Note: lots of HTML content sent by the server is derived from repository
+    data. Please consider the potential for malicious repository data to
+    "inject" itself into generated HTML content as part of your security
+    threat model.
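+
+    For example, one possible policy allows scripts only from the server
+    itself and from nonced inline ``<script>`` elements (adjust the policy
+    to your deployment)::
+
+      [web]
+      csp = script-src 'self' 'nonce-%nonce%'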
+
 ``deny_push``
     Whether to deny pushing to the repository. If empty or not set,
     push is not denied. If the special value ``*``, all remote users are
--- a/mercurial/help/filesets.txt	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/help/filesets.txt	Wed Jan 18 11:43:36 2017 -0500
@@ -15,6 +15,11 @@
 e.g., ``\n`` is interpreted as a newline. To prevent them from being
 interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
 
+See also :hg:`help patterns`.
+
+Operators
+=========
+
 There is a single prefix operator:
 
 ``not x``
@@ -32,10 +37,16 @@
 ``x - y``
   Files in x but not in y.
 
+Predicates
+==========
+
 The following predicates are supported:
 
 .. predicatesmarker
 
+Examples
+========
+
 Some sample queries:
 
 - Show status of files that appear to be binary in the working directory::
@@ -61,5 +72,3 @@
 - Remove files listed in foo.lst that contain the letter a or b::
 
     hg remove "set: 'listfile:foo.lst' and (**a* or **b*)"
-
-See also :hg:`help patterns`.
--- a/mercurial/help/internals/revlogs.txt	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/help/internals/revlogs.txt	Wed Jan 18 11:43:36 2017 -0500
@@ -85,29 +85,46 @@
 
 0-3 (4 bytes) (rev 0 only)
    Revlog header
+
 0-5 (6 bytes)
    Absolute offset of revision data from beginning of revlog.
+
 6-7 (2 bytes)
-   Bit flags impacting revision behavior.
+   Bit flags impacting revision behavior. The following bit offsets are defined:
+
+   0: REVIDX_ISCENSORED revision has censor metadata, must be verified.
+
+   1: REVIDX_ELLIPSIS revision hash does not match its data. Used by
+   narrowhg.
+
+   2: REVIDX_EXTSTORED revision data is stored externally.
+
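+   For example, a flags value of ``3`` has bits 0 and 1 set, marking the
+   revision as both censored and an ellipsis revision.
+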
 8-11 (4 bytes)
    Compressed length of revision data / chunk as stored in revlog.
+
 12-15 (4 bytes)
-   Uncompressed length of revision data / chunk.
+   Uncompressed length of revision data. This is the size of the full
+   revision text, not the size of the stored chunk after decompression.
+
 16-19 (4 bytes)
    Base or previous revision this revision's delta was produced against.
    -1 means this revision holds full text (as opposed to a delta).
    For generaldelta repos, this is the previous revision in the delta
    chain. For non-generaldelta repos, this is the base or first
    revision in the delta chain.
+
 20-23 (4 bytes)
    A revision this revision is *linked* to. This allows a revision in
    one revlog to be forever associated with a revision in another
    revlog. For example, a file's revlog may point to the changelog
    revision that introduced it.
+
 24-27 (4 bytes)
    Revision of 1st parent. -1 indicates no parent.
+
 28-31 (4 bytes)
    Revision of 2nd parent. -1 indicates no 2nd parent.
+
 32-63 (32 bytes)
    Hash of revision's full text. Currently, SHA-1 is used and only
    the first 20 bytes of this field are used. The rest of the bytes
--- a/mercurial/help/internals/wireprotocol.txt	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/help/internals/wireprotocol.txt	Wed Jan 18 11:43:36 2017 -0500
@@ -65,11 +65,27 @@
     GET /repo?cmd=capabilities
     X-HgArg-1: foo=bar&baz=hello%20world
 
+The request media type should be chosen based on server support. If the
+``httpmediatype`` server capability is present, the client should send
+the newest mutually supported media type. If this capability is absent,
+the client must assume the server only supports the
+``application/mercurial-0.1`` media type.
+
 The ``Content-Type`` HTTP response header identifies the response as coming
 from Mercurial and can also be used to signal an error has occurred.
 
-The ``application/mercurial-0.1`` media type indicates a generic Mercurial
-response. It matches the media type sent by the client.
+The ``application/mercurial-*`` media types indicate a generic Mercurial
+data type.
+
+The ``application/mercurial-0.1`` media type is raw Mercurial data. It is the
+predecessor of the format below.
+
+The ``application/mercurial-0.2`` media type is compression-framed Mercurial
+data. The first byte of the payload indicates the length of the compression
+format identifier that follows. The next N bytes identify the compression
+format, e.g. ``zlib``. The remaining bytes are compressed according to that
+compression format. The decompressed data behaves the same as with
+``application/mercurial-0.1``.
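+
+As a sketch, decoding such a payload (with ``resp`` being a file-like HTTP
+response body, and assuming zlib was negotiated) might look like::
+
+  import zlib
+
+  n = ord(resp.read(1))            # length of the engine identifier
+  assert resp.read(n) == 'zlib'    # the negotiated compression format
+  # a real implementation would decompress incrementally, not in one shot
+  data = zlib.decompress(resp.read())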
 
 The ``application/hg-error`` media type indicates a generic error occurred.
 The content of the HTTP response body typically holds text describing the
@@ -81,15 +97,19 @@
 Clients also accept the ``text/plain`` media type. All other media
 types should cause the client to error.
 
+Behavior of media types is further described in the ``Content Negotiation``
+section below.
+
 Clients should issue a ``User-Agent`` request header that identifies the client.
 The server should not use the ``User-Agent`` for feature detection.
 
-A command returning a ``string`` response issues the
-``application/mercurial-0.1`` media type and the HTTP response body contains
-the raw string value. A ``Content-Length`` header is typically issued.
+A command returning a ``string`` response issues an
+``application/mercurial-0.*`` media type and the HTTP response body contains
+the raw string value (after compression decoding, if used). A
+``Content-Length`` header is typically issued, but not required.
 
-A command returning a ``stream`` response issues the
-``application/mercurial-0.1`` media type and the HTTP response is typically
+A command returning a ``stream`` response issues an
+``application/mercurial-0.*`` media type and the HTTP response is typically
 using *chunked transfer* (``Transfer-Encoding: chunked``).
 
 SSH Transport
@@ -233,6 +253,35 @@
 This capability was introduced at the same time as the ``lookup``
 capability/command.
 
+compression
+-----------
+
+Declares support for negotiating compression formats.
+
+Presence of this capability indicates the server supports dynamic selection
+of compression formats based on the client request.
+
+Servers advertising this capability are required to support the
+``application/mercurial-0.2`` media type in response to commands returning
+streams. Servers may support this media type on any command.
+
+The value of the capability is a comma-delimited list of strings declaring
+supported compression formats, in server-preferred order with the most
+preferred first.
+
+The identifiers used by the official Mercurial distribution are:
+
+bzip2
+   bzip2
+none
+   uncompressed / raw data
+zlib
+   zlib (no gzip header)
+zstd
+   zstd
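+
+For example, a server preferring zstd but also supporting zlib and
+uncompressed transfers would advertise::
+
+  compression=zstd,zlib,none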
+
+This capability was introduced in Mercurial 4.1 (released February 2017).
+
 getbundle
 ---------
 
@@ -252,6 +301,51 @@
 
 This capability was introduced in Mercurial 1.9 (released July 2011).
 
+httpmediatype
+-------------
+
+Indicates which HTTP media types (``Content-Type`` header) the server is
+capable of receiving and sending.
+
+The value of the capability is a comma-delimited list of strings identifying
+support for media type and transmission direction. The following strings may
+be present:
+
+0.1rx
+   Indicates server support for receiving ``application/mercurial-0.1`` media
+   types.
+
+0.1tx
+   Indicates server support for sending ``application/mercurial-0.1`` media
+   types.
+
+0.2rx
+   Indicates server support for receiving ``application/mercurial-0.2`` media
+   types.
+
+0.2tx
+   Indicates server support for sending ``application/mercurial-0.2`` media
+   types.
+
+minrx=X
+   Minimum media type version the server is capable of receiving. Value is a
+   string like ``0.2``.
+
+   This capability can be used by servers to limit connections from legacy
+   clients not using the latest supported media type. However, only clients
+   with knowledge of this capability will know to consult this value. This
+   capability is present so the client may issue a more user-friendly error
+   when the server has locked out a legacy client.
+
+mintx=X
+   Minimum media type version the server is capable of sending. Value is a
+   string like ``0.1``.
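+
+For example, a server fully supporting both media types in both directions
+would advertise::
+
+  httpmediatype=0.1rx,0.1tx,0.2rx,0.2tx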
+
+Servers advertising support for the ``application/mercurial-0.2`` media type
+should also advertise the ``compression`` capability.
+
+This capability was introduced in Mercurial 4.1 (released February 2017).
+
 httppostargs
 ------------
 
@@ -416,6 +510,57 @@
 not conforming to the expected command responses is assumed to be not related
 to Mercurial and can be ignored.
 
+Content Negotiation
+===================
+
+The wire protocol has some mechanisms to help peers determine what content
+types and encodings the other side will accept. Historically, these mechanisms
+have been built into commands themselves because most commands only send a
+well-defined response type and only certain commands needed to support
+functionality like compression.
+
+Currently, only the HTTP transport supports content negotiation at the protocol
+layer.
+
+HTTP requests advertise supported response formats via the ``X-HgProto-<N>``
+request header, where ``<N>`` is an integer starting at 1, allowing the
+logical value to span multiple headers. This value consists of a list of
+space-delimited parameters. Each parameter denotes a feature or capability.
+
+The following parameters are defined:
+
+0.1
+   Indicates the client supports receiving ``application/mercurial-0.1``
+   responses.
+
+0.2
+   Indicates the client supports receiving ``application/mercurial-0.2``
+   responses.
+
+comp
+   Indicates compression formats the client can decode. Value is a list of
+   comma-delimited strings identifying compression formats, ordered from
+   most preferred to least preferred, e.g. ``comp=zstd,zlib,none``.
+
+   This parameter does not have an effect if only the ``0.1`` parameter
+   is defined, as support for ``application/mercurial-0.2`` or greater is
+   required to use arbitrary compression formats.
+
+   If this parameter is not advertised, the server interprets this as
+   equivalent to ``zlib,none``.
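+
+For example, a client supporting both media types and zstd decompression
+might send::
+
+  X-HgProto-1: 0.1 0.2 comp=zstd,zlib,none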
+
+Clients may choose to only send this header if the ``httpmediatype``
+server capability is present, as currently all server-side features
+consulting this header require the client to opt in to new protocol features
+advertised via the ``httpmediatype`` capability.
+
+A server that doesn't receive an ``X-HgProto-<N>`` header should infer a
+value of ``0.1``. This is compatible with legacy clients.
+
+A server receiving a request indicating support for multiple media type
+versions may respond with any of the supported media types. Not all servers
+may support all media types on all commands.
+
 Commands
 ========
 
--- a/mercurial/help/multirevs.txt	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-When Mercurial accepts more than one revision, they may be specified
-individually, or provided as a topologically continuous range,
-separated by the ":" character.
-
-The syntax of range notation is [BEGIN]:[END], where BEGIN and END are
-revision identifiers. Both BEGIN and END are optional. If BEGIN is not
-specified, it defaults to revision number 0. If END is not specified,
-it defaults to the tip. The range ":" thus means "all revisions".
-
-If BEGIN is greater than END, revisions are treated in reverse order.
-
-A range acts as a closed interval. This means that a range of 3:5
-gives 3, 4 and 5. Similarly, a range of 9:6 gives 9, 8, 7, and 6.
--- a/mercurial/help/revisions.txt	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/help/revisions.txt	Wed Jan 18 11:43:36 2017 -0500
@@ -1,12 +1,13 @@
-Mercurial supports several ways to specify individual revisions.
+Mercurial supports several ways to specify revisions.
+
+Specifying single revisions
+===========================
 
 A plain integer is treated as a revision number. Negative integers are
 treated as sequential offsets from the tip, with -1 denoting the tip,
 -2 denoting the revision prior to the tip, and so forth.
 
-A 40-digit hexadecimal string is treated as a unique revision
-identifier.
-
+A 40-digit hexadecimal string is treated as a unique revision identifier.
 A hexadecimal string less than 40 characters long is treated as a
 unique revision identifier and is referred to as a short-form
 identifier. A short-form identifier is only valid if it is the prefix
@@ -27,3 +28,196 @@
 working directory is checked out, it is equivalent to null. If an
 uncommitted merge is in progress, "." is the revision of the first
 parent.
+
+Finally, commands that expect a single revision (like ``hg update``) also
+accept revsets (see below for details). When given a revset, they use the
+last revision of the revset. A few commands accept two single revisions
+(like ``hg diff``). When given a revset, they use the first and the last
+revisions of the revset.
+
+Specifying multiple revisions
+=============================
+
+Mercurial supports a functional language for selecting a set of
+revisions. Expressions in this language are called revsets.
+
+The language supports a number of predicates which are joined by infix
+operators. Parentheses can be used for grouping.
+
+Identifiers such as branch names may need quoting with single or
+double quotes if they contain characters like ``-`` or if they match
+one of the predefined predicates.
+
+Special characters can be used in quoted identifiers by escaping them,
+e.g., ``\n`` is interpreted as a newline. To prevent them from being
+interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
+
+Operators
+=========
+
+There is a single prefix operator:
+
+``not x``
+  Changesets not in x. Short form is ``! x``.
+
+These are the supported infix operators:
+
+``x::y``
+  A DAG range, meaning all changesets that are descendants of x and
+  ancestors of y, including x and y themselves. If the first endpoint
+  is left out, this is equivalent to ``ancestors(y)``; if the second
+  is left out, it is equivalent to ``descendants(x)``.
+
+  An alternative syntax is ``x..y``.
+
+``x:y``
+  All changesets with revision numbers between x and y, both
+  inclusive. Either endpoint can be left out; they default to 0 and
+  tip, respectively.
+
+``x and y``
+  The intersection of changesets in x and y. Short form is ``x & y``.
+
+``x or y``
+  The union of changesets in x and y. There are two alternative short
+  forms: ``x | y`` and ``x + y``.
+
+``x - y``
+  Changesets in x but not in y.
+
+``x % y``
+  Changesets that are ancestors of x but not ancestors of y (i.e. ::x - ::y).
+  This is shorthand notation for ``only(x, y)`` (see below). The second
+  argument is optional and, if left out, is equivalent to ``only(x)``.
+
+``x^n``
+  The nth parent of x, n == 0, 1, or 2.
+  For n == 0, x; for n == 1, the first parent of each changeset in x;
+  for n == 2, the second parent of each changeset in x.
+
+``x~n``
+  The nth first ancestor of x; ``x~0`` is x; ``x~3`` is ``x^^^``.
+
+``x ## y``
+  Concatenate strings and identifiers into one string.
+
+  All other prefix, infix and postfix operators have lower priority than
+  ``##``. For example, ``a1 ## a2~2`` is equivalent to ``(a1 ## a2)~2``.
+
+  For example::
+
+    [revsetalias]
+    issue(a1) = grep(r'\bissue[ :]?' ## a1 ## r'\b|\bbug\(' ## a1 ## r'\)')
+
+    ``issue(1234)`` is equivalent to
+    ``grep(r'\bissue[ :]?1234\b|\bbug\(1234\)')``
+    in this case. This matches against all of "issue 1234", "issue:1234",
+    "issue1234" and "bug(1234)".
+
+There is a single postfix operator:
+
+``x^``
+  Equivalent to ``x^1``, the first parent of each changeset in x.
+
+Patterns
+========
+
+Where noted, predicates that perform string matching can accept a pattern
+string. The pattern may be either a literal, or a regular expression. If the
+pattern starts with ``re:``, the remainder of the pattern is treated as a
+regular expression. Otherwise, it is treated as a literal. To match a pattern
+that actually starts with ``re:``, use the prefix ``literal:``.
+
+Matching is case-sensitive, unless otherwise noted. To perform a
+case-insensitive match on a case-sensitive predicate, use a regular
+expression, prefixed with ``(?i)``.
+
+  For example::
+
+    ``tag(r're:(?i)release')`` matches "release" or "RELEASE" or "Release", etc
+
+Predicates
+==========
+
+The following predicates are supported:
+
+.. predicatesmarker
+
+Aliases
+=======
+
+New predicates (known as "aliases") can be defined, using any combination of
+existing predicates or other aliases. An alias definition looks like::
+
+  <alias> = <definition>
+
+in the ``revsetalias`` section of a Mercurial configuration file. Arguments
+of the form `a1`, `a2`, etc. are substituted from the alias into the
+definition.
+
+For example,
+
+::
+
+  [revsetalias]
+  h = heads()
+  d(s) = sort(s, date)
+  rs(s, k) = reverse(sort(s, k))
+
+defines three aliases, ``h``, ``d``, and ``rs``. ``rs(0:tip, author)`` is
+exactly equivalent to ``reverse(sort(0:tip, author))``.
+
+Equivalents
+===========
+
+Command line equivalents for :hg:`log`::
+
+  -f    ->  ::.
+  -d x  ->  date(x)
+  -k x  ->  keyword(x)
+  -m    ->  merge()
+  -u x  ->  user(x)
+  -b x  ->  branch(x)
+  -P x  ->  !::x
+  -l x  ->  limit(expr, x)
+
+Examples
+========
+
+Some sample queries:
+
+- Changesets on the default branch::
+
+    hg log -r "branch(default)"
+
+- Changesets on the default branch since tag 1.5 (excluding merges)::
+
+    hg log -r "branch(default) and 1.5:: and not merge()"
+
+- Open branch heads::
+
+    hg log -r "head() and not closed()"
+
+- Changesets between tags 1.3 and 1.5 mentioning "bug" that affect
+  ``hgext/*``::
+
+    hg log -r "1.3::1.5 and keyword(bug) and file('hgext/*')"
+
+- Changesets committed in May 2008, sorted by user::
+
+    hg log -r "sort(date('May 2008'), user)"
+
+- Changesets mentioning "bug" or "issue" that are not in a tagged
+  release::
+
+    hg log -r "(keyword(bug) or keyword(issue)) and not ancestors(tag())"
+
+- Update to the commit that bookmark @ is pointing to, without activating the
+  bookmark (this works because the last revision of the revset is used)::
+
+    hg update :@
+
+- Show diff between tags 1.3 and 1.5 (this works because the first and the
+  last revisions of the revset are used)::
+
+    hg diff -r 1.3::1.5
--- a/mercurial/help/revsets.txt	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,162 +0,0 @@
-Mercurial supports a functional language for selecting a set of
-revisions.
-
-The language supports a number of predicates which are joined by infix
-operators. Parenthesis can be used for grouping.
-
-Identifiers such as branch names may need quoting with single or
-double quotes if they contain characters like ``-`` or if they match
-one of the predefined predicates.
-
-Special characters can be used in quoted identifiers by escaping them,
-e.g., ``\n`` is interpreted as a newline. To prevent them from being
-interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
-
-Prefix
-======
-
-There is a single prefix operator:
-
-``not x``
-  Changesets not in x. Short form is ``! x``.
-
-Infix
-=====
-
-These are the supported infix operators:
-
-``x::y``
-  A DAG range, meaning all changesets that are descendants of x and
-  ancestors of y, including x and y themselves. If the first endpoint
-  is left out, this is equivalent to ``ancestors(y)``, if the second
-  is left out it is equivalent to ``descendants(x)``.
-
-  An alternative syntax is ``x..y``.
-
-``x:y``
-  All changesets with revision numbers between x and y, both
-  inclusive. Either endpoint can be left out, they default to 0 and
-  tip.
-
-``x and y``
-  The intersection of changesets in x and y. Short form is ``x & y``.
-
-``x or y``
-  The union of changesets in x and y. There are two alternative short
-  forms: ``x | y`` and ``x + y``.
-
-``x - y``
-  Changesets in x but not in y.
-
-``x % y``
-  Changesets that are ancestors of x but not ancestors of y (i.e. ::x - ::y).
-  This is shorthand notation for ``only(x, y)`` (see below). The second
-  argument is optional and, if left out, is equivalent to ``only(x)``.
-
-``x^n``
-  The nth parent of x, n == 0, 1, or 2.
-  For n == 0, x; for n == 1, the first parent of each changeset in x;
-  for n == 2, the second parent of changeset in x.
-
-``x~n``
-  The nth first ancestor of x; ``x~0`` is x; ``x~3`` is ``x^^^``.
-
-``x ## y``
-  Concatenate strings and identifiers into one string.
-
-  All other prefix, infix and postfix operators have lower priority than
-  ``##``. For example, ``a1 ## a2~2`` is equivalent to ``(a1 ## a2)~2``.
-
-  For example::
-
-    [revsetalias]
-    issue(a1) = grep(r'\bissue[ :]?' ## a1 ## r'\b|\bbug\(' ## a1 ## r'\)')
-
-    ``issue(1234)`` is equivalent to
-    ``grep(r'\bissue[ :]?1234\b|\bbug\(1234\)')``
-    in this case. This matches against all of "issue 1234", "issue:1234",
-    "issue1234" and "bug(1234)".
-
-Postfix
-=======
-
-There is a single postfix operator:
-
-``x^``
-  Equivalent to ``x^1``, the first parent of each changeset in x.
-
-Predicates
-==========
-
-The following predicates are supported:
-
-.. predicatesmarker
-
-Aliases
-=======
-
-New predicates (known as "aliases") can be defined, using any combination of
-existing predicates or other aliases. An alias definition looks like::
-
-  <alias> = <definition>
-
-in the ``revsetalias`` section of a Mercurial configuration file. Arguments
-of the form `a1`, `a2`, etc. are substituted from the alias into the
-definition.
-
-For example,
-
-::
-
-  [revsetalias]
-  h = heads()
-  d(s) = sort(s, date)
-  rs(s, k) = reverse(sort(s, k))
-
-defines three aliases, ``h``, ``d``, and ``rs``. ``rs(0:tip, author)`` is
-exactly equivalent to ``reverse(sort(0:tip, author))``.
-
-Equivalents
-===========
-
-Command line equivalents for :hg:`log`::
-
-  -f    ->  ::.
-  -d x  ->  date(x)
-  -k x  ->  keyword(x)
-  -m    ->  merge()
-  -u x  ->  user(x)
-  -b x  ->  branch(x)
-  -P x  ->  !::x
-  -l x  ->  limit(expr, x)
-
-Examples
-========
-
-Some sample queries:
-
-- Changesets on the default branch::
-
-    hg log -r "branch(default)"
-
-- Changesets on the default branch since tag 1.5 (excluding merges)::
-
-    hg log -r "branch(default) and 1.5:: and not merge()"
-
-- Open branch heads::
-
-    hg log -r "head() and not closed()"
-
-- Changesets between tags 1.3 and 1.5 mentioning "bug" that affect
-  ``hgext/*``::
-
-    hg log -r "1.3::1.5 and keyword(bug) and file('hgext/*')"
-
-- Changesets committed in May 2008, sorted by user::
-
-    hg log -r "sort(date('May 2008'), user)"
-
-- Changesets mentioning "bug" or "issue" that are not in a tagged
-  release::
-
-    hg log -r "(keyword(bug) or keyword(issue)) and not ancestors(tag())"
--- a/mercurial/help/templates.txt	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/help/templates.txt	Wed Jan 18 11:43:36 2017 -0500
@@ -16,6 +16,9 @@
     $ hg log -r1 --template "{node}\n"
     b56ce7b07c52de7d5fd79fb89701ea538af65746
 
+Keywords
+========
+
 Strings in curly braces are called keywords. The availability of
 keywords depends on the exact context of the templater. These
 keywords are usually available for templating a log-like command:
@@ -32,6 +35,9 @@
    $ hg tip --template "{date|isodate}\n"
    2008-08-21 18:22 +0000
 
+Filters
+=======
+
 List of filters:
 
 .. filtersmarker
@@ -39,10 +45,16 @@
 Note that a filter is nothing more than a function call, i.e.
 ``expr|filter`` is equivalent to ``filter(expr)``.
 
+Functions
+=========
+
 In addition to filters, there are some basic built-in functions:
 
 .. functionsmarker
 
+Operators
+=========
+
 We provide a limited set of infix arithmetic operations on integers::
 
   + for addition
@@ -60,6 +72,9 @@
 To prevent it from being interpreted, you can use an escape character ``\{``
 or a raw string prefix, ``r'...'``.
 
+Aliases
+=======
+
 New keywords and functions can be defined in the ``templatealias`` section of
 a Mercurial configuration file::
 
@@ -94,6 +109,9 @@
 
   $ hg log -r . -Tnodedate
 
+Examples
+========
+
 Some sample command line templates:
 
 - Format lists, e.g. files::
--- a/mercurial/hgweb/__init__.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/__init__.py	Wed Jan 18 11:43:36 2017 -0500
@@ -85,41 +85,11 @@
     def run(self):
         self.httpd.serve_forever()
 
-def createservice(ui, repo, opts):
-    # this way we can check if something was given in the command-line
-    if opts.get('port'):
-        opts['port'] = util.getport(opts.get('port'))
-
-    alluis = set([ui])
-    if repo:
-        baseui = repo.baseui
-        alluis.update([repo.baseui, repo.ui])
-    else:
-        baseui = ui
-    webconf = opts.get('web_conf') or opts.get('webdir_conf')
+def createapp(baseui, repo, webconf):
     if webconf:
-        # load server settings (e.g. web.port) to "copied" ui, which allows
-        # hgwebdir to reload webconf cleanly
-        servui = ui.copy()
-        servui.readconfig(webconf, sections=['web'])
-        alluis.add(servui)
-    else:
-        servui = ui
-
-    optlist = ("name templates style address port prefix ipv6"
-               " accesslog errorlog certificate encoding")
-    for o in optlist.split():
-        val = opts.get(o, '')
-        if val in (None, ''): # should check against default options instead
-            continue
-        for u in alluis:
-            u.setconfig("web", o, val, 'serve')
-
-    if webconf:
-        app = hgwebdir_mod.hgwebdir(webconf, baseui=baseui)
+        return hgwebdir_mod.hgwebdir(webconf, baseui=baseui)
     else:
         if not repo:
             raise error.RepoError(_("there is no Mercurial repository"
                                     " here (.hg not found)"))
-        app = hgweb_mod.hgweb(repo, baseui=baseui)
-    return httpservice(servui, app, opts)
+        return hgweb_mod.hgweb(repo, baseui=baseui)
--- a/mercurial/hgweb/common.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/common.py	Wed Jan 18 11:43:36 2017 -0500
@@ -8,11 +8,17 @@
 
 from __future__ import absolute_import
 
+import base64
 import errno
 import mimetypes
 import os
+import uuid
 
-from .. import util
+from .. import (
+    encoding,
+    pycompat,
+    util,
+)
 
 httpserver = util.httpserver
 
@@ -139,7 +145,8 @@
     parts = fname.split('/')
     for part in parts:
         if (part in ('', os.curdir, os.pardir) or
-            os.sep in part or os.altsep is not None and os.altsep in part):
+            pycompat.ossep in part or
+            pycompat.osaltsep is not None and pycompat.osaltsep in part):
             return
     fpath = os.path.join(*parts)
     if isinstance(directory, str):
@@ -187,10 +194,29 @@
     """
     return (config("web", "contact") or
             config("ui", "username") or
-            os.environ.get("EMAIL") or "")
+            encoding.environ.get("EMAIL") or "")
 
 def caching(web, req):
     tag = 'W/"%s"' % web.mtime
     if req.env.get('HTTP_IF_NONE_MATCH') == tag:
         raise ErrorResponse(HTTP_NOT_MODIFIED)
     req.headers.append(('ETag', tag))
+
+def cspvalues(ui):
+    """Obtain the Content-Security-Policy header and nonce value.
+
+    Returns a 2-tuple of the CSP header value and the nonce value.
+
+    First value is ``None`` if CSP isn't enabled. Second value is ``None``
+    if CSP isn't enabled or if the CSP header doesn't need a nonce.
+    """
+    # Don't allow an untrusted CSP setting, since it could disable protections
+    # from a trusted/global source.
+    csp = ui.config('web', 'csp', untrusted=False)
+    nonce = None
+
+    if csp and '%nonce%' in csp:
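+        # uuid4().bytes yields 16 bytes of (mostly) random data; the base64
+        # '=' padding is stripped since it carries no information.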
+        nonce = base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip('=')
+        csp = csp.replace('%nonce%', nonce)
+
+    return csp, nonce
--- a/mercurial/hgweb/hgweb_mod.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/hgweb_mod.py	Wed Jan 18 11:43:36 2017 -0500
@@ -19,6 +19,7 @@
     HTTP_OK,
     HTTP_SERVER_ERROR,
     caching,
+    cspvalues,
     permhooks,
 )
 from .request import wsgirequest
@@ -53,6 +54,12 @@
     'pushkey': 'push',
 }
 
+archivespecs = util.sortdict((
+    ('zip', ('application/zip', 'zip', '.zip', None)),
+    ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)),
+    ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)),
+))
+
 def makebreadcrumb(url, prefix=''):
     '''Return a 'URL breadcrumb' list
 
@@ -89,7 +96,7 @@
         self.repo = repo
         self.reponame = app.reponame
 
-        self.archives = ('zip', 'gz', 'bz2')
+        self.archivespecs = archivespecs
 
         self.maxchanges = self.configint('web', 'maxchanges', 10)
         self.stripecount = self.configint('web', 'stripes', 1)
@@ -109,6 +116,8 @@
         # of the request.
         self.websubtable = app.websubtable
 
+        self.csp, self.nonce = cspvalues(self.repo.ui)
+
     # Trust the settings from the .hg/hgrc files by default.
     def config(self, section, name, default=None, untrusted=True):
         return self.repo.ui.config(section, name, default,
@@ -126,12 +135,6 @@
         return self.repo.ui.configlist(section, name, default,
                                        untrusted=untrusted)
 
-    archivespecs = {
-        'bz2': ('application/x-bzip2', 'tbz2', '.tar.bz2', None),
-        'gz': ('application/x-gzip', 'tgz', '.tar.gz', None),
-        'zip': ('application/zip', 'zip', '.zip', None),
-    }
-
     def archivelist(self, nodeid):
         allowed = self.configlist('web', 'allow_archive')
         for typ, spec in self.archivespecs.iteritems():
@@ -201,6 +204,7 @@
             'sessionvars': sessionvars,
             'pathdef': makebreadcrumb(req.url),
             'style': style,
+            'nonce': self.nonce,
         }
         tmpl = templater.templater.frommapfile(mapfile,
                                                filters={'websub': websubfilter},
@@ -224,7 +228,7 @@
             if baseui:
                 u = baseui.copy()
             else:
-                u = uimod.ui()
+                u = uimod.ui.load()
             r = hg.repository(u, repo)
         else:
             # we trust caller to give us a private copy
@@ -286,7 +290,8 @@
         Modern servers should be using WSGI and should avoid this
         method, if possible.
         """
-        if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
+        if not encoding.environ.get('GATEWAY_INTERFACE',
+                                    '').startswith("CGI/1."):
             raise RuntimeError("This function is only intended to be "
                                "called while running as a CGI script.")
         wsgicgi.launch(self)
@@ -317,6 +322,13 @@
         encoding.encoding = rctx.config('web', 'encoding', encoding.encoding)
         rctx.repo.ui.environ = req.env
 
+        if rctx.csp:
+            # hgwebdir may have added CSP header. Since we generate our own,
+            # replace it.
+            req.headers = [h for h in req.headers
+                           if h[0] != 'Content-Security-Policy']
+            req.headers.append(('Content-Security-Policy', rctx.csp))
+
         # work with CGI variables to create coherent structure
         # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
 
@@ -413,7 +425,9 @@
                 req.form['cmd'] = [tmpl.cache['default']]
                 cmd = req.form['cmd'][0]
 
-            if rctx.configbool('web', 'cache', True):
+            # Don't enable caching if using a CSP nonce because then it wouldn't
+            # be a nonce.
+            if rctx.configbool('web', 'cache', True) and not rctx.nonce:
                 caching(self, req) # sets ETag header or raises NOT_MODIFIED
             if cmd not in webcommands.__all__:
                 msg = 'no such method: %s' % cmd
@@ -467,4 +481,3 @@
         return repo.filtered(viewconfig)
     else:
         return repo.filtered('served')
-
--- a/mercurial/hgweb/hgwebdir_mod.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/hgwebdir_mod.py	Wed Jan 18 11:43:36 2017 -0500
@@ -19,6 +19,7 @@
     HTTP_NOT_FOUND,
     HTTP_OK,
     HTTP_SERVER_ERROR,
+    cspvalues,
     get_contact,
     get_mtime,
     ismember,
@@ -136,7 +137,7 @@
         if self.baseui:
             u = self.baseui.copy()
         else:
-            u = uimod.ui()
+            u = uimod.ui.load()
             u.setconfig('ui', 'report_untrusted', 'off', 'hgwebdir')
             u.setconfig('ui', 'nontty', 'true', 'hgwebdir')
             # displaying bundling progress bar while serving feels wrong and may
@@ -186,7 +187,8 @@
         self.lastrefresh = time.time()
 
     def run(self):
-        if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
+        if not encoding.environ.get('GATEWAY_INTERFACE',
+                                    '').startswith("CGI/1."):
             raise RuntimeError("This function is only intended to be "
                                "called while running as a CGI script.")
         wsgicgi.launch(self)
@@ -226,8 +228,12 @@
         try:
             self.refresh()
 
+            csp, nonce = cspvalues(self.ui)
+            if csp:
+                req.headers.append(('Content-Security-Policy', csp))
+
             virtual = req.env.get("PATH_INFO", "").strip('/')
-            tmpl = self.templater(req)
+            tmpl = self.templater(req, nonce)
             ctype = tmpl('mimetype', encoding=encoding.encoding)
             ctype = templater.stringify(ctype)
 
@@ -296,10 +302,10 @@
         def archivelist(ui, nodeid, url):
             allowed = ui.configlist("web", "allow_archive", untrusted=True)
             archives = []
-            for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
-                if i[0] in allowed or ui.configbool("web", "allow" + i[0],
+            for typ, spec in hgweb_mod.archivespecs.iteritems():
+                if typ in allowed or ui.configbool("web", "allow" + typ,
                                                     untrusted=True):
-                    archives.append({"type" : i[0], "extension": i[1],
+                    archives.append({"type" : typ, "extension": spec[2],
                                      "node": nodeid, "url": url})
             return archives
 
@@ -465,7 +471,7 @@
                     sortcolumn=sortcolumn, descending=descending,
                     **dict(sort))
 
-    def templater(self, req):
+    def templater(self, req, nonce):
 
         def motd(**map):
             if self.motd is not None:
@@ -509,6 +515,7 @@
             "staticurl": staticurl,
             "sessionvars": sessionvars,
             "style": style,
+            "nonce": nonce,
         }
         tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
         return tmpl
--- a/mercurial/hgweb/protocol.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/protocol.py	Wed Jan 18 11:43:36 2017 -0500
@@ -8,7 +8,7 @@
 from __future__ import absolute_import
 
 import cgi
-import zlib
+import struct
 
 from .common import (
     HTTP_OK,
@@ -24,13 +24,30 @@
 urlreq = util.urlreq
 
 HGTYPE = 'application/mercurial-0.1'
+HGTYPE2 = 'application/mercurial-0.2'
 HGERRTYPE = 'application/hg-error'
 
+def decodevaluefromheaders(req, headerprefix):
+    """Decode a long value from multiple HTTP request headers."""
+    chunks = []
+    i = 1
+    while True:
+        v = req.env.get('HTTP_%s_%d' % (
+            headerprefix.upper().replace('-', '_'), i))
+        if v is None:
+            break
+        chunks.append(v)
+        i += 1
+
+    return ''.join(chunks)
+
 class webproto(wireproto.abstractserverproto):
     def __init__(self, req, ui):
         self.req = req
         self.response = ''
         self.ui = ui
+        self.name = 'http'
+
     def getargs(self, args):
         knownargs = self._args()
         data = {}
@@ -52,15 +69,9 @@
             args.update(cgi.parse_qs(
                 self.req.read(postlen), keep_blank_values=True))
             return args
-        chunks = []
-        i = 1
-        while True:
-            h = self.req.env.get('HTTP_X_HGARG_' + str(i))
-            if h is None:
-                break
-            chunks += [h]
-            i += 1
-        args.update(cgi.parse_qs(''.join(chunks), keep_blank_values=True))
+
+        argvalue = decodevaluefromheaders(self.req, 'X-HgArg')
+        args.update(cgi.parse_qs(argvalue, keep_blank_values=True))
         return args
     def getfile(self, fp):
         length = int(self.req.env['CONTENT_LENGTH'])
@@ -74,47 +85,101 @@
         self.ui.ferr, self.ui.fout = self.oldio
         return val
 
-    def groupchunks(self, fh):
-        def getchunks():
-            while True:
-                chunk = fh.read(32768)
-                if not chunk:
-                    break
-                yield chunk
-
-        return self.compresschunks(getchunks())
-
-    def compresschunks(self, chunks):
-        # Don't allow untrusted settings because disabling compression or
-        # setting a very high compression level could lead to flooding
-        # the server's network or CPU.
-        z = zlib.compressobj(self.ui.configint('server', 'zliblevel', -1))
-        for chunk in chunks:
-            data = z.compress(chunk)
-            # Not all calls to compress() emit data. It is cheaper to inspect
-            # that here than to send it via the generator.
-            if data:
-                yield data
-        yield z.flush()
-
     def _client(self):
         return 'remote:%s:%s:%s' % (
             self.req.env.get('wsgi.url_scheme') or 'http',
             urlreq.quote(self.req.env.get('REMOTE_HOST', '')),
             urlreq.quote(self.req.env.get('REMOTE_USER', '')))
 
+    def responsetype(self, v1compressible=False):
+        """Determine the appropriate response type and compression settings.
+
+        The ``v1compressible`` argument states whether a response with
+        the application/mercurial-0.1 media type should be zlib compressed.
+
+        Returns a tuple of (mediatype, compengine, engineopts).
+        """
+        # For now, if it isn't compressible in the old world, it's never
+        # compressible. We can change this to send uncompressed 0.2 payloads
+        # later.
+        if not v1compressible:
+            return HGTYPE, None, None
+
+        # Determine the response media type and compression engine based
+        # on the request parameters.
+        protocaps = decodevaluefromheaders(self.req, 'X-HgProto').split(' ')
+
+        if '0.2' in protocaps:
+            # Default as defined by wire protocol spec.
+            compformats = ['zlib', 'none']
+            for cap in protocaps:
+                if cap.startswith('comp='):
+                    compformats = cap[5:].split(',')
+                    break
+
+            # Now find an agreed upon compression format.
+            for engine in wireproto.supportedcompengines(self.ui, self,
+                                                         util.SERVERROLE):
+                if engine.wireprotosupport().name in compformats:
+                    opts = {}
+                    level = self.ui.configint('server',
+                                              '%slevel' % engine.name())
+                    if level is not None:
+                        opts['level'] = level
+
+                    return HGTYPE2, engine, opts
+
+            # No mutually supported compression format. Fall back to the
+            # legacy protocol.
+
+        # Don't allow untrusted settings because disabling compression or
+        # setting a very high compression level could lead to flooding
+        # the server's network or CPU.
+        opts = {'level': self.ui.configint('server', 'zliblevel', -1)}
+        return HGTYPE, util.compengines['zlib'], opts
+
 def iscmd(cmd):
     return cmd in wireproto.commands
 
 def call(repo, req, cmd):
     p = webproto(req, repo.ui)
+
+    def genversion2(gen, compress, engine, engineopts):
+        # application/mercurial-0.2 always sends a payload header
+        # identifying the compression engine.
+        name = engine.wireprotosupport().name
+        assert 0 < len(name) < 256
+        yield struct.pack('B', len(name))
+        yield name
+
+        if compress:
+            for chunk in engine.compressstream(gen, opts=engineopts):
+                yield chunk
+        else:
+            for chunk in gen:
+                yield chunk
+
     rsp = wireproto.dispatch(repo, p, cmd)
     if isinstance(rsp, str):
         req.respond(HTTP_OK, HGTYPE, body=rsp)
         return []
     elif isinstance(rsp, wireproto.streamres):
-        req.respond(HTTP_OK, HGTYPE)
-        return rsp.gen
+        if rsp.reader:
+            gen = iter(lambda: rsp.reader.read(32768), '')
+        else:
+            gen = rsp.gen
+
+        # This code for compression should not be streamres specific. It
+        # is here because we only compress streamres at the moment.
+        mediatype, engine, engineopts = p.responsetype(rsp.v1compressible)
+
+        if mediatype == HGTYPE and rsp.v1compressible:
+            gen = engine.compressstream(gen, engineopts)
+        elif mediatype == HGTYPE2:
+            gen = genversion2(gen, rsp.v1compressible, engine, engineopts)
+
+        req.respond(HTTP_OK, mediatype)
+        return gen
     elif isinstance(rsp, wireproto.pushres):
         val = p.restore()
         rsp = '%d\n%s' % (rsp.res, val)
--- a/mercurial/hgweb/server.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/server.py	Wed Jan 18 11:43:36 2017 -0500
@@ -18,6 +18,7 @@
 
 from .. import (
     error,
+    pycompat,
     util,
 )
 
@@ -266,7 +267,7 @@
 class MercurialHTTPServer(_mixin, httpservermod.httpserver, object):
 
     # SO_REUSEADDR has broken semantics on windows
-    if os.name == 'nt':
+    if pycompat.osname == 'nt':
         allow_reuse_address = 0
 
     def __init__(self, ui, app, addr, handler, **kwargs):
@@ -281,8 +282,8 @@
             prefix = '/' + prefix.strip('/')
         self.prefix = prefix
 
-        alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout)
-        elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr)
+        alog = openlog(ui.config('web', 'accesslog', '-'), ui.fout)
+        elog = openlog(ui.config('web', 'errorlog', '-'), ui.ferr)
         self.accesslog = alog
         self.errorlog = elog
 
--- a/mercurial/hgweb/webcommands.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/webcommands.py	Wed Jan 18 11:43:36 2017 -0500
@@ -972,28 +972,23 @@
     morevars['revcount'] = revcount * 2
 
     count = fctx.filerev() + 1
-    start = max(0, fctx.filerev() - revcount + 1) # first rev on this page
+    start = max(0, count - revcount) # first rev on this page
     end = min(count, start + revcount) # last rev on this page
     parity = paritygen(web.stripecount, offset=start - end)
 
-    def entries():
-        l = []
-
-        repo = web.repo
-        revs = fctx.filelog().revs(start, end - 1)
-        for i in revs:
-            iterfctx = fctx.filectx(i)
+    repo = web.repo
+    revs = fctx.filelog().revs(start, end - 1)
+    entries = []
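+    # build entries oldest-first, then reverse below so the newest
+    # revision appears first on the page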
+    for i in revs:
+        iterfctx = fctx.filectx(i)
+        entries.append(dict(
+            parity=next(parity),
+            filerev=i,
+            file=f,
+            rename=webutil.renamelink(iterfctx),
+            **webutil.commonentry(repo, iterfctx)))
+    entries.reverse()
 
-            l.append(dict(
-                parity=next(parity),
-                filerev=i,
-                file=f,
-                rename=webutil.renamelink(iterfctx),
-                **webutil.commonentry(repo, iterfctx)))
-        for e in reversed(l):
-            yield e
-
-    entries = list(entries())
     latestentry = entries[:1]
 
     revnav = webutil.filerevnav(web.repo, fctx.path())
@@ -1034,7 +1029,7 @@
     allowed = web.configlist("web", "allow_archive")
     key = req.form['node'][0]
 
-    if type_ not in web.archives:
+    if type_ not in web.archivespecs:
         msg = 'Unsupported archive type: %s' % type_
         raise ErrorResponse(HTTP_NOT_FOUND, msg)
 
@@ -1302,7 +1297,7 @@
         return tmpl('helptopics', topics=topics, title=topicname,
                     subindex=True)
 
-    u = webutil.wsgiui()
+    u = webutil.wsgiui.load()
     u.verbose = True
 
     # Render a page from a sub-topic.
--- a/mercurial/hgweb/webutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/webutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -269,7 +269,7 @@
     try:
         ctx = repo[changeid]
     except error.RepoError:
-        man = repo.manifest
+        man = repo.manifestlog._revlog
         ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
 
     return ctx
--- a/mercurial/hgweb/wsgicgi.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hgweb/wsgicgi.py	Wed Jan 18 11:43:36 2017 -0500
@@ -10,10 +10,8 @@
 
 from __future__ import absolute_import
 
-import os
-import sys
-
 from .. import (
+    encoding,
     util,
 )
 
@@ -22,10 +20,10 @@
 )
 
 def launch(application):
-    util.setbinary(sys.stdin)
-    util.setbinary(sys.stdout)
+    util.setbinary(util.stdin)
+    util.setbinary(util.stdout)
 
-    environ = dict(os.environ.iteritems())
+    environ = dict(encoding.environ.iteritems())
     environ.setdefault('PATH_INFO', '')
     if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'):
         # IIS includes script_name in PATH_INFO
@@ -33,12 +31,12 @@
         if environ['PATH_INFO'].startswith(scriptname):
             environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):]
 
-    stdin = sys.stdin
+    stdin = util.stdin
     if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
-        stdin = common.continuereader(stdin, sys.stdout.write)
+        stdin = common.continuereader(stdin, util.stdout.write)
 
     environ['wsgi.input'] = stdin
-    environ['wsgi.errors'] = sys.stderr
+    environ['wsgi.errors'] = util.stderr
     environ['wsgi.version'] = (1, 0)
     environ['wsgi.multithread'] = False
     environ['wsgi.multiprocess'] = True
@@ -51,7 +49,7 @@
 
     headers_set = []
     headers_sent = []
-    out = sys.stdout
+    out = util.stdout
 
     def write(data):
         if not headers_set:
--- a/mercurial/hook.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/hook.py	Wed Jan 18 11:43:36 2017 -0500
@@ -16,6 +16,7 @@
     demandimport,
     error,
     extensions,
+    pycompat,
     util,
 )
 
@@ -90,12 +91,6 @@
     starttime = time.time()
 
     try:
-        # redirect IO descriptors to the ui descriptors so hooks
-        # that write directly to these don't mess up the command
-        # protocol when running through the command server
-        old = sys.stdout, sys.stderr, sys.stdin
-        sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin
-
         r = obj(ui=ui, repo=repo, hooktype=name, **args)
     except Exception as exc:
         if isinstance(exc, error.Abort):
@@ -111,7 +106,6 @@
         ui.traceback()
         return True, True
     finally:
-        sys.stdout, sys.stderr, sys.stdin = old
         duration = time.time() - starttime
         ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
                name, funcname, duration)
@@ -148,7 +142,7 @@
     if repo:
         cwd = repo.root
     else:
-        cwd = os.getcwd()
+        cwd = pycompat.getcwd()
     r = ui.system(cmd, environ=env, cwd=cwd)
 
     duration = time.time() - starttime
@@ -216,11 +210,11 @@
         for hname, cmd in hooks:
             if oldstdout == -1 and _redirect:
                 try:
-                    stdoutno = sys.__stdout__.fileno()
-                    stderrno = sys.__stderr__.fileno()
+                    stdoutno = util.stdout.fileno()
+                    stderrno = util.stderr.fileno()
                     # temporarily redirect stdout to stderr, if possible
                     if stdoutno >= 0 and stderrno >= 0:
-                        sys.__stdout__.flush()
+                        util.stdout.flush()
                         oldstdout = os.dup(stdoutno)
                         os.dup2(stderrno, stdoutno)
                 except (OSError, AttributeError):
@@ -262,9 +256,10 @@
             # The stderr is fully buffered on Windows when connected to a pipe.
             # A forcible flush is required to make small stderr data in the
             # remote side available to the client immediately.
-            sys.stderr.flush()
+            util.stderr.flush()
     finally:
         if _redirect and oldstdout >= 0:
+            util.stdout.flush()  # write hook output to stderr fd
             os.dup2(oldstdout, stdoutno)
             os.close(oldstdout)
 
--- a/mercurial/httppeer.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/httppeer.py	Wed Jan 18 11:43:36 2017 -0500
@@ -11,8 +11,8 @@
 import errno
 import os
 import socket
+import struct
 import tempfile
-import zlib
 
 from .i18n import _
 from .node import nullid
@@ -30,16 +30,49 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-def zgenerator(f):
-    zd = zlib.decompressobj()
+# FUTURE: consider refactoring this API to use generators. This will
+# require a compression engine API to emit generators.
+def decompressresponse(response, engine):
     try:
-        for chunk in util.filechunkiter(f):
-            while chunk:
-                yield zd.decompress(chunk, 2**18)
-                chunk = zd.unconsumed_tail
+        reader = engine.decompressorreader(response)
     except httplib.HTTPException:
         raise IOError(None, _('connection ended unexpectedly'))
-    yield zd.flush()
+
+    # We need to wrap reader.read() so HTTPException on subsequent
+    # reads is also converted.
+    # Ideally we'd use super() here. However, if ``reader`` isn't a new-style
+    # class, this can raise:
+    # TypeError: super() argument 1 must be type, not classobj
+    origread = reader.read
+    class readerproxy(reader.__class__):
+        def read(self, *args, **kwargs):
+            try:
+                return origread(*args, **kwargs)
+            except httplib.HTTPException:
+                raise IOError(None, _('connection ended unexpectedly'))
+
+    reader.__class__ = readerproxy
+    return reader
+
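[Editor's note: the class swap above is a general trick for wrapping one method on a single instance when super() is unavailable. A minimal sketch with hypothetical names:]

    class base(object):
        def read(self):
            return 'data'

    obj = base()
    origread = obj.read
    class proxy(obj.__class__):
        def read(self, *args, **kwargs):
            return origread(*args, **kwargs).upper()
    obj.__class__ = proxy
    assert obj.read() == 'DATA'  # only this one instance is affected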
+def encodevalueinheaders(value, header, limit):
+    """Encode a string value into multiple HTTP headers.
+
+    ``value`` will be encoded into 1 or more HTTP headers with the names
+    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
+    name + value will be at most ``limit`` bytes long.
+
+    Returns an iterable of 2-tuples consisting of header names and values.
+    """
+    fmt = header + '-%s'
+    valuelen = limit - len(fmt % '000') - len(': \r\n')
+    result = []
+
+    n = 0
+    for i in xrange(0, len(value), valuelen):
+        n += 1
+        result.append((fmt % str(n), value[i:i + valuelen]))
+
+    return result
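[Editor's note: a worked example of the helper above. With limit=30 and header 'X-HgArg', each value chunk may be 30 - len('X-HgArg-000') - len(': \r\n') = 15 bytes; the argument string is illustrative.]

    >>> encodevalueinheaders('cmd=batch&foo=bar', 'X-HgArg', 30)
    [('X-HgArg-1', 'cmd=batch&foo=b'), ('X-HgArg-2', 'ar')]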
 
 class httppeer(wireproto.wirepeer):
     def __init__(self, ui, path):
@@ -90,7 +123,7 @@
     def lock(self):
         raise error.Abort(_('operation not supported over http'))
 
-    def _callstream(self, cmd, **args):
+    def _callstream(self, cmd, _compressible=False, **args):
         if cmd == 'pushkey':
             args['data'] = ''
         data = args.pop('data', None)
@@ -99,6 +132,7 @@
         self.ui.debug("sending %s command\n" % cmd)
         q = [('cmd', cmd)]
         headersize = 0
+        varyheaders = []
         # Important: don't use self.capable() here or else you end up
         # with infinite recursion when trying to look up capabilities
         # for the first time.
@@ -122,16 +156,10 @@
             if headersize > 0:
                 # The headers can typically carry more data than the URL.
                 encargs = urlreq.urlencode(sorted(args.items()))
-                headerfmt = 'X-HgArg-%s'
-                contentlen = headersize - len(headerfmt % '000' + ': \r\n')
-                headernum = 0
-                varyheaders = []
-                for i in xrange(0, len(encargs), contentlen):
-                    headernum += 1
-                    header = headerfmt % str(headernum)
-                    headers[header] = encargs[i:i + contentlen]
+                for header, value in encodevalueinheaders(encargs, 'X-HgArg',
+                                                          headersize):
+                    headers[header] = value
                     varyheaders.append(header)
-                headers['Vary'] = ','.join(varyheaders)
             else:
                 q += sorted(args.items())
         qs = '?%s' % urlreq.urlencode(q)
@@ -146,7 +174,41 @@
             headers['X-HgHttp2'] = '1'
         if data is not None and 'Content-Type' not in headers:
             headers['Content-Type'] = 'application/mercurial-0.1'
+
+        # Tell the server we accept application/mercurial-0.2 and multiple
+        # compression formats if the server is capable of emitting those
+        # payloads.
+        protoparams = []
+
+        mediatypes = set()
+        if self.caps is not None:
+            mt = self.capable('httpmediatype')
+            if mt:
+                protoparams.append('0.1')
+                mediatypes = set(mt.split(','))
+
+        if '0.2tx' in mediatypes:
+            protoparams.append('0.2')
+
+        if '0.2tx' in mediatypes and self.capable('compression'):
+            # We /could/ compare supported compression formats and prune
+            # those not mutually supported, or error out if nothing is
+            # mutually supported. For now, send the full list to the server
+            # and have it error.
+            comps = [e.wireprotosupport().name for e in
+                     util.compengines.supportedwireengines(util.CLIENTROLE)]
+            protoparams.append('comp=%s' % ','.join(comps))
+
+        if protoparams:
+            protoheaders = encodevalueinheaders(' '.join(protoparams),
+                                                'X-HgProto',
+                                                headersize or 1024)
+            for header, value in protoheaders:
+                headers[header] = value
+                varyheaders.append(header)
+
+        headers['Vary'] = ','.join(varyheaders)
         req = self.requestbuilder(cu, data, headers)
+
         if data is not None:
             self.ui.debug("sending %s bytes\n" % size)
             req.add_unredirected_header('Content-Length', '%d' % size)
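[Editor's note: for a client and server that both speak the 0.2 media type, the block above would typically emit request headers along these lines; the engine list varies by build, so the values are illustrative only.]

    X-HgProto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2
    Vary: X-HgArg-1,X-HgProto-1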
@@ -160,9 +222,6 @@
             self.ui.debug('http error while sending %s command\n' % cmd)
             self.ui.traceback()
             raise IOError(None, inst)
-        except IndexError:
-            # this only happens with Python 2.3, later versions raise URLError
-            raise error.Abort(_('http error, possibly caused by proxy setting'))
         # record the url we got redirected to
         resp_url = resp.geturl()
         if resp_url.endswith(qs):
@@ -197,10 +256,25 @@
             except ValueError:
                 raise error.RepoError(_("'%s' sent a broken Content-Type "
                                         "header (%s)") % (safeurl, proto))
-            if version_info > (0, 1):
+
+            if version_info == (0, 1):
+                if _compressible:
+                    return decompressresponse(resp, util.compengines['zlib'])
+                return resp
+            elif version_info == (0, 2):
+                # application/mercurial-0.2 always identifies the compression
+                # engine in the payload header.
+                elen = struct.unpack('B', resp.read(1))[0]
+                ename = resp.read(elen)
+                engine = util.compengines.forwiretype(ename)
+                return decompressresponse(resp, engine)
+            else:
                 raise error.RepoError(_("'%s' uses newer protocol %s") %
                                       (safeurl, version))
 
+        if _compressible:
+            return decompressresponse(resp, util.compengines['zlib'])
+
         return resp
 
     def _call(self, cmd, **args):
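[Editor's note: a self-contained sketch of the application/mercurial-0.2 framing read above: one byte giving the length of the compression engine name, then the name, then the compressed payload. The byte stream is hypothetical.]

    import io
    import struct

    resp = io.BytesIO(b'\x04zstd<compressed data follows>')
    elen = struct.unpack('B', resp.read(1))[0]  # engine name length
    ename = resp.read(elen)                     # engine name itself
    assert ename == b'zstd'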
@@ -271,8 +345,7 @@
                 os.unlink(filename)
 
     def _callcompressable(self, cmd, **args):
-        stream = self._callstream(cmd, **args)
-        return util.chunkbuffer(zgenerator(stream))
+        return self._callstream(cmd, _compressible=True, **args)
 
     def _abort(self, exception):
         raise exception
--- a/mercurial/i18n.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/i18n.py	Wed Jan 18 11:43:36 2017 -0500
@@ -19,7 +19,7 @@
 
 # modelled after templater.templatepath:
 if getattr(sys, 'frozen', None) is not None:
-    module = sys.executable
+    module = pycompat.sysexecutable
 else:
     module = __file__
 
@@ -29,7 +29,7 @@
     unicode = str
 
 _languages = None
-if (os.name == 'nt'
+if (pycompat.osname == 'nt'
     and 'LANGUAGE' not in encoding.environ
     and 'LC_ALL' not in encoding.environ
     and 'LC_MESSAGES' not in encoding.environ
@@ -49,6 +49,7 @@
 _ugettext = None
 
 def setdatapath(datapath):
+    datapath = pycompat.fsdecode(datapath)
     localedir = os.path.join(datapath, pycompat.sysstr('locale'))
     t = gettextmod.translation('hg', localedir, _languages, fallback=True)
     global _ugettext
--- a/mercurial/keepalive.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/keepalive.py	Wed Jan 18 11:43:36 2017 -0500
@@ -78,31 +78,6 @@
   easy to distinguish between non-200 responses.  The reason is that
   urllib2 tries to do clever things with error codes 301, 302, 401,
   and 407, and it wraps the object upon return.
-
-  For python versions earlier than 2.4, you can avoid this fancy error
-  handling by setting the module-level global HANDLE_ERRORS to zero.
-  You see, prior to 2.4, it's the HTTP Handler's job to determine what
-  to handle specially, and what to just pass up.  HANDLE_ERRORS == 0
-  means "pass everything up".  In python 2.4, however, this job no
-  longer belongs to the HTTP Handler and is now done by a NEW handler,
-  HTTPErrorProcessor.  Here's the bottom line:
-
-    python version < 2.4
-        HANDLE_ERRORS == 1  (default) pass up 200, treat the rest as
-                            errors
-        HANDLE_ERRORS == 0  pass everything up, error processing is
-                            left to the calling code
-    python version >= 2.4
-        HANDLE_ERRORS == 1  pass up 200, treat the rest as errors
-        HANDLE_ERRORS == 0  (default) pass everything up, let the
-                            other handlers (specifically,
-                            HTTPErrorProcessor) decide what to do
-
-  In practice, setting the variable either way makes little difference
-  in python 2.4, so for the most consistent behavior across versions,
-  you probably just want to use the defaults, which will give you
-  exceptions on errors.
-
 """
 
 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
@@ -125,10 +100,6 @@
 
 DEBUG = None
 
-if sys.version_info < (2, 4):
-    HANDLE_ERRORS = 1
-else: HANDLE_ERRORS = 0
-
 class ConnectionManager(object):
     """
     The connection manager must be able to:
@@ -277,11 +248,7 @@
         r.headers = r.msg
         r.msg = r.reason
 
-        if r.status == 200 or not HANDLE_ERRORS:
-            return r
-        else:
-            return self.parent.error('http', req, r,
-                                     r.status, r.msg, r.headers)
+        return r
 
     def _reuse_connection(self, h, req, host):
         """start the transaction with a re-used connection
@@ -332,10 +299,9 @@
     def _start_transaction(self, h, req):
         # What follows mostly reimplements HTTPConnection.request()
         # except it adds self.parent.addheaders in the mix.
-        headers = req.headers.copy()
-        if sys.version_info >= (2, 4):
-            headers.update(req.unredirected_hdrs)
-        headers.update(self.parent.addheaders)
+        headers = dict(self.parent.addheaders)
+        headers.update(req.headers)
+        headers.update(req.unredirected_hdrs)
         headers = dict((n.lower(), v) for n, v in headers.items())
         skipheaders = {}
         for n in ('host', 'accept-encoding'):
@@ -433,10 +399,8 @@
     # stolen from Python SVN #68532 to fix issue1088
     def _read_chunked(self, amt):
         chunk_left = self.chunk_left
-        value = ''
+        parts = []
 
-        # XXX This accumulates chunks by repeated string concatenation,
-        # which is not efficient as the number or size of chunks gets big.
         while True:
             if chunk_left is None:
                 line = self.fp.readline()
@@ -449,22 +413,22 @@
                     # close the connection as protocol synchronization is
                     # probably lost
                     self.close()
-                    raise httplib.IncompleteRead(value)
+                    raise httplib.IncompleteRead(''.join(parts))
                 if chunk_left == 0:
                     break
             if amt is None:
-                value += self._safe_read(chunk_left)
+                parts.append(self._safe_read(chunk_left))
             elif amt < chunk_left:
-                value += self._safe_read(amt)
+                parts.append(self._safe_read(amt))
                 self.chunk_left = chunk_left - amt
-                return value
+                return ''.join(parts)
             elif amt == chunk_left:
-                value += self._safe_read(amt)
+                parts.append(self._safe_read(amt))
                 self._safe_read(2)  # toss the CRLF at the end of the chunk
                 self.chunk_left = None
-                return value
+                return ''.join(parts)
             else:
-                value += self._safe_read(chunk_left)
+                parts.append(self._safe_read(chunk_left))
                 amt -= chunk_left
 
             # we read the whole chunk, get another
@@ -485,26 +449,42 @@
         # we read everything; close the "file"
         self.close()
 
-        return value
+        return ''.join(parts)
 
-    def readline(self, limit=-1):
+    def readline(self):
+        # Fast path: a line is already available in the read buffer.
         i = self._rbuf.find('\n')
-        while i < 0 and not (0 < limit <= len(self._rbuf)):
-            new = self._raw_read(self._rbufsize)
+        if i >= 0:
+            i += 1
+            line = self._rbuf[:i]
+            self._rbuf = self._rbuf[i:]
+            return line
+
+        # No newline in local buffer. Read until we find one.
+        chunks = [self._rbuf]
+        i = -1
+        readsize = self._rbufsize
+        while True:
+            new = self._raw_read(readsize)
             if not new:
                 break
+
+            chunks.append(new)
             i = new.find('\n')
             if i >= 0:
-                i = i + len(self._rbuf)
-            self._rbuf = self._rbuf + new
-        if i < 0:
-            i = len(self._rbuf)
-        else:
-            i = i + 1
-        if 0 <= limit < len(self._rbuf):
-            i = limit
-        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
-        return data
+                break
+
+        # We either have exhausted the stream or have a newline in chunks[-1].
+
+        # EOF
+        if i == -1:
+            self._rbuf = ''
+            return ''.join(chunks)
+
+        i += 1
+        self._rbuf = chunks[-1][i:]
+        chunks[-1] = chunks[-1][:i]
+        return ''.join(chunks)
 
     def readlines(self, sizehint=0):
         total = 0
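[Editor's note: a standalone sketch of the buffered readline strategy above, assuming a hypothetical _raw_read-style source. The fast path returns straight from the buffer; the slow path accumulates chunks and splits the last one at the newline.]

    class linereader(object):
        def __init__(self, data, bufsize=4):
            self._data, self._pos = data, 0
            self._rbuf, self._rbufsize = '', bufsize

        def _raw_read(self, n):  # stands in for the socket read
            chunk = self._data[self._pos:self._pos + n]
            self._pos += len(chunk)
            return chunk

        def readline(self):
            i = self._rbuf.find('\n')
            if i >= 0:  # fast path: newline already buffered
                line, self._rbuf = self._rbuf[:i + 1], self._rbuf[i + 1:]
                return line
            chunks, i = [self._rbuf], -1
            while True:
                new = self._raw_read(self._rbufsize)
                if not new:
                    break
                chunks.append(new)
                i = new.find('\n')
                if i >= 0:
                    break
            if i == -1:  # EOF with no newline left
                self._rbuf = ''
                return ''.join(chunks)
            self._rbuf = chunks[-1][i + 1:]
            chunks[-1] = chunks[-1][:i + 1]
            return ''.join(chunks)

    r = linereader('first line\nsecond\n')
    assert r.readline() == 'first line\n'
    assert r.readline() == 'second\n'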
@@ -596,33 +576,6 @@
 #####   TEST FUNCTIONS
 #########################################################################
 
-def error_handler(url):
-    global HANDLE_ERRORS
-    orig = HANDLE_ERRORS
-    keepalive_handler = HTTPHandler()
-    opener = urlreq.buildopener(keepalive_handler)
-    urlreq.installopener(opener)
-    pos = {0: 'off', 1: 'on'}
-    for i in (0, 1):
-        print("  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i))
-        HANDLE_ERRORS = i
-        try:
-            fo = urlreq.urlopen(url)
-            fo.read()
-            fo.close()
-            try:
-                status, reason = fo.status, fo.reason
-            except AttributeError:
-                status, reason = None, None
-        except IOError as e:
-            print("  EXCEPTION: %s" % e)
-            raise
-        else:
-            print("  status = %s, reason = %s" % (status, reason))
-    HANDLE_ERRORS = orig
-    hosts = keepalive_handler.open_connections()
-    print("open connections:", hosts)
-    keepalive_handler.close_all()
 
 def continuity(url):
     md5 = hashlib.md5
@@ -661,14 +614,14 @@
 def comp(N, url):
     print('  making %i connections to:\n  %s' % (N, url))
 
-    sys.stdout.write('  first using the normal urllib handlers')
+    util.stdout.write('  first using the normal urllib handlers')
     # first use normal opener
     opener = urlreq.buildopener()
     urlreq.installopener(opener)
     t1 = fetch(N, url)
     print('  TIME: %.3f s' % t1)
 
-    sys.stdout.write('  now using the keepalive handler       ')
+    util.stdout.write('  now using the keepalive handler       ')
     # now install the keepalive handler and try again
     opener = urlreq.buildopener(HTTPHandler())
     urlreq.installopener(opener)
@@ -713,11 +666,11 @@
     i = 20
     print("  waiting %i seconds for the server to close the connection" % i)
     while i > 0:
-        sys.stdout.write('\r  %2i' % i)
-        sys.stdout.flush()
+        util.stdout.write('\r  %2i' % i)
+        util.stdout.flush()
         time.sleep(1)
         i -= 1
-    sys.stderr.write('\r')
+    util.stderr.write('\r')
 
     print("  fetching the file a second time")
     fo = urlreq.urlopen(url)
@@ -733,12 +686,6 @@
 
 
 def test(url, N=10):
-    print("checking error handler (do this on a non-200)")
-    try: error_handler(url)
-    except IOError:
-        print("exiting - exception will prevent further tests")
-        sys.exit()
-    print('')
     print("performing continuity test (making sure stuff isn't corrupted)")
     continuity(url)
     print('')
--- a/mercurial/localrepo.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/localrepo.py	Wed Jan 18 11:43:36 2017 -0500
@@ -28,9 +28,9 @@
     bundle2,
     changegroup,
     changelog,
-    cmdutil,
     context,
     dirstate,
+    dirstateguard,
     encoding,
     error,
     exchange,
@@ -41,6 +41,7 @@
     manifest,
     match as matchmod,
     merge as mergemod,
+    mergeutil,
     namespaces,
     obsolete,
     pathutil,
@@ -248,7 +249,7 @@
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()
 
-    def __init__(self, baseui, path=None, create=False):
+    def __init__(self, baseui, path, create=False):
         self.requirements = set()
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         self.wopener = self.wvfs
@@ -283,6 +284,12 @@
         else:
             self.supported = self._basesupported
 
+        # Add compression engines.
+        for name in util.compengines:
+            engine = util.compengines[name]
+            if engine.revlogheader():
+                self.supported.add('exp-compression-%s' % name)
+
         if not self.vfs.isdir():
             if create:
                 self.requirements = newreporequirements(self)
@@ -396,6 +403,10 @@
         self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
 
+        for r in self.requirements:
+            if r.startswith('exp-compression-'):
+                self.svfs.options['compengine'] = r[len('exp-compression-'):]
+
     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)
 
@@ -498,21 +509,17 @@
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.svfs)
-        if 'HG_PENDING' in os.environ:
-            p = os.environ['HG_PENDING']
+        if 'HG_PENDING' in encoding.environ:
+            p = encoding.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c
 
-    @property
-    def manifest(self):
-        return self.manifestlog._oldmanifest
-
     def _constructmanifest(self):
         # This is a temporary function while we migrate from manifest to
         # manifestlog. It allows bundlerepo and unionrepo to intercept the
         # manifest creation.
-        return manifest.manifest(self.svfs)
+        return manifest.manifestrevlog(self.svfs)
 
     @storecache('00manifest.i')
     def manifestlog(self):
@@ -1019,8 +1026,7 @@
         if (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
             if self._currentlock(self._lockref) is None:
-                raise RuntimeError('programming error: transaction requires '
-                                   'locking')
+                raise error.ProgrammingError('transaction requires locking')
         tr = self.currenttransaction()
         if tr is not None:
             return tr.nest()
@@ -1145,7 +1151,7 @@
             wlock = self.wlock()
             lock = self.lock()
             if self.svfs.exists("undo"):
-                dsguard = cmdutil.dirstateguard(self, 'rollback')
+                dsguard = dirstateguard.dirstateguard(self, 'rollback')
 
                 return self._rollback(dryrun, force, dsguard)
             else:
@@ -1303,7 +1309,7 @@
         # the contents of parentenvvar are used by the underlying lock to
         # determine whether it can be inherited
         if parentenvvar is not None:
-            parentlock = os.environ.get(parentenvvar)
+            parentlock = encoding.environ.get(parentenvvar)
         try:
             l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                              acquirefn=acquirefn, desc=desc,
@@ -1502,7 +1508,7 @@
         return fparent1
 
     def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
-        """check for commit arguments that aren't commitable"""
+        """check for commit arguments that aren't committable"""
         if match.isexact() or match.prefix():
             matched = set(status.modified + status.added + status.removed)
 
@@ -1633,13 +1639,7 @@
                 raise error.Abort(_("cannot commit merge with missing files"))
 
             ms = mergemod.mergestate.read(self)
-
-            if list(ms.unresolved()):
-                raise error.Abort(_("unresolved merge conflicts "
-                                    "(see 'hg help resolve')"))
-            if ms.mdstate() != 's' or list(ms.driverresolved()):
-                raise error.Abort(_('driver-resolved merge conflicts'),
-                                  hint=_('run "hg resolve --all" to resolve'))
+            mergeutil.checkunresolved(ms)
 
             if editor:
                 cctx._text = editor(self, cctx, subs)
@@ -1705,10 +1705,18 @@
             tr = self.transaction("commit")
             trp = weakref.proxy(tr)
 
-            if ctx.files():
-                m1 = p1.manifest()
-                m2 = p2.manifest()
-                m = m1.copy()
+            if ctx.manifestnode():
+                # reuse an existing manifest revision
+                mn = ctx.manifestnode()
+                files = ctx.files()
+            elif ctx.files():
+                m1ctx = p1.manifestctx()
+                m2ctx = p2.manifestctx()
+                mctx = m1ctx.copy()
+
+                m = mctx.read()
+                m1 = m1ctx.read()
+                m2 = m2ctx.read()
 
                 # check in files
                 added = []
@@ -1742,9 +1750,9 @@
                 drop = [f for f in removed if f in m]
                 for f in drop:
                     del m[f]
-                mn = self.manifestlog.add(m, trp, linkrev,
-                                          p1.manifestnode(), p2.manifestnode(),
-                                          added, drop)
+                mn = mctx.write(trp, linkrev,
+                                p1.manifestnode(), p2.manifestnode(),
+                                added, drop)
                 files = changed + removed
             else:
                 mn = p1.manifestnode()
@@ -1996,6 +2004,18 @@
             if ui.configbool('format', 'dotencode', True):
                 requirements.add('dotencode')
 
+    compengine = ui.config('experimental', 'format.compression', 'zlib')
+    if compengine not in util.compengines:
+        raise error.Abort(_('compression engine %s defined by '
+                            'experimental.format.compression not available') %
+                          compengine,
+                          hint=_('run "hg debuginstall" to list available '
+                                 'compression engines'))
+
+    # zlib is the historical default and doesn't need an explicit requirement.
+    if compengine != 'zlib':
+        requirements.add('exp-compression-%s' % compengine)
+
     if scmutil.gdinitconfig(ui):
         requirements.add('generaldelta')
     if ui.configbool('experimental', 'treemanifest', False):
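[Editor's note: with the hunk above, opting a new repository into an alternative engine becomes a config knob. An illustrative hgrc, assuming a build where zstd is compiled in:]

    [experimental]
    format.compression = zstd

A repository created this way records exp-compression-zstd in .hg/requires, so clients lacking that engine refuse to open it rather than misread the revlogs.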
--- a/mercurial/mail.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/mail.py	Wed Jan 18 11:43:36 2017 -0500
@@ -5,7 +5,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import
 
 import email
 import email.charset
@@ -14,7 +14,6 @@
 import quopri
 import smtplib
 import socket
-import sys
 import time
 
 from .i18n import _
@@ -87,7 +86,7 @@
 
     def _get_socket(self, host, port, timeout):
         if self.debuglevel > 0:
-            print('connect:', (host, port), file=sys.stderr)
+            self._ui.debug('connect: %r\n' % (host, port))
         new_socket = socket.create_connection((host, port), timeout)
         new_socket = sslutil.wrapsocket(new_socket,
                                         self.keyfile, self.certfile,
--- a/mercurial/manifest.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/manifest.py	Wed Jan 18 11:43:36 2017 -0500
@@ -422,6 +422,11 @@
     def __len__(self):
         return len(self._lm)
 
+    def __nonzero__(self):
+        # nonzero is covered by the __len__ function, but implementing it here
+        # makes it easier for extensions to override.
+        return len(self._lm) != 0
+
     def __setitem__(self, key, node):
         self._lm[key] = node, self.flags(key, '')
 
@@ -1178,7 +1183,7 @@
                                                     self._dirlogcache)
         return self._dirlogcache[dir]
 
-    def add(self, m, transaction, link, p1, p2, added, removed):
+    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
         if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
             and not self._usemanifestv2):
             # If our first parent is in the manifest cache, we can
@@ -1201,9 +1206,10 @@
             # through to the revlog layer, and let it handle the delta
             # process.
             if self._treeondisk:
-                m1 = self.read(p1)
-                m2 = self.read(p2)
-                n = self._addtree(m, transaction, link, m1, m2)
+                assert readtree, "readtree must be set for treemanifest writes"
+                m1 = readtree(self._dir, p1)
+                m2 = readtree(self._dir, p2)
+                n = self._addtree(m, transaction, link, m1, m2, readtree)
                 arraytext = None
             else:
                 text = m.text(self._usemanifestv2)
@@ -1215,14 +1221,15 @@
 
         return n
 
-    def _addtree(self, m, transaction, link, m1, m2):
+    def _addtree(self, m, transaction, link, m1, m2, readtree):
         # If the manifest is unchanged compared to one parent,
         # don't write a new revision
         if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
             return m.node()
         def writesubtree(subm, subp1, subp2):
             sublog = self.dirlog(subm.dir())
-            sublog.add(subm, transaction, link, subp1, subp2, None, None)
+            sublog.add(subm, transaction, link, subp1, subp2, None, None,
+                       readtree=readtree)
         m.writesubtrees(m1, m2, writesubtree)
         text = m.dirtext(self._usemanifestv2)
         # Double-check whether contents are unchanged to one parent
@@ -1248,41 +1255,100 @@
         self._repo = repo
 
         usetreemanifest = False
+        cachesize = 4
 
         opts = getattr(opener, 'options', None)
         if opts is not None:
             usetreemanifest = opts.get('treemanifest', usetreemanifest)
+            cachesize = opts.get('manifestcachesize', cachesize)
         self._treeinmem = usetreemanifest
 
         self._oldmanifest = repo._constructmanifest()
         self._revlog = self._oldmanifest
 
-        # We'll separate this into it's own cache once oldmanifest is no longer
-        # used
-        self._mancache = self._oldmanifest._mancache
+        # A cache of the manifestctx or treemanifestctx for each directory
+        self._dirmancache = {}
+        self._dirmancache[''] = util.lrucachedict(cachesize)
+
+        self.cachesize = cachesize
 
     def __getitem__(self, node):
-        """Retrieves the manifest instance for the given node. Throws a KeyError
-        if not found.
+        """Retrieves the manifest instance for the given node. Throws a
+        LookupError if not found.
         """
-        if node in self._mancache:
-            cachemf = self._mancache[node]
-            # The old manifest may put non-ctx manifests in the cache, so skip
-            # those since they don't implement the full api.
+        return self.get('', node)
+
+    def get(self, dir, node, verify=True):
+        """Retrieves the manifest instance for the given node. Throws a
+        LookupError if not found.
+
+        `verify` - if True, an exception will be thrown if the node is not in
+                   the revlog
+        """
+        if node in self._dirmancache.get(dir, ()):
+            cachemf = self._dirmancache[dir][node]
+            # The old manifest may put non-ctx manifests in the cache, so
+            # skip those since they don't implement the full api.
             if (isinstance(cachemf, manifestctx) or
                 isinstance(cachemf, treemanifestctx)):
                 return cachemf
 
-        if self._treeinmem:
-            m = treemanifestctx(self._repo, '', node)
+        if dir:
+            if self._revlog._treeondisk:
+                if verify:
+                    dirlog = self._revlog.dirlog(dir)
+                    if node not in dirlog.nodemap:
+                        raise LookupError(node, dirlog.indexfile,
+                                          _('no node'))
+                m = treemanifestctx(self._repo, dir, node)
+            else:
+                raise error.Abort(
+                        _("cannot ask for manifest directory '%s' in a flat "
+                          "manifest") % dir)
         else:
-            m = manifestctx(self._repo, node)
+            if verify:
+                if node not in self._revlog.nodemap:
+                    raise LookupError(node, self._revlog.indexfile,
+                                      _('no node'))
+            if self._treeinmem:
+                m = treemanifestctx(self._repo, '', node)
+            else:
+                m = manifestctx(self._repo, node)
+
         if node != revlog.nullid:
-            self._mancache[node] = m
+            mancache = self._dirmancache.get(dir)
+            if not mancache:
+                mancache = util.lrucachedict(self.cachesize)
+                self._dirmancache[dir] = mancache
+            mancache[node] = m
         return m
 
-    def add(self, m, transaction, link, p1, p2, added, removed):
-        return self._revlog.add(m, transaction, link, p1, p2, added, removed)
+    def clearcaches(self):
+        self._dirmancache.clear()
+        self._revlog.clearcaches()
+
+class memmanifestctx(object):
+    def __init__(self, repo):
+        self._repo = repo
+        self._manifestdict = manifestdict()
+
+    def _revlog(self):
+        return self._repo.manifestlog._revlog
+
+    def new(self):
+        return memmanifestctx(self._repo)
+
+    def copy(self):
+        memmf = memmanifestctx(self._repo)
+        memmf._manifestdict = self.read().copy()
+        return memmf
+
+    def read(self):
+        return self._manifestdict
+
+    def write(self, transaction, link, p1, p2, added, removed):
+        return self._revlog().add(self._manifestdict, transaction, link, p1, p2,
+                                  added, removed)
 
 class manifestctx(object):
     """A class representing a single revision of a manifest, including its
@@ -1301,35 +1367,62 @@
         #rev = revlog.rev(node)
         #self.linkrev = revlog.linkrev(rev)
 
+    def _revlog(self):
+        return self._repo.manifestlog._revlog
+
     def node(self):
         return self._node
 
+    def new(self):
+        return memmanifestctx(self._repo)
+
+    def copy(self):
+        memmf = memmanifestctx(self._repo)
+        memmf._manifestdict = self.read().copy()
+        return memmf
+
+    @propertycache
+    def parents(self):
+        return self._revlog().parents(self._node)
+
     def read(self):
         if not self._data:
             if self._node == revlog.nullid:
                 self._data = manifestdict()
             else:
-                rl = self._repo.manifestlog._revlog
+                rl = self._revlog()
                 text = rl.revision(self._node)
                 arraytext = array.array('c', text)
                 rl._fulltextcache[self._node] = arraytext
                 self._data = manifestdict(text)
         return self._data
 
-    def readfast(self):
-        rl = self._repo.manifestlog._revlog
+    def readfast(self, shallow=False):
+        '''Calls either readdelta or read, based on which would be less work.
+        readdelta is called if the delta is against the p1, and therefore can be
+        read quickly.
+
+        If `shallow` is True, nothing changes since this is a flat manifest.
+        '''
+        rl = self._revlog()
         r = rl.rev(self._node)
         deltaparent = rl.deltaparent(r)
         if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
             return self.readdelta()
         return self.read()
 
-    def readdelta(self):
-        revlog = self._repo.manifestlog._revlog
+    def readdelta(self, shallow=False):
+        '''Returns a manifest containing just the entries that are present
+        in this manifest, but not in its p1 manifest. This is efficient to read
+        if the revlog delta is already p1.
+
+        Changing the value of `shallow` has no effect on flat manifests.
+        '''
+        revlog = self._revlog()
         if revlog._usemanifestv2:
             # Need to perform a slow delta
             r0 = revlog.deltaparent(revlog.rev(self._node))
-            m0 = manifestctx(self._repo, revlog.node(r0)).read()
+            m0 = self._repo.manifestlog[revlog.node(r0)].read()
             m1 = self.read()
             md = manifestdict()
             for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
@@ -1343,6 +1436,35 @@
         d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
         return manifestdict(d)
 
+    def find(self, key):
+        return self.read().find(key)
+
+class memtreemanifestctx(object):
+    def __init__(self, repo, dir=''):
+        self._repo = repo
+        self._dir = dir
+        self._treemanifest = treemanifest()
+
+    def _revlog(self):
+        return self._repo.manifestlog._revlog
+
+    def new(self, dir=''):
+        return memtreemanifestctx(self._repo, dir=dir)
+
+    def copy(self):
+        memmf = memtreemanifestctx(self._repo, dir=self._dir)
+        memmf._treemanifest = self._treemanifest.copy()
+        return memmf
+
+    def read(self):
+        return self._treemanifest
+
+    def write(self, transaction, link, p1, p2, added, removed):
+        def readtree(dir, node):
+            return self._repo.manifestlog.get(dir, node).read()
+        return self._revlog().add(self._treemanifest, transaction, link, p1, p2,
+                                  added, removed, readtree=readtree)
+
 class treemanifestctx(object):
     def __init__(self, repo, dir, node):
         self._repo = repo
@@ -1371,12 +1493,15 @@
                 def gettext():
                     return rl.revision(self._node)
                 def readsubtree(dir, subm):
-                    return treemanifestctx(self._repo, dir, subm).read()
+                    # Set verify to False since we need to be able to create
+                    # subtrees for trees that don't exist on disk.
+                    return self._repo.manifestlog.get(dir, subm,
+                                                      verify=False).read()
                 m.read(gettext, readsubtree)
                 m.setnode(self._node)
                 self._data = m
             else:
-                text = revlog.revision(self._node)
+                text = rl.revision(self._node)
                 arraytext = array.array('c', text)
                 rl.fulltextcache[self._node] = arraytext
                 self._data = treemanifest(dir=self._dir, text=text)
@@ -1386,150 +1511,66 @@
     def node(self):
         return self._node
 
-    def readdelta(self):
-        # Need to perform a slow delta
+    def new(self, dir=''):
+        return memtreemanifestctx(self._repo, dir=dir)
+
+    def copy(self):
+        memmf = memtreemanifestctx(self._repo, dir=self._dir)
+        memmf._treemanifest = self.read().copy()
+        return memmf
+
+    @propertycache
+    def parents(self):
+        return self._revlog().parents(self._node)
+
+    def readdelta(self, shallow=False):
+        '''Returns a manifest containing just the entries that are present
+        in this manifest, but not in its p1 manifest. This is efficient to read
+        if the revlog delta is already against p1.
+
+        If `shallow` is True, this will read the delta for this directory,
+        without recursively reading subdirectory manifests. Instead, any
+        subdirectory entry will be reported as it appears in the manifest, i.e.
+        the subdirectory will be reported among files and distinguished only by
+        its 't' flag.
+        '''
         revlog = self._revlog()
-        r0 = revlog.deltaparent(revlog.rev(self._node))
-        m0 = treemanifestctx(self._repo, self._dir, revlog.node(r0)).read()
-        m1 = self.read()
-        md = treemanifest(dir=self._dir)
-        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
-            if n1:
-                md[f] = n1
-                if fl1:
-                    md.setflag(f, fl1)
-        return md
+        if shallow and not revlog._usemanifestv2:
+            r = revlog.rev(self._node)
+            d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
+            return manifestdict(d)
+        else:
+            # Need to perform a slow delta
+            r0 = revlog.deltaparent(revlog.rev(self._node))
+            m0 = self._repo.manifestlog.get(self._dir, revlog.node(r0)).read()
+            m1 = self.read()
+            md = treemanifest(dir=self._dir)
+            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
+                if n1:
+                    md[f] = n1
+                    if fl1:
+                        md.setflag(f, fl1)
+            return md
 
-    def readfast(self):
+    def readfast(self, shallow=False):
+        '''Calls either readdelta or read, based on which would be less work.
+        readdelta is called if the delta is against p1, and can therefore be
+        read quickly.
+
+        If `shallow` is True, it only returns the entries from this manifest,
+        and not any submanifests.
+        '''
         rl = self._revlog()
         r = rl.rev(self._node)
         deltaparent = rl.deltaparent(r)
-        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
-            return self.readdelta()
-        return self.read()
-
-class manifest(manifestrevlog):
-    def __init__(self, opener, dir='', dirlogcache=None):
-        '''The 'dir' and 'dirlogcache' arguments are for internal use by
-        manifest.manifest only. External users should create a root manifest
-        log with manifest.manifest(opener) and call dirlog() on it.
-        '''
-        # During normal operations, we expect to deal with not more than four
-        # revs at a time (such as during commit --amend). When rebasing large
-        # stacks of commits, the number can go up, hence the config knob below.
-        cachesize = 4
-        usetreemanifest = False
-        opts = getattr(opener, 'options', None)
-        if opts is not None:
-            cachesize = opts.get('manifestcachesize', cachesize)
-            usetreemanifest = opts.get('treemanifest', usetreemanifest)
-        self._mancache = util.lrucachedict(cachesize)
-        self._treeinmem = usetreemanifest
-        super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)
-
-    def _newmanifest(self, data=''):
-        if self._treeinmem:
-            return treemanifest(self._dir, data)
-        return manifestdict(data)
-
-    def dirlog(self, dir):
-        """This overrides the base revlog implementation to allow construction
-        'manifest' types instead of manifestrevlog types. This is only needed
-        until we migrate off the 'manifest' type."""
-        if dir:
-            assert self._treeondisk
-        if dir not in self._dirlogcache:
-            self._dirlogcache[dir] = manifest(self.opener, dir,
-                                              self._dirlogcache)
-        return self._dirlogcache[dir]
-
-    def _slowreaddelta(self, node):
-        r0 = self.deltaparent(self.rev(node))
-        m0 = self.read(self.node(r0))
-        m1 = self.read(node)
-        md = self._newmanifest()
-        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
-            if n1:
-                md[f] = n1
-                if fl1:
-                    md.setflag(f, fl1)
-        return md
-
-    def readdelta(self, node):
-        if self._usemanifestv2 or self._treeondisk:
-            return self._slowreaddelta(node)
-        r = self.rev(node)
-        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
-        return self._newmanifest(d)
+        if (deltaparent != revlog.nullrev and
+            deltaparent in rl.parentrevs(r)):
+            return self.readdelta(shallow=shallow)
 
-    def readshallowdelta(self, node):
-        '''For flat manifests, this is the same as readdelta(). For
-        treemanifests, this will read the delta for this revlog's directory,
-        without recursively reading subdirectory manifests. Instead, any
-        subdirectory entry will be reported as it appears in the manifests, i.e.
-        the subdirectory will be reported among files and distinguished only by
-        its 't' flag.'''
-        if not self._treeondisk:
-            return self.readdelta(node)
-        if self._usemanifestv2:
-            raise error.Abort(
-                _("readshallowdelta() not implemented for manifestv2"))
-        r = self.rev(node)
-        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
-        return manifestdict(d)
-
-    def readshallowfast(self, node):
-        '''like readfast(), but calls readshallowdelta() instead of readdelta()
-        '''
-        r = self.rev(node)
-        deltaparent = self.deltaparent(r)
-        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
-            return self.readshallowdelta(node)
-        return self.readshallow(node)
+        if shallow:
+            return manifestdict(rl.revision(self._node))
+        else:
+            return self.read()
 
-    def read(self, node):
-        if node == revlog.nullid:
-            return self._newmanifest() # don't upset local cache
-        if node in self._mancache:
-            cached = self._mancache[node]
-            if (isinstance(cached, manifestctx) or
-                isinstance(cached, treemanifestctx)):
-                cached = cached.read()
-            return cached
-        if self._treeondisk:
-            def gettext():
-                return self.revision(node)
-            def readsubtree(dir, subm):
-                return self.dirlog(dir).read(subm)
-            m = self._newmanifest()
-            m.read(gettext, readsubtree)
-            m.setnode(node)
-            arraytext = None
-        else:
-            text = self.revision(node)
-            m = self._newmanifest(text)
-            arraytext = array.array('c', text)
-        self._mancache[node] = m
-        if arraytext is not None:
-            self.fulltextcache[node] = arraytext
-        return m
-
-    def readshallow(self, node):
-        '''Reads the manifest in this directory. When using flat manifests,
-        this manifest will generally have files in subdirectories in it. Does
-        not cache the manifest as the callers generally do not read the same
-        version twice.'''
-        return manifestdict(self.revision(node))
-
-    def find(self, node, f):
-        '''look up entry for a single file efficiently.
-        return (node, flags) pair if found, (None, None) if not.'''
-        m = self.read(node)
-        try:
-            return m.find(f)
-        except KeyError:
-            return None, None
-
-    def clearcaches(self):
-        super(manifest, self).clearcaches()
-        self._mancache.clear()
+    def find(self, key):
+        return self.read().find(key)
--- a/mercurial/match.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/match.py	Wed Jan 18 11:43:36 2017 -0500
@@ -669,7 +669,7 @@
     patterns = []
 
     fp = open(filepath)
-    for lineno, line in enumerate(fp, start=1):
+    for lineno, line in enumerate(util.iterfile(fp), start=1):
         if "#" in line:
             global _commentre
             if not _commentre:
--- a/mercurial/mdiff.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/mdiff.py	Wed Jan 18 11:43:36 2017 -0500
@@ -52,10 +52,12 @@
         'nodates': False,
         'nobinary': False,
         'noprefix': False,
+        'index': 0,
         'ignorews': False,
         'ignorewsamount': False,
         'ignoreblanklines': False,
         'upgrade': False,
+        'showsimilarity': False,
         }
 
     def __init__(self, **opts):
@@ -113,6 +115,45 @@
         s1 = i1
         s2 = i2
 
+def blocksinrange(blocks, rangeb):
+    """filter `blocks` like (a1, a2, b1, b2) from items outside line range
+    `rangeb` from ``(b1, b2)`` point of view.
+
+    Return `filteredblocks, rangea` where:
+
+    * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
+      `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
+      block ``(b1, b2)`` being inside `rangeb` if
+      ``rangeb[0] < b2 and b1 < rangeb[1]``;
+    * `rangea` is the line range w.r.t. ``(a1, a2)`` parts of `blocks`.
+    """
+    lbb, ubb = rangeb
+    lba, uba = None, None
+    filteredblocks = []
+    for block in blocks:
+        (a1, a2, b1, b2), stype = block
+        if lbb >= b1 and ubb <= b2 and stype == '=':
+            # rangeb is within a single "=" hunk, restrict back linerange1
+            # by offsetting rangeb
+            lba = lbb - b1 + a1
+            uba = ubb - b1 + a1
+        else:
+            if b1 <= lbb < b2:
+                if stype == '=':
+                    lba = a2 - (b2 - lbb)
+                else:
+                    lba = a1
+            if b1 < ubb <= b2:
+                if stype == '=':
+                    uba = a1 + (ubb - b1)
+                else:
+                    uba = a2
+        if lbb < b2 and b1 < ubb:
+            filteredblocks.append(block)
+    if lba is None or uba is None or uba < lba:
+        raise error.Abort(_('line range exceeds file size'))
+    return filteredblocks, (lba, uba)
+
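[Editor's note: a worked example for blocksinrange, using the ((a1, a2, b1, b2), stype) block format produced by allblocks below. rangeb = (1, 2) falls inside the single '=' block, so it maps back unchanged and the '!' block is filtered out.]

    >>> blocks = [((0, 2, 0, 2), '='), ((2, 3, 2, 4), '!')]
    >>> blocksinrange(blocks, (1, 2))
    ([((0, 2, 0, 2), '=')], (1, 2))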
 def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
     """Return (block, type) tuples, where block is an mdiff.blocks
     line entry. type is '=' for blocks matching exactly one another
--- a/mercurial/merge.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/merge.py	Wed Jan 18 11:43:36 2017 -0500
@@ -15,18 +15,20 @@
 
 from .i18n import _
 from .node import (
+    addednodeid,
     bin,
     hex,
+    modifiednodeid,
     nullhex,
     nullid,
     nullrev,
 )
 from . import (
     copies,
-    destutil,
     error,
     filemerge,
     obsolete,
+    pycompat,
     scmutil,
     subrepo,
     util,
@@ -66,7 +68,7 @@
        (experimental)
     m: the external merge driver defined for this merge plus its run state
        (experimental)
-    f: a (filename, dictonary) tuple of optional values for a given file
+    f: a (filename, dictionary) tuple of optional values for a given file
     X: unsupported mandatory record type (used in tests)
     x: unsupported advisory record type (used in tests)
     l: the labels for the parts of the merge.
@@ -792,7 +794,7 @@
     if matcher is not None and matcher.always():
         matcher = None
 
-    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
+    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
 
     # manifests fetched in order are going to be faster, so prime the caches
     [x.manifest() for x in
@@ -800,7 +802,7 @@
 
     if followcopies:
         ret = copies.mergecopies(repo, wctx, p2, pa)
-        copy, movewithdir, diverge, renamedelete = ret
+        copy, movewithdir, diverge, renamedelete, dirmove = ret
 
     repo.ui.note(_("resolving manifests\n"))
     repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
@@ -814,7 +816,7 @@
     if '.hgsubstate' in m1:
         # check whether sub state is modified
         if any(wctx.sub(s).dirty() for s in wctx.substate):
-            m1['.hgsubstate'] += '+'
+            m1['.hgsubstate'] = modifiednodeid
 
     # Compare manifests
     if matcher is not None:
@@ -873,7 +875,7 @@
                     else:
                         actions[f] = ('cd', (f, None, f, False, pa.node()),
                                       "prompt changed/deleted")
-                elif n1[20:] == 'a':
+                elif n1 == addednodeid:
                     # This extra 'a' is added by working copy manifest to mark
                     # the file as locally added. We should forget it instead of
                     # deleting it.
@@ -919,7 +921,16 @@
                     actions[f] = ('cm', (fl2, pa.node()),
                                   "remote created, get or merge")
             elif n2 != ma[f]:
-                if acceptremote:
+                df = None
+                for d in dirmove:
+                    if f.startswith(d):
+                        # new file added in a directory that was moved
+                        df = dirmove[d] + f[len(d):]
+                        break
+                if df in m1:
+                    actions[df] = ('m', (df, f, f, False, pa.node()),
+                            "local directory rename - respect move from " + f)
+                elif acceptremote:
                     actions[f] = ('c', (fl2,), "remote recreating")
                 else:
                     actions[f] = ('dc', (None, f, f, False, pa.node()),
@@ -1039,7 +1050,7 @@
     wjoin = repo.wjoin
     audit = repo.wvfs.audit
     try:
-        cwd = os.getcwd()
+        cwd = pycompat.getcwd()
     except OSError as err:
         if err.errno != errno.ENOENT:
             raise
@@ -1065,7 +1076,7 @@
         # cwd was present before we started to remove files
         # let's check if it is present after we removed them
         try:
-            os.getcwd()
+            pycompat.getcwd()
         except OSError as err:
             if err.errno != errno.ENOENT:
                 raise
@@ -1482,11 +1493,6 @@
         if ancestor is not None:
             pas = [repo[ancestor]]
 
-        if node is None:
-            repo.ui.deprecwarn('update with no target', '3.9')
-            rev, _mark, _act = destutil.destupdate(repo)
-            node = repo[rev].node()
-
         overwrite = force and not branchmerge
 
         p2 = repo[node]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/mergeutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,22 @@
+# mergeutil.py - help for merge processing in mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .i18n import _
+
+from . import (
+    error,
+)
+
+def checkunresolved(ms):
+    if list(ms.unresolved()):
+        raise error.Abort(_("unresolved merge conflicts "
+                            "(see 'hg help resolve')"))
+    if ms.mdstate() != 's' or list(ms.driverresolved()):
+        raise error.Abort(_('driver-resolved merge conflicts'),
+                          hint=_('run "hg resolve --all" to resolve'))
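[Editor's note: the typical call site, mirroring the localrepo.commitctx change earlier in this patch:]

    ms = mergemod.mergestate.read(repo)
    mergeutil.checkunresolved(ms)  # raises error.Abort if anything is unresolved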
--- a/mercurial/minirst.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/minirst.py	Wed Jan 18 11:43:36 2017 -0500
@@ -697,7 +697,7 @@
         if collapse:
             synthetic.reverse()
             for s in synthetic:
-                path = [blocks[i]['lines'][0] for i in s]
+                path = [blocks[syn]['lines'][0] for syn in s]
                 real = s[-1] + 2
                 realline = blocks[real]['lines']
                 realline[0] = ('"%s"' %
--- a/mercurial/node.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/node.py	Wed Jan 18 11:43:36 2017 -0500
@@ -17,6 +17,14 @@
 nullid = b"\0" * 20
 nullhex = hex(nullid)
 
+# Phony node values that stand in for new files in some uses of
+# manifests.
+newnodeid = '!' * 20
+addednodeid = ('0' * 15) + 'added'
+modifiednodeid = ('0' * 12) + 'modified'
+
+wdirnodes = set((newnodeid, addednodeid, modifiednodeid))
+
 # pseudo identifiers for working directory
 # (they are experimental, so don't add too many dependencies on them)
 wdirrev = 0x7fffffff
--- a/mercurial/osutil.c	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/osutil.c	Wed Jan 18 11:43:36 2017 -0500
@@ -727,6 +727,63 @@
 }
 
 #endif /* CMSG_LEN */
+
+#if defined(HAVE_SETPROCTITLE)
+/* setproctitle is the first choice - available in FreeBSD */
+#define SETPROCNAME_USE_SETPROCTITLE
+#elif (defined(__linux__) || defined(__APPLE__)) && PY_MAJOR_VERSION == 2
+/* rewrite the argv buffer in place - works on Linux and OS X. Py_GetArgcArgv
+ * in Python 3 returns a copied wchar_t **argv, so this is unsupported there. */
+#define SETPROCNAME_USE_ARGVREWRITE
+#else
+#define SETPROCNAME_USE_NONE
+#endif
+
+#ifndef SETPROCNAME_USE_NONE
+static PyObject *setprocname(PyObject *self, PyObject *args)
+{
+	const char *name = NULL;
+	if (!PyArg_ParseTuple(args, "s", &name))
+		return NULL;
+
+#if defined(SETPROCNAME_USE_SETPROCTITLE)
+	setproctitle("%s", name);
+#elif defined(SETPROCNAME_USE_ARGVREWRITE)
+	{
+		static char *argvstart = NULL;
+		static size_t argvsize = 0;
+		if (argvstart == NULL) {
+			int argc = 0, i;
+			char **argv = NULL;
+			char *argvend;
+			extern void Py_GetArgcArgv(int *argc, char ***argv);
+			Py_GetArgcArgv(&argc, &argv);
+
+			/* Check the memory we can use. Typically, argv[i] and
+			 * argv[i + 1] are contiguous. */
+			argvend = argvstart = argv[0];
+			for (i = 0; i < argc; ++i) {
+				if (argv[i] > argvend || argv[i] < argvstart)
+					break; /* not contiguous */
+				size_t len = strlen(argv[i]);
+				argvend = argv[i] + len + 1 /* '\0' */;
+			}
+			if (argvend > argvstart) /* sanity check */
+				argvsize = argvend - argvstart;
+		}
+
+		if (argvstart && argvsize > 1) {
+			int n = snprintf(argvstart, argvsize, "%s", name);
+			if (n >= 0 && (size_t)n < argvsize)
+				memset(argvstart + n, 0, argvsize - n);
+		}
+	}
+#endif
+
+	Py_RETURN_NONE;
+}
+#endif /* ndef SETPROCNAME_USE_NONE */
+
 #endif /* ndef _WIN32 */
 
 static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
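[Editor's note: since setprocname is only exported when one of the strategies above is compiled in, callers should probe for it. A hedged usage sketch; the title string is illustrative.]

    from mercurial import osutil
    if getattr(osutil, 'setprocname', None) is not None:
        osutil.setprocname('hg: serving')  # best-effort only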
@@ -899,7 +956,11 @@
 	{"recvfds", (PyCFunction)recvfds, METH_VARARGS,
 	 "receive list of file descriptors via socket\n"},
 #endif
+#ifndef SETPROCNAME_USE_NONE
+	{"setprocname", (PyCFunction)setprocname, METH_VARARGS,
+	 "set process title (best-effort)\n"},
 #endif
+#endif /* ndef _WIN32 */
 #ifdef __APPLE__
 	{
 		"isgui", (PyCFunction)isgui, METH_NOARGS,
--- a/mercurial/parser.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/parser.py	Wed Jan 18 11:43:36 2017 -0500
@@ -90,23 +90,63 @@
             return self.eval(t)
         return t
 
-def buildargsdict(trees, funcname, keys, keyvaluenode, keynode):
+def splitargspec(spec):
+    """Parse spec of function arguments into (poskeys, varkey, keys)
+
+    >>> splitargspec('')
+    ([], None, [])
+    >>> splitargspec('foo bar')
+    ([], None, ['foo', 'bar'])
+    >>> splitargspec('foo *bar baz')
+    (['foo'], 'bar', ['baz'])
+    >>> splitargspec('*foo')
+    ([], 'foo', [])
+    """
+    pre, sep, post = spec.partition('*')
+    pres = pre.split()
+    posts = post.split()
+    if sep:
+        if not posts:
+            raise error.ProgrammingError('no *varkey name provided')
+        return pres, posts[0], posts[1:]
+    return [], None, pres
+
+def buildargsdict(trees, funcname, argspec, keyvaluenode, keynode):
     """Build dict from list containing positional and keyword arguments
 
-    Invalid keywords or too many positional arguments are rejected, but
-    missing arguments are just omitted.
+    Arguments are specified by a tuple of ``(poskeys, varkey, keys)`` where
+
+    - ``poskeys``: list of names of positional arguments
+    - ``varkey``: optional argument name that takes up remainder
+    - ``keys``: list of names that can be either positional or keyword arguments
+
+    If ``varkey`` is specified, all ``keys`` must be given as keyword arguments.
+
+    Invalid keywords, too few positional arguments, or too many positional
+    arguments are rejected, but missing keyword arguments are just omitted.
     """
-    if len(trees) > len(keys):
+    poskeys, varkey, keys = argspec
+    kwstart = next((i for i, x in enumerate(trees) if x[0] == keyvaluenode),
+                   len(trees))
+    if kwstart < len(poskeys):
+        raise error.ParseError(_("%(func)s takes at least %(nargs)d positional "
+                                 "arguments")
+                               % {'func': funcname, 'nargs': len(poskeys)})
+    if not varkey and len(trees) > len(poskeys) + len(keys):
         raise error.ParseError(_("%(func)s takes at most %(nargs)d arguments")
-                               % {'func': funcname, 'nargs': len(keys)})
+                               % {'func': funcname,
+                                  'nargs': len(poskeys) + len(keys)})
     args = {}
     # consume positional arguments
-    for k, x in zip(keys, trees):
-        if x[0] == keyvaluenode:
-            break
+    for k, x in zip(poskeys, trees[:kwstart]):
         args[k] = x
+    if varkey:
+        args[varkey] = trees[len(args):kwstart]
+    else:
+        for k, x in zip(keys, trees[len(args):kwstart]):
+            args[k] = x
     # remainder should be keyword arguments
-    for x in trees[len(args):]:
+    for x in trees[kwstart:]:
         if x[0] != keyvaluenode or x[1][0] != keynode:
             raise error.ParseError(_("%(func)s got an invalid argument")
                                    % {'func': funcname})
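
To illustrate the new argspec handling: a minimal sketch with hand-built parse
trees, where 'keyvalue' and 'symbol' stand for the caller's keyvaluenode and
keynode markers:

    from mercurial import parser

    argspec = parser.splitargspec('first *rest')  # (['first'], 'rest', [])
    trees = [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]
    args = parser.buildargsdict(trees, 'f', argspec, 'keyvalue', 'symbol')
    # args == {'first': ('symbol', 'a'),
    #          'rest': [('symbol', 'b'), ('symbol', 'c')]}
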
@@ -248,7 +288,7 @@
     This is a helper for fileset/revset/template aliases. A concrete rule set
     should be made by sub-classing this and implementing class/static methods.
 
-    It supports alias expansion of symbol and funciton-call styles::
+    It supports alias expansion of symbol and function-call styles::
 
         # decl = defn
         h = heads(default)
--- a/mercurial/parsers.c	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/parsers.c	Wed Jan 18 11:43:36 2017 -0500
@@ -753,6 +753,7 @@
 	PyObject_HEAD
 	/* Type-specific fields go here. */
 	PyObject *data;        /* raw bytes of index */
+	Py_buffer buf;         /* buffer of data */
 	PyObject **cache;      /* cached tuples */
 	const char **offsets;  /* populated on demand */
 	Py_ssize_t raw_length; /* original number of elements */
@@ -808,7 +809,7 @@
 		return self->offsets[pos];
 	}
 
-	return PyBytes_AS_STRING(self->data) + pos * v1_hdrsize;
+	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
 }
 
 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
@@ -2389,9 +2390,9 @@
  */
 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
 {
-	const char *data = PyBytes_AS_STRING(self->data);
+	const char *data = (const char *)self->buf.buf;
 	Py_ssize_t pos = 0;
-	Py_ssize_t end = PyBytes_GET_SIZE(self->data);
+	Py_ssize_t end = self->buf.len;
 	long incr = v1_hdrsize;
 	Py_ssize_t len = 0;
 
@@ -2425,6 +2426,7 @@
 	self->added = NULL;
 	self->cache = NULL;
 	self->data = NULL;
+	memset(&self->buf, 0, sizeof(self->buf));
 	self->headrevs = NULL;
 	self->filteredrevs = Py_None;
 	Py_INCREF(Py_None);
@@ -2433,11 +2435,15 @@
 
 	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
 		return -1;
-	if (!PyBytes_Check(data_obj)) {
-		PyErr_SetString(PyExc_TypeError, "data is not a string");
+	if (!PyObject_CheckBuffer(data_obj)) {
+		PyErr_SetString(PyExc_TypeError,
+				"data does not support buffer interface");
 		return -1;
 	}
-	size = PyBytes_GET_SIZE(data_obj);
+
+	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
+		return -1;
+	size = self->buf.len;
 
 	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
 	self->data = data_obj;
@@ -2478,6 +2484,10 @@
 {
 	_index_clearcaches(self);
 	Py_XDECREF(self->filteredrevs);
+	if (self->buf.buf) {
+		PyBuffer_Release(&self->buf);
+		memset(&self->buf, 0, sizeof(self->buf));
+	}
 	Py_XDECREF(self->data);
 	Py_XDECREF(self->added);
 	PyObject_Del(self);
@@ -2577,7 +2587,8 @@
  * follows:
  *
  * index: an index object that lazily parses RevlogNG records
- * cache: if data is inlined, a tuple (index_file_content, 0), else None
+ * cache: if data is inlined, a tuple (0, index_file_content), else None
+ *        index_file_content could be a string or a buffer
  *
  * added complications are for backwards compatibility
  */
--- a/mercurial/patch.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/patch.py	Wed Jan 18 11:43:36 2017 -0500
@@ -35,6 +35,7 @@
     mdiff,
     pathutil,
     scmutil,
+    similar,
     util,
 )
 stringio = util.stringio
@@ -1069,7 +1070,7 @@
                     # Remove comment lines
                     patchfp = open(patchfn)
                     ncpatchfp = stringio()
-                    for line in patchfp:
+                    for line in util.iterfile(patchfp):
                         if not line.startswith('#'):
                             ncpatchfp.write(line)
                     patchfp.close()
@@ -2012,7 +2013,7 @@
     fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                        util.shellquote(patchname)))
     try:
-        for line in fp:
+        for line in util.iterfile(fp):
             line = line.rstrip()
             ui.note(line + '\n')
             if line.startswith('patching file '):
@@ -2168,6 +2169,36 @@
 
     if git:
         buildopts['git'] = get('git')
+
+        # since this is in the experimental section, we need to call
+        # ui.configbool directly
+        buildopts['showsimilarity'] = ui.configbool('experimental',
+                                                    'extendedheader.similarity')
+
+        # need to inspect the ui object instead of using get() since we want to
+        # test for an int
+        hconf = ui.config('experimental', 'extendedheader.index')
+        if hconf is not None:
+            hlen = None
+            try:
+                # the hash config could be an integer (for length of hash) or a
+                # word (e.g. short, full, none)
+                hlen = int(hconf)
+                if hlen < 0 or hlen > 40:
+                    msg = _("invalid length for extendedheader.index: '%d'\n")
+                    ui.warn(msg % hlen)
+            except ValueError:
+                # default value
+                if hconf == 'short' or hconf == '':
+                    hlen = 12
+                elif hconf == 'full':
+                    hlen = 40
+                elif hconf != 'none':
+                    msg = _("invalid value for extendedheader.index: '%s'\n")
+                    ui.warn(msg % hconf)
+            finally:
+                buildopts['index'] = hlen
+
     if whitespace:
         buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
         buildopts['ignorewsamount'] = get('ignore_space_change',
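
Both knobs live in the experimental section; a sample configuration matching
the parsing above (index accepts an integer hash length or one of
short/full/none):

    [experimental]
    extendedheader.index = short
    extendedheader.similarity = true
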
@@ -2318,6 +2349,8 @@
                     ('old', 'diff.extended'),
                     ('new', 'diff.extended'),
                     ('deleted', 'diff.extended'),
+                    ('index', 'diff.extended'),
+                    ('similarity', 'diff.extended'),
                     ('---', 'diff.file_a'),
                     ('+++', 'diff.file_b')]
     textprefixes = [('@', 'diff.hunk'),
@@ -2490,6 +2523,9 @@
                     header.append('old mode %s' % mode1)
                     header.append('new mode %s' % mode2)
                 if copyop is not None:
+                    if opts.showsimilarity:
+                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
+                        header.append('similarity index %d%%' % sim)
                     header.append('%s from %s' % (copyop, path1))
                     header.append('%s to %s' % (copyop, path2))
         elif revs and not repo.ui.quiet:
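
With both options enabled and a renamed file, the extended git header gains a
similarity line like the following (paths and percentage made up for
illustration); ordinary modifications instead gain an 'index <hash>..<hash>
<mode>' line truncated per extendedheader.index:

    diff --git a/old.py b/new.py
    similarity index 88%
    rename from old.py
    rename to new.py
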
@@ -2501,6 +2537,15 @@
                 header.append('index %s..%s' %
                               (gitindex(content1), gitindex(content2)))
         else:
+            if opts.git and opts.index > 0:
+                flag = flag1
+                if flag is None:
+                    flag = flag2
+                header.append('index %s..%s %s' %
+                              (gitindex(content1)[0:opts.index],
+                               gitindex(content2)[0:opts.index],
+                               gitmode[flag]))
+
             text = mdiff.unidiff(content1, date1,
                                  content2, date2,
                                  path1, path2, opts=opts)
@@ -2550,7 +2595,7 @@
     addresult()
     return results
 
-def diffstat(lines, width=80, git=False):
+def diffstat(lines, width=80):
     output = []
     stats = diffstatdata(lines)
     maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
--- a/mercurial/pathutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/pathutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -9,6 +9,7 @@
 from . import (
     encoding,
     error,
+    pycompat,
     util,
 )
 
@@ -84,11 +85,11 @@
         normparts.pop()
         prefixes = []
         # It's important that we check the path parts starting from the root.
-        # This means we won't accidentaly traverse a symlink into some other
+        # This means we won't accidentally traverse a symlink into some other
         # filesystem (which is potentially expensive to access).
         for i in range(len(parts)):
-            prefix = os.sep.join(parts[:i + 1])
-            normprefix = os.sep.join(normparts[:i + 1])
+            prefix = pycompat.ossep.join(parts[:i + 1])
+            normprefix = pycompat.ossep.join(normparts[:i + 1])
             if normprefix in self.auditeddir:
                 continue
             if self._realfs:
@@ -132,7 +133,7 @@
     if util.endswithsep(root):
         rootsep = root
     else:
-        rootsep = root + os.sep
+        rootsep = root + pycompat.ossep
     name = myname
     if not os.path.isabs(name):
         name = os.path.join(root, cwd, name)
@@ -202,8 +203,8 @@
     '/'
     '''
     d, p = os.path.splitdrive(path)
-    if len(p) != len(os.sep):
-        return path + os.sep
+    if len(p) != len(pycompat.ossep):
+        return path + pycompat.ossep
     else:
         return path
 
--- a/mercurial/phases.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/phases.py	Wed Jan 18 11:43:36 2017 -0500
@@ -103,7 +103,6 @@
 from __future__ import absolute_import
 
 import errno
-import os
 
 from .i18n import _
 from .node import (
@@ -114,6 +113,7 @@
     short,
 )
 from . import (
+    encoding,
     error,
 )
 
@@ -137,7 +137,7 @@
     roots = [set() for i in allphases]
     try:
         f = None
-        if 'HG_PENDING' in os.environ:
+        if 'HG_PENDING' in encoding.environ:
             try:
                 f = repo.svfs('phaseroots.pending')
             except IOError as inst:
--- a/mercurial/posix.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/posix.py	Wed Jan 18 11:43:36 2017 -0500
@@ -23,6 +23,7 @@
 from .i18n import _
 from . import (
     encoding,
+    pycompat,
 )
 
 posixfile = open
@@ -79,7 +80,7 @@
 def parsepatchoutput(output_line):
     """parses the output produced by patch and returns the filename"""
     pf = output_line[14:]
-    if os.sys.platform == 'OpenVMS':
+    if pycompat.sysplatform == 'OpenVMS':
         if pf[0] == '`':
             pf = pf[1:-1] # Remove the quotes
     else:
@@ -160,31 +161,96 @@
 
     try:
         EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
-        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
+        cachedir = os.path.join(path, '.hg', 'cache')
+        if os.path.isdir(cachedir):
+            checkisexec = os.path.join(cachedir, 'checkisexec')
+            checknoexec = os.path.join(cachedir, 'checknoexec')
+
+            try:
+                m = os.stat(checkisexec).st_mode
+            except OSError as e:
+                if e.errno != errno.ENOENT:
+                    raise
+                # checkisexec does not exist - fall through ...
+            else:
+                # checkisexec exists, check if it actually is exec
+                if m & EXECFLAGS != 0:
+                    # ensure checknoexec exists, check it isn't exec
+                    try:
+                        m = os.stat(checknoexec).st_mode
+                    except OSError as e:
+                        if e.errno != errno.ENOENT:
+                            raise
+                        open(checknoexec, 'w').close() # might fail
+                        m = os.stat(checknoexec).st_mode
+                    if m & EXECFLAGS == 0:
+                        # check-exec is exec and check-no-exec is not exec
+                        return True
+                    # checknoexec exists but is exec - delete it
+                    os.unlink(checknoexec)
+                # checkisexec exists but is not exec - delete it
+                os.unlink(checkisexec)
+
+            # check using one file, leave it as checkisexec
+            checkdir = cachedir
+        else:
+            # check directly in path and don't leave checkisexec behind
+            checkdir = path
+            checkisexec = None
+        fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
         try:
             os.close(fh)
-            m = os.stat(fn).st_mode & 0o777
-            new_file_has_exec = m & EXECFLAGS
-            os.chmod(fn, m ^ EXECFLAGS)
-            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0o777) == m)
+            m = os.stat(fn).st_mode
+            if m & EXECFLAGS == 0:
+                os.chmod(fn, m & 0o777 | EXECFLAGS)
+                if os.stat(fn).st_mode & EXECFLAGS != 0:
+                    if checkisexec is not None:
+                        os.rename(fn, checkisexec)
+                        fn = None
+                    return True
         finally:
-            os.unlink(fn)
+            if fn is not None:
+                os.unlink(fn)
     except (IOError, OSError):
         # we don't care, the user probably won't be able to commit anyway
         return False
-    return not (new_file_has_exec or exec_flags_cannot_flip)
 
 def checklink(path):
     """check whether the given path is on a symlink-capable filesystem"""
     # mktemp is not racy because symlink creation will fail if the
     # file already exists
     while True:
-        name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
+        cachedir = os.path.join(path, '.hg', 'cache')
+        checklink = os.path.join(cachedir, 'checklink')
+        # try fast path, read only
+        if os.path.islink(checklink):
+            return True
+        if os.path.isdir(cachedir):
+            checkdir = cachedir
+        else:
+            checkdir = path
+            cachedir = None
+        name = tempfile.mktemp(dir=checkdir, prefix='checklink-')
         try:
-            fd = tempfile.NamedTemporaryFile(dir=path, prefix='hg-checklink-')
+            fd = None
+            if cachedir is None:
+                fd = tempfile.NamedTemporaryFile(dir=checkdir,
+                                                 prefix='hg-checklink-')
+                target = os.path.basename(fd.name)
+            else:
+                # create a fixed file to link to; doesn't matter if it
+                # already exists.
+                target = 'checklink-target'
+                open(os.path.join(cachedir, target), 'w').close()
             try:
-                os.symlink(os.path.basename(fd.name), name)
-                os.unlink(name)
+                os.symlink(target, name)
+                if cachedir is None:
+                    os.unlink(name)
+                else:
+                    try:
+                        os.rename(name, checklink)
+                    except OSError:
+                        os.unlink(name)
                 return True
             except OSError as inst:
                 # link creation might race, try again
@@ -192,7 +258,8 @@
                     continue
                 raise
             finally:
-                fd.close()
+                if fd is not None:
+                    fd.close()
         except AttributeError:
             return False
         except OSError as inst:
@@ -236,7 +303,7 @@
 # fallback normcase function for non-ASCII strings
 normcasefallback = normcase
 
-if sys.platform == 'darwin':
+if pycompat.sysplatform == 'darwin':
 
     def normcase(path):
         '''
@@ -287,7 +354,7 @@
         # drop HFS+ ignored characters
         return encoding.hfsignoreclean(enc)
 
-if sys.platform == 'cygwin':
+if pycompat.sysplatform == 'cygwin':
     # workaround for cygwin, in which mount point part of path is
     # treated as case sensitive, even though underlying NTFS is case
     # insensitive.
@@ -302,7 +369,7 @@
     # use upper-ing as normcase as same as NTFS workaround
     def normcase(path):
         pathlen = len(path)
-        if (pathlen == 0) or (path[0] != os.sep):
+        if (pathlen == 0) or (path[0] != pycompat.ossep):
             # treat as relative
             return encoding.upper(path)
 
@@ -314,7 +381,7 @@
             mplen = len(mp)
             if mplen == pathlen: # mount point itself
                 return mp
-            if path[mplen] == os.sep:
+            if path[mplen] == pycompat.ossep:
                 return mp + encoding.upper(path[mplen:])
 
         return encoding.upper(path)
@@ -337,7 +404,7 @@
 
 _needsshellquote = None
 def shellquote(s):
-    if os.sys.platform == 'OpenVMS':
+    if pycompat.sysplatform == 'OpenVMS':
         return '"%s"' % s
     global _needsshellquote
     if _needsshellquote is None:
@@ -356,7 +423,7 @@
 
 def testpid(pid):
     '''return False if pid dead, True if running or not sure'''
-    if os.sys.platform == 'OpenVMS':
+    if pycompat.sysplatform == 'OpenVMS':
         return True
     try:
         os.kill(pid, 0)
@@ -380,7 +447,7 @@
     If command is a basename then PATH is searched for command.
     PATH isn't searched if command is an absolute or relative path.
     If command isn't found None is returned.'''
-    if sys.platform == 'OpenVMS':
+    if pycompat.sysplatform == 'OpenVMS':
         return command
 
     def findexisting(executable):
@@ -389,13 +456,13 @@
             return executable
         return None
 
-    if os.sep in command:
+    if pycompat.ossep in command:
         return findexisting(command)
 
-    if sys.platform == 'plan9':
+    if pycompat.sysplatform == 'plan9':
         return findexisting(os.path.join('/bin', command))
 
-    for path in os.environ.get('PATH', '').split(os.pathsep):
+    for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
         executable = findexisting(os.path.join(path, command))
         if executable is not None:
             return executable
@@ -463,36 +530,6 @@
 def gethgcmd():
     return sys.argv[:1]
 
-def termwidth():
-    try:
-        import array
-        import termios
-        for dev in (sys.stderr, sys.stdout, sys.stdin):
-            try:
-                try:
-                    fd = dev.fileno()
-                except AttributeError:
-                    continue
-                if not os.isatty(fd):
-                    continue
-                try:
-                    arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
-                    width = array.array('h', arri)[1]
-                    if width > 0:
-                        return width
-                except AttributeError:
-                    pass
-            except ValueError:
-                pass
-            except IOError as e:
-                if e[0] == errno.EINVAL:
-                    pass
-                else:
-                    raise
-    except ImportError:
-        pass
-    return 80
-
 def makedir(path, notindexed):
     os.mkdir(path)
 
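Both probes now cache their results under .hg/cache when that directory exists
(checkisexec, checklink, checklink-target), so repeated invocations skip the
temp-file dance. The calling convention is unchanged; a usage sketch with an
illustrative path:

    from mercurial import posix

    # True if the filesystem honors the exec bit / supports symlinks
    posix.checkexec('/path/to/repo')
    posix.checklink('/path/to/repo')
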
--- a/mercurial/profiling.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/profiling.py	Wed Jan 18 11:43:36 2017 -0500
@@ -8,12 +8,11 @@
 from __future__ import absolute_import, print_function
 
 import contextlib
-import os
-import sys
 import time
 
 from .i18n import _
 from . import (
+    encoding,
     error,
     util,
 )
@@ -80,11 +79,7 @@
 
 @contextlib.contextmanager
 def statprofile(ui, fp):
-    try:
-        import statprof
-    except ImportError:
-        raise error.Abort(_(
-            'statprof not available - install using "easy_install statprof"'))
+    from . import statprof
 
     freq = ui.configint('profiling', 'freq', default=1000)
     if freq > 0:
@@ -94,12 +89,29 @@
     else:
         ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq)
 
-    statprof.start()
+    statprof.start(mechanism='thread')
+
     try:
         yield
     finally:
-        statprof.stop()
-        statprof.display(fp)
+        data = statprof.stop()
+
+        profformat = ui.config('profiling', 'statformat', 'hotpath')
+
+        formats = {
+            'byline': statprof.DisplayFormats.ByLine,
+            'bymethod': statprof.DisplayFormats.ByMethod,
+            'hotpath': statprof.DisplayFormats.Hotpath,
+            'json': statprof.DisplayFormats.Json,
+        }
+
+        if profformat in formats:
+            displayformat = formats[profformat]
+        else:
+            ui.warn(_('unknown profiler output format: %s\n') % profformat)
+            displayformat = statprof.DisplayFormats.Hotpath
+
+        statprof.display(fp, data=data, format=displayformat)
 
 @contextlib.contextmanager
 def profile(ui):
@@ -108,12 +120,12 @@
     Profiling is active when the context manager is active. When the context
     manager exits, profiling results will be written to the configured output.
     """
-    profiler = os.getenv('HGPROF')
+    profiler = encoding.environ.get('HGPROF')
     if profiler is None:
-        profiler = ui.config('profiling', 'type', default='ls')
+        profiler = ui.config('profiling', 'type', default='stat')
     if profiler not in ('ls', 'stat', 'flame'):
         ui.warn(_("unrecognized profiler '%s' - ignored\n") % profiler)
-        profiler = 'ls'
+        profiler = 'stat'
 
     output = ui.config('profiling', 'output')
 
@@ -123,7 +135,7 @@
         path = ui.expandpath(output)
         fp = open(path, 'wb')
     else:
-        fp = sys.stderr
+        fp = ui.ferr
 
     try:
         if profiler == 'ls':
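
The sampling profiler now ships with Mercurial and becomes the default. A
sample configuration exercising the options read above (names as in the code;
output is optional and defaults to the ui error stream):

    [profiling]
    type = stat
    statformat = hotpath
    output = ~/hg-profile.txt
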
--- a/mercurial/progress.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/progress.py	Wed Jan 18 11:43:36 2017 -0500
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import sys
 import threading
 import time
 
@@ -19,7 +18,7 @@
 
 def shouldprint(ui):
     return not (ui.quiet or ui.plain('progress')) and (
-        ui._isatty(sys.stderr) or ui.configbool('progress', 'assume-tty'))
+        ui._isatty(ui.ferr) or ui.configbool('progress', 'assume-tty'))
 
 def fmtremaining(seconds):
     """format a number of remaining seconds in human readable way
@@ -158,14 +157,14 @@
             out = spacejoin(head, prog, tail)
         else:
             out = spacejoin(head, tail)
-        sys.stderr.write('\r' + encoding.trim(out, termwidth))
+        self.ui.ferr.write('\r' + encoding.trim(out, termwidth))
         self.lasttopic = topic
-        sys.stderr.flush()
+        self.ui.ferr.flush()
 
     def clear(self):
         if not self.printed or not self.lastprint or not shouldprint(self.ui):
             return
-        sys.stderr.write('\r%s\r' % (' ' * self.width()))
+        self.ui.ferr.write('\r%s\r' % (' ' * self.width()))
         if self.printed:
             # force immediate re-paint of progress bar
             self.lastprint = 0
@@ -176,8 +175,8 @@
         if self.ui.configbool('progress', 'clear-complete', default=True):
             self.clear()
         else:
-            sys.stderr.write('\n')
-        sys.stderr.flush()
+            self.ui.ferr.write('\n')
+        self.ui.ferr.flush()
 
     def width(self):
         tw = self.ui.termwidth()
--- a/mercurial/pure/osutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/pure/osutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -12,9 +12,12 @@
 import os
 import socket
 import stat as statmod
-import sys
 
-from . import policy
+from . import (
+    policy,
+    pycompat,
+)
+
 modulepolicy = policy.policy
 policynocffi = policy.policynocffi
 
@@ -51,8 +54,8 @@
     '''
     result = []
     prefix = path
-    if not prefix.endswith(os.sep):
-        prefix += os.sep
+    if not prefix.endswith(pycompat.ossep):
+        prefix += pycompat.ossep
     names = os.listdir(path)
     names.sort()
     for fn in names:
@@ -66,14 +69,14 @@
     return result
 
 ffi = None
-if modulepolicy not in policynocffi and sys.platform == 'darwin':
+if modulepolicy not in policynocffi and pycompat.sysplatform == 'darwin':
     try:
         from _osutil_cffi import ffi, lib
     except ImportError:
         if modulepolicy == 'cffi': # strict cffi import
             raise
 
-if sys.platform == 'darwin' and ffi is not None:
+if pycompat.sysplatform == 'darwin' and ffi is not None:
     listdir_batch_size = 4096
     # tweakable number, only affects performance, which chunks
     # of bytes do we get back from getattrlistbulk
@@ -155,13 +158,13 @@
 else:
     listdir = listdirpure
 
-if os.name != 'nt':
+if pycompat.osname != 'nt':
     posixfile = open
 
     _SCM_RIGHTS = 0x01
     _socklen_t = ctypes.c_uint
 
-    if sys.platform == 'linux2':
+    if pycompat.sysplatform.startswith('linux'):
         # socket.h says "the type should be socklen_t but the definition of
         # the kernel is incompatible with this."
         _cmsg_len_t = ctypes.c_size_t
--- a/mercurial/py3kcompat.py	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-# py3kcompat.py - compatibility definitions for running hg in py3k
-#
-# Copyright 2010 Renato Cunha <renatoc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import builtins
-import numbers
-
-Number = numbers.Number
-
-def bytesformatter(format, args):
-    '''Custom implementation of a formatter for bytestrings.
-
-    This function currently relies on the string formatter to do the
-    formatting and always returns bytes objects.
-
-    >>> bytesformatter(20, 10)
-    0
-    >>> bytesformatter('unicode %s, %s!', ('string', 'foo'))
-    b'unicode string, foo!'
-    >>> bytesformatter(b'test %s', 'me')
-    b'test me'
-    >>> bytesformatter('test %s', 'me')
-    b'test me'
-    >>> bytesformatter(b'test %s', b'me')
-    b'test me'
-    >>> bytesformatter('test %s', b'me')
-    b'test me'
-    >>> bytesformatter('test %d: %s', (1, b'result'))
-    b'test 1: result'
-    '''
-    # The current implementation just converts from bytes to unicode, do
-    # what's needed and then convert the results back to bytes.
-    # Another alternative is to use the Python C API implementation.
-    if isinstance(format, Number):
-        # If the fixer erroneously passes a number remainder operation to
-        # bytesformatter, we just return the correct operation
-        return format % args
-    if isinstance(format, bytes):
-        format = format.decode('utf-8', 'surrogateescape')
-    if isinstance(args, bytes):
-        args = args.decode('utf-8', 'surrogateescape')
-    if isinstance(args, tuple):
-        newargs = []
-        for arg in args:
-            if isinstance(arg, bytes):
-                arg = arg.decode('utf-8', 'surrogateescape')
-            newargs.append(arg)
-        args = tuple(newargs)
-    ret = format % args
-    return ret.encode('utf-8', 'surrogateescape')
-builtins.bytesformatter = bytesformatter
-
-origord = builtins.ord
-def fakeord(char):
-    if isinstance(char, int):
-        return char
-    return origord(char)
-builtins.ord = fakeord
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()
-
--- a/mercurial/pycompat.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/pycompat.py	Wed Jan 18 11:43:36 2017 -0500
@@ -10,6 +10,9 @@
 
 from __future__ import absolute_import
 
+import getopt
+import os
+import shlex
 import sys
 
 ispy3 = (sys.version_info[0] >= 3)
@@ -21,6 +24,7 @@
     import Queue as _queue
     import SocketServer as socketserver
     import urlparse
+    urlunquote = urlparse.unquote
     import xmlrpclib
 else:
     import http.client as httplib
@@ -29,13 +33,43 @@
     import queue as _queue
     import socketserver
     import urllib.parse as urlparse
+    urlunquote = urlparse.unquote_to_bytes
     import xmlrpc.client as xmlrpclib
 
 if ispy3:
     import builtins
     import functools
-    import os
     fsencode = os.fsencode
+    fsdecode = os.fsdecode
+    # A bytes version of os.name.
+    osname = os.name.encode('ascii')
+    ospathsep = os.pathsep.encode('ascii')
+    ossep = os.sep.encode('ascii')
+    osaltsep = os.altsep
+    if osaltsep:
+        osaltsep = osaltsep.encode('ascii')
+    # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
+    # returns bytes.
+    getcwd = os.getcwdb
+    sysplatform = sys.platform.encode('ascii')
+    sysexecutable = sys.executable
+    if sysexecutable:
+        sysexecutable = os.fsencode(sysexecutable)
+
+    # TODO: .buffer might not exist if std streams were replaced; we'll need
+    # a silly wrapper to make a bytes stream backed by a unicode one.
+    stdin = sys.stdin.buffer
+    stdout = sys.stdout.buffer
+    stderr = sys.stderr.buffer
+
+    # Since Python 3 converts argv to wchar_t type by Py_DecodeLocale() on Unix,
+    # we can use os.fsencode() to get back bytes argv.
+    #
+    # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55
+    #
+    # TODO: On Windows, the native argv is wchar_t, so we'll need a different
+    # workaround to simulate the Python 2 (i.e. ANSI Win32 API) behavior.
+    sysargv = list(map(os.fsencode, sys.argv))
 
     def sysstr(s):
         """Return a keyword str to be passed to Python functions such as
@@ -62,6 +96,40 @@
     setattr = _wrapattrfunc(builtins.setattr)
     xrange = builtins.range
 
+    # getopt.getopt() on Python 3 deals with unicodes internally, so we cannot
+    # pass bytes there. Passing unicodes will result in unicodes as return
+    # values, which we then need to convert back to bytes.
+    def getoptb(args, shortlist, namelist):
+        args = [a.decode('latin-1') for a in args]
+        shortlist = shortlist.decode('latin-1')
+        namelist = [a.decode('latin-1') for a in namelist]
+        opts, args = getopt.getopt(args, shortlist, namelist)
+        opts = [(a[0].encode('latin-1'), a[1].encode('latin-1'))
+                for a in opts]
+        args = [a.encode('latin-1') for a in args]
+        return opts, args
+
+    # keys of keyword arguments in Python need to be strings, which are
+    # unicodes on Python 3. This function takes keyword arguments and
+    # converts the keys to str.
+    def strkwargs(dic):
+        dic = dict((k.decode('latin-1'), v) for k, v in dic.items())
+        return dic
+
+    # keys of keyword arguments must be unicode when passed into a function.
+    # This function converts those keys back to bytes again, since internally
+    # we deal with bytes.
+    def byteskwargs(dic):
+        dic = dict((k.encode('latin-1'), v) for k, v in dic.items())
+        return dic
+
+    # shlex.split() accepts unicodes on Python 3. This function takes a bytes
+    # argument, converts it to unicode, passes it into shlex.split(), converts
+    # the returned values back to bytes, and returns them.
+    # TODO: handle shlex.shlex().
+    def shlexsplit(s):
+        ret = shlex.split(s.decode('latin-1'))
+        return [a.encode('latin-1') for a in ret]
+
 else:
     def sysstr(s):
         return s
@@ -76,6 +144,33 @@
             raise TypeError(
                 "expect str, not %s" % type(filename).__name__)
 
+    # In Python 2, fsdecode() will almost always receive bytes. The Python 2
+    # code already works fine with bytes, so this is deliberately a no-op.
+    def fsdecode(filename):
+        return filename
+
+    def getoptb(args, shortlist, namelist):
+        return getopt.getopt(args, shortlist, namelist)
+
+    def strkwargs(dic):
+        return dic
+
+    def byteskwargs(dic):
+        return dic
+
+    osname = os.name
+    ospathsep = os.pathsep
+    ossep = os.sep
+    osaltsep = os.altsep
+    stdin = sys.stdin
+    stdout = sys.stdout
+    stderr = sys.stderr
+    sysargv = sys.argv
+    sysplatform = sys.platform
+    getcwd = os.getcwd
+    sysexecutable = sys.executable
+    shlexsplit = shlex.split
+
 stringio = io.StringIO
 empty = _queue.Empty
 queue = _queue.Queue
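
A quick sketch of how callers are expected to use these shims so the same code
runs under both interpreters (values shown for a POSIX Python 3):

    from mercurial import pycompat

    pycompat.ossep                     # b'/' on Python 3, '/' on Python 2
    if pycompat.sysplatform.startswith(b'linux'):
        pass                           # bytes-safe stand-in for sys.platform
    opts, args = pycompat.getoptb([b'-q', b'log'], b'q', [b'quiet'])
    # opts == [(b'-q', b'')], args == [b'log']
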
--- a/mercurial/registrar.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/registrar.py	Wed Jan 18 11:43:36 2017 -0500
@@ -8,12 +8,13 @@
 from __future__ import absolute_import
 
 from . import (
+    error,
     pycompat,
     util,
 )
 
 class _funcregistrarbase(object):
-    """Base of decorator to register a fuction for specific purpose
+    """Base of decorator to register a function for specific purpose
 
     This decorator stores decorated functions into own dict 'table'.
 
@@ -50,6 +51,10 @@
     def _doregister(self, func, decl, *args, **kwargs):
         name = self._getname(decl)
 
+        if name in self._table:
+            msg = 'duplicate registration for name: "%s"' % name
+            raise error.ProgrammingError(msg)
+
         if func.__doc__ and not util.safehasattr(func, '_origdoc'):
             doc = func.__doc__.strip()
             func._origdoc = doc
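
Registering the same name twice is now an explicit programming error instead of
a silent overwrite. A minimal sketch of what now fails (keyword name
hypothetical):

    from mercurial import registrar

    templatekeyword = registrar.templatekeyword()

    @templatekeyword('mykeyword')
    def kw1(repo, ctx, templ, **args):
        return 'one'

    @templatekeyword('mykeyword')   # raises error.ProgrammingError
    def kw2(repo, ctx, templ, **args):
        return 'two'
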
@@ -177,7 +182,7 @@
 
     Usage::
 
-        templaetkeyword = registrar.templatekeyword()
+        templatekeyword = registrar.templatekeyword()
 
         @templatekeyword('mykeyword')
         def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
--- a/mercurial/repair.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/repair.py	Wed Jan 18 11:43:36 2017 -0500
@@ -10,15 +10,22 @@
 
 import errno
 import hashlib
+import stat
+import tempfile
+import time
 
 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
+    changelog,
     error,
     exchange,
+    manifest,
     obsolete,
+    revlog,
+    scmutil,
     util,
 )
 
@@ -67,7 +74,7 @@
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])
 
-    collectone(repo.manifest)
+    collectone(repo.manifestlog._revlog)
     for fname in files:
         collectone(repo.file(fname))
 
@@ -91,6 +98,9 @@
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
+    files = _collectfiles(repo, striprev)
+    saverevs = _collectbrokencsets(repo, files, striprev)
+
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
@@ -99,16 +109,11 @@
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
-    for rev in striplist:
-        for desc in cl.descendants([rev]):
-            tostrip.add(desc)
+    saveheads = set(saverevs)
+    for r in cl.revs(start=striprev + 1):
+        if any(p in tostrip for p in cl.parentrevs(r)):
+            tostrip.add(r)
 
-    files = _collectfiles(repo, striprev)
-    saverevs = _collectbrokencsets(repo, files, striprev)
-
-    # compute heads
-    saveheads = set(saverevs)
-    for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
@@ -153,7 +158,7 @@
         tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=False)
 
-    mfst = repo.manifest
+    mfst = repo.manifestlog._revlog
 
     curtr = repo.currenttransaction()
     if curtr is not None:
@@ -174,7 +179,7 @@
                     if (unencoded.startswith('meta/') and
                         unencoded.endswith('00manifest.i')):
                         dir = unencoded[5:-12]
-                        repo.manifest.dirlog(dir).strip(striprev, tr)
+                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
             for fn in files:
                 repo.file(fn).strip(striprev, tr)
             tr.endgroup()
@@ -244,6 +249,9 @@
             vfs.unlink(tmpbundlefile)
 
     repo.destroyed()
+    # return the backup file path (or None if 'backup' was False) so
+    # extensions can use it
+    return backupfile
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
@@ -357,3 +365,738 @@
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
+
+def upgraderequiredsourcerequirements(repo):
+    """Obtain requirements required to be present to upgrade a repo.
+
+    An upgrade will not be allowed if the repository doesn't have the
+    requirements returned by this function.
+    """
+    return set([
+        # Introduced in Mercurial 0.9.2.
+        'revlogv1',
+        # Introduced in Mercurial 0.9.2.
+        'store',
+    ])
+
+def upgradeblocksourcerequirements(repo):
+    """Obtain requirements that will prevent an upgrade from occurring.
+
+    An upgrade cannot be performed if the source repository contains a
+    requirement in the returned set.
+    """
+    return set([
+        # The upgrade code does not yet support these experimental features.
+        # This is an artificial limitation.
+        'manifestv2',
+        'treemanifest',
+        # This was a precursor to generaldelta and was never enabled by default.
+        # It should (hopefully) not exist in the wild.
+        'parentdelta',
+        # Upgrade should operate on the actual store, not the shared link.
+        'shared',
+    ])
+
+def upgradesupportremovedrequirements(repo):
+    """Obtain requirements that can be removed during an upgrade.
+
+    If an upgrade were to create a repository that dropped a requirement,
+    the dropped requirement must appear in the returned set for the upgrade
+    to be allowed.
+    """
+    return set()
+
+def upgradesupporteddestrequirements(repo):
+    """Obtain requirements that upgrade supports in the destination.
+
+    If the result of the upgrade would create requirements not in this set,
+    the upgrade is disallowed.
+
+    Extensions should monkeypatch this to add their custom requirements.
+    """
+    return set([
+        'dotencode',
+        'fncache',
+        'generaldelta',
+        'revlogv1',
+        'store',
+    ])
+
+def upgradeallowednewrequirements(repo):
+    """Obtain requirements that can be added to a repository during upgrade.
+
+    This is used to disallow proposed requirements from being added when
+    they weren't present before.
+
+    We use a list of allowed requirement additions instead of a list of known
+    bad additions because the whitelist approach is safer and will prevent
+    future, unknown requirements from accidentally being added.
+    """
+    return set([
+        'dotencode',
+        'fncache',
+        'generaldelta',
+    ])
+
+deficiency = 'deficiency'
+optimisation = 'optimization'
+
+class upgradeimprovement(object):
+    """Represents an improvement that can be made as part of an upgrade.
+
+    The following attributes are defined on each instance:
+
+    name
+       Machine-readable string uniquely identifying this improvement. It
+       will be mapped to an action later in the upgrade process.
+
+    type
+       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
+       problem. An optimization is an action (sometimes optional) that
+       can be taken to further improve the state of the repository.
+
+    description
+       Message intended for humans explaining the improvement in more detail,
+       including the implications of it. For ``deficiency`` types, should be
+       worded in the present tense. For ``optimisation`` types, should be
+       worded in the future tense.
+
+    upgrademessage
+       Message intended for humans explaining what an upgrade addressing this
+       issue will do. Should be worded in the future tense.
+
+    fromdefault (``deficiency`` types only)
+       Boolean indicating whether the current (deficient) state deviates
+       from Mercurial's default configuration.
+
+    fromconfig (``deficiency`` types only)
+       Boolean indicating whether the current (deficient) state deviates
+       from the current Mercurial configuration.
+    """
+    def __init__(self, name, type, description, upgrademessage, **kwargs):
+        self.name = name
+        self.type = type
+        self.description = description
+        self.upgrademessage = upgrademessage
+
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+def upgradefindimprovements(repo):
+    """Determine improvements that can be made to the repo during upgrade.
+
+    Returns a list of ``upgradeimprovement`` instances describing repository
+    deficiencies and optimizations.
+    """
+    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
+    from . import localrepo
+
+    newreporeqs = localrepo.newreporequirements(repo)
+
+    improvements = []
+
+    # We could detect lack of revlogv1 and store here, but they were added
+    # in 0.9.2 and we don't support upgrading repos without these
+    # requirements, so let's not bother.
+
+    if 'fncache' not in repo.requirements:
+        improvements.append(upgradeimprovement(
+            name='fncache',
+            type=deficiency,
+            description=_('long and reserved filenames may not work correctly; '
+                          'repository performance is sub-optimal'),
+            upgrademessage=_('repository will be more resilient to storing '
+                             'certain paths and performance of certain '
+                             'operations should be improved'),
+            fromdefault=True,
+            fromconfig='fncache' in newreporeqs))
+
+    if 'dotencode' not in repo.requirements:
+        improvements.append(upgradeimprovement(
+            name='dotencode',
+            type=deficiency,
+            description=_('storage of filenames beginning with a period or '
+                          'space may not work correctly'),
+            upgrademessage=_('repository will be better able to store files '
+                             'beginning with a space or period'),
+            fromdefault=True,
+            fromconfig='dotencode' in newreporeqs))
+
+    if 'generaldelta' not in repo.requirements:
+        improvements.append(upgradeimprovement(
+            name='generaldelta',
+            type=deficiency,
+            description=_('deltas within internal storage are unable to '
+                          'choose optimal revisions; repository is larger and '
+                          'slower than it could be; interaction with other '
+                          'repositories may require extra network and CPU '
+                          'resources, making "hg push" and "hg pull" slower'),
+            upgrademessage=_('repository storage will be able to create '
+                             'optimal deltas; new repository data will be '
+                             'smaller and read times should decrease; '
+                             'interacting with other repositories using this '
+                             'storage model should require less network and '
+                             'CPU resources, making "hg push" and "hg pull" '
+                             'faster'),
+            fromdefault=True,
+            fromconfig='generaldelta' in newreporeqs))
+
+    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
+    # changelogs with deltas.
+    cl = repo.changelog
+    for rev in cl:
+        chainbase = cl.chainbase(rev)
+        if chainbase != rev:
+            improvements.append(upgradeimprovement(
+                name='removecldeltachain',
+                type=deficiency,
+                description=_('changelog storage is using deltas instead of '
+                              'raw entries; changelog reading and any '
+                              'operation relying on changelog data are slower '
+                              'than they could be'),
+                upgrademessage=_('changelog storage will be reformatted to '
+                                 'store raw entries; changelog reading will be '
+                                 'faster; changelog size may be reduced'),
+                fromdefault=True,
+                fromconfig=True))
+            break
+
+    # Now for the optimizations.
+
+    # These are unconditionally added. There is logic later that figures out
+    # which ones to apply.
+
+    improvements.append(upgradeimprovement(
+        name='redeltaparent',
+        type=optimisation,
+        description=_('deltas within internal storage will be recalculated to '
+                      'choose an optimal base revision where this was not '
+                      'already done; the size of the repository may shrink and '
+                      'various operations may become faster; the first time '
+                      'this optimization is performed could slow down upgrade '
+                      'execution considerably; subsequent invocations should '
+                      'not run noticeably slower'),
+        upgrademessage=_('deltas within internal storage will choose a new '
+                         'base revision if needed')))
+
+    improvements.append(upgradeimprovement(
+        name='redeltamultibase',
+        type=optimisation,
+        description=_('deltas within internal storage will be recalculated '
+                      'against multiple base revisions and the smallest '
+                      'difference will be used; the size of the repository may '
+                      'shrink significantly when there are many merges; this '
+                      'optimization will slow down execution in proportion to '
+                      'the number of merges in the repository and the number '
+                      'of files in the repository; this slowdown should not '
+                      'be significant unless there are tens of thousands of '
+                      'files and thousands of merges'),
+        upgrademessage=_('deltas within internal storage will choose an '
+                         'optimal delta by computing deltas against multiple '
+                         'parents; may slow down execution time '
+                         'significantly')))
+
+    improvements.append(upgradeimprovement(
+        name='redeltaall',
+        type=optimisation,
+        description=_('deltas within internal storage will always be '
+                      'recalculated without reusing prior deltas; this will '
+                      'likely make execution run several times slower; this '
+                      'optimization is typically not needed'),
+        upgrademessage=_('deltas within internal storage will be fully '
+                         'recomputed; this will likely drastically slow down '
+                         'execution time')))
+
+    return improvements
+
+def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
+                            optimize):
+    """Determine upgrade actions that will be performed.
+
+    Given a list of improvements as returned by ``upgradefindimprovements``,
+    determine the list of upgrade actions that will be performed.
+
+    The role of this function is to filter improvements if needed, apply
+    recommended optimizations from the improvements list that make sense,
+    etc.
+
+    Returns a list of action names.
+    """
+    newactions = []
+
+    knownreqs = upgradesupporteddestrequirements(repo)
+
+    for i in improvements:
+        name = i.name
+
+        # If the action is a requirement that doesn't show up in the
+        # destination requirements, prune the action.
+        if name in knownreqs and name not in destreqs:
+            continue
+
+        if i.type == deficiency:
+            newactions.append(name)
+
+    newactions.extend(o for o in sorted(optimize) if o not in newactions)
+
+    # FUTURE consider adding some optimizations here for certain transitions.
+    # e.g. adding generaldelta could schedule parent redeltas.
+
+    return newactions
+
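
These helpers back the debugupgraderepo debug command that accompanies them;
assuming its documented flags, a bare invocation reports the findings above and
--run performs the in-place swap:

    $ hg debugupgraderepo
    $ hg debugupgraderepo --optimize redeltaparent --run
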
+def _revlogfrompath(repo, path):
+    """Obtain a revlog from a repo path.
+
+    An instance of the appropriate class is returned.
+    """
+    if path == '00changelog.i':
+        return changelog.changelog(repo.svfs)
+    elif path.endswith('00manifest.i'):
+        mandir = path[:-len('00manifest.i')]
+        return manifest.manifestrevlog(repo.svfs, dir=mandir)
+    else:
+        # Filelogs don't do anything special with settings. So we can use a
+        # vanilla revlog.
+        return revlog.revlog(repo.svfs, path)
+
+def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
+    """Copy revlogs between 2 repos."""
+    revcount = 0
+    srcsize = 0
+    srcrawsize = 0
+    dstsize = 0
+    fcount = 0
+    frevcount = 0
+    fsrcsize = 0
+    frawsize = 0
+    fdstsize = 0
+    mcount = 0
+    mrevcount = 0
+    msrcsize = 0
+    mrawsize = 0
+    mdstsize = 0
+    crevcount = 0
+    csrcsize = 0
+    crawsize = 0
+    cdstsize = 0
+
+    # Perform a pass to collect metadata. This validates we can open all
+    # source files and allows a unified progress bar to be displayed.
+    for unencoded, encoded, size in srcrepo.store.walk():
+        if unencoded.endswith('.d'):
+            continue
+
+        rl = _revlogfrompath(srcrepo, unencoded)
+        revcount += len(rl)
+
+        datasize = 0
+        rawsize = 0
+        idx = rl.index
+        for rev in rl:
+            e = idx[rev]
+            datasize += e[1]
+            rawsize += e[2]
+
+        srcsize += datasize
+        srcrawsize += rawsize
+
+        # This is for the separate progress bars.
+        if isinstance(rl, changelog.changelog):
+            crevcount += len(rl)
+            csrcsize += datasize
+            crawsize += rawsize
+        elif isinstance(rl, manifest.manifestrevlog):
+            mcount += 1
+            mrevcount += len(rl)
+            msrcsize += datasize
+            mrawsize += rawsize
+        elif isinstance(rl, revlog.revlog):
+            fcount += 1
+            frevcount += len(rl)
+            fsrcsize += datasize
+            frawsize += rawsize
+
+    if not revcount:
+        return
+
+    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
+               '%d in changelog)\n') %
+             (revcount, frevcount, mrevcount, crevcount))
+    ui.write(_('migrating %s in store; %s tracked data\n') % (
+             (util.bytecount(srcsize), util.bytecount(srcrawsize))))
+
+    # Used to keep track of progress.
+    progress = []
+    def oncopiedrevision(rl, rev, node):
+        progress[1] += 1
+        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
+
+    # Do the actual copying.
+    # FUTURE this operation can be farmed off to worker processes.
+    seen = set()
+    for unencoded, encoded, size in srcrepo.store.walk():
+        if unencoded.endswith('.d'):
+            continue
+
+        oldrl = _revlogfrompath(srcrepo, unencoded)
+        newrl = _revlogfrompath(dstrepo, unencoded)
+
+        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
+            ui.write(_('finished migrating %d manifest revisions across %d '
+                       'manifests; change in size: %s\n') %
+                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
+
+            ui.write(_('migrating changelog containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (crevcount, util.bytecount(csrcsize),
+                      util.bytecount(crawsize)))
+            seen.add('c')
+            progress[:] = [_('changelog revisions'), 0, crevcount]
+        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
+            ui.write(_('finished migrating %d filelog revisions across %d '
+                       'filelogs; change in size: %s\n') %
+                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
+
+            ui.write(_('migrating %d manifests containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (mcount, mrevcount, util.bytecount(msrcsize),
+                      util.bytecount(mrawsize)))
+            seen.add('m')
+            progress[:] = [_('manifest revisions'), 0, mrevcount]
+        elif 'f' not in seen:
+            ui.write(_('migrating %d filelogs containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (fcount, frevcount, util.bytecount(fsrcsize),
+                      util.bytecount(frawsize)))
+            seen.add('f')
+            progress[:] = [_('file revisions'), 0, frevcount]
+
+        ui.progress(progress[0], progress[1], total=progress[2])
+
+        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
+        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
+                    deltareuse=deltareuse,
+                    aggressivemergedeltas=aggressivemergedeltas)
+
+        datasize = 0
+        idx = newrl.index
+        for rev in newrl:
+            datasize += idx[rev][1]
+
+        dstsize += datasize
+
+        if isinstance(newrl, changelog.changelog):
+            cdstsize += datasize
+        elif isinstance(newrl, manifest.manifestrevlog):
+            mdstsize += datasize
+        else:
+            fdstsize += datasize
+
+    ui.progress(progress[0], None)
+
+    ui.write(_('finished migrating %d changelog revisions; change in size: '
+               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
+
+    ui.write(_('finished migrating %d total revisions; total change in store '
+               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
+
+def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
+    """Determine whether to copy a store file during upgrade.
+
+    This function is called when migrating store files from ``srcrepo`` to
+    ``dstrepo`` as part of upgrading a repository.
+
+    Args:
+      srcrepo: repo we are copying from
+      dstrepo: repo we are copying to
+      requirements: set of requirements for ``dstrepo``
+      path: store file being examined
+      mode: the ``ST_MODE`` file type of ``path``
+      st: ``stat`` data structure for ``path``
+
+    Function should return ``True`` if the file is to be copied.
+    """
+    # Skip revlogs.
+    if path.endswith(('.i', '.d')):
+        return False
+    # Skip transaction related files.
+    if path.startswith('undo'):
+        return False
+    # Only copy regular files.
+    if mode != stat.S_IFREG:
+        return False
+    # Skip other skipped files.
+    if path in ('lock', 'fncache'):
+        return False
+
+    return True
+
+def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
+    """Hook point for extensions to perform additional actions during upgrade.
+
+    This function is called after revlogs and store files have been copied but
+    before the new store is swapped into the original location.
+    """
+
+def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
+    """Do the low-level work of upgrading a repository.
+
+    The upgrade is effectively performed as a copy between a source
+    repository and a temporary destination repository.
+
+    The source repository is unmodified for as long as possible so the
+    upgrade can abort at any time without causing loss of service for
+    readers and without corrupting the source repository.
+    """
+    assert srcrepo.currentwlock()
+    assert dstrepo.currentwlock()
+
+    ui.write(_('(it is safe to interrupt this process any time before '
+               'data migration completes)\n'))
+
+    if 'redeltaall' in actions:
+        deltareuse = revlog.revlog.DELTAREUSENEVER
+    elif 'redeltaparent' in actions:
+        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
+    elif 'redeltamultibase' in actions:
+        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
+    else:
+        deltareuse = revlog.revlog.DELTAREUSEALWAYS
+
+    with dstrepo.transaction('upgrade') as tr:
+        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
+                     'redeltamultibase' in actions)
+
+    # Now copy other files in the store directory.
+    for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
+        if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
+                                       p, kind, st):
+            continue
+
+        srcrepo.ui.write(_('copying %s\n') % p)
+        src = srcrepo.store.vfs.join(p)
+        dst = dstrepo.store.vfs.join(p)
+        util.copyfile(src, dst, copystat=True)
+
+    _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
+
+    ui.write(_('data fully migrated to temporary repository\n'))
+
+    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
+    backupvfs = scmutil.vfs(backuppath)
+
+    # Make a backup of requires file first, as it is the first to be modified.
+    util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))
+
+    # We install an arbitrary requirement that clients must not support
+    # as a mechanism to lock out new clients during the data swap. This is
+    # better than allowing a client to continue while the repository is in
+    # an inconsistent state.
+    ui.write(_('marking source repository as being upgraded; clients will be '
+               'unable to read from repository\n'))
+    scmutil.writerequires(srcrepo.vfs,
+                          srcrepo.requirements | set(['upgradeinprogress']))
+
+    ui.write(_('starting in-place swap of repository data\n'))
+    ui.write(_('replaced files will be backed up at %s\n') %
+             backuppath)
+
+    # Now swap in the new store directory. Doing it as a rename should make
+    # the operation nearly instantaneous and atomic (at least in well-behaved
+    # environments).
+    ui.write(_('replacing store...\n'))
+    tstart = time.time()
+    util.rename(srcrepo.spath, backupvfs.join('store'))
+    util.rename(dstrepo.spath, srcrepo.spath)
+    elapsed = time.time() - tstart
+    ui.write(_('store replacement complete; repository was inconsistent for '
+               '%0.1fs\n') % elapsed)
+
+    # We first write the requirements file. Any new requirements will lock
+    # out legacy clients.
+    ui.write(_('finalizing requirements file and making repository readable '
+               'again\n'))
+    scmutil.writerequires(srcrepo.vfs, requirements)
+
+    # The lock file from the old store won't be removed because nothing has a
+    # reference to its new location. So clean it up manually. Alternatively, we
+    # could update srcrepo.svfs and other variables to point to the new
+    # location. This is simpler.
+    backupvfs.unlink('store/lock')
+
+    return backuppath
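
The swap itself is just two directory renames, each atomic on well-behaved filesystems, which is why the window of inconsistency is so short. A minimal standalone sketch of the idea (hypothetical paths, no error handling):

    import os

    def swapstores(storepath, newstorepath, backupdir):
        # Move the old store aside, then the new store into place. Readers
        # see either the old or the new store, never a mix, because each
        # rename is a single atomic operation.
        os.rename(storepath, os.path.join(backupdir, 'store'))
        os.rename(newstorepath, storepath)
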
+
+def upgraderepo(ui, repo, run=False, optimize=None):
+    """Upgrade a repository in place."""
+    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
+    from . import localrepo
+
+    optimize = set(optimize or [])
+    repo = repo.unfiltered()
+
+    # Ensure the repository can be upgraded.
+    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
+    if missingreqs:
+        raise error.Abort(_('cannot upgrade repository; requirement '
+                            'missing: %s') % _(', ').join(sorted(missingreqs)))
+
+    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
+    if blockedreqs:
+        raise error.Abort(_('cannot upgrade repository; unsupported source '
+                            'requirement: %s') %
+                          _(', ').join(sorted(blockedreqs)))
+
+    # FUTURE there is potentially a need to control the wanted requirements via
+    # command arguments or via an extension hook point.
+    newreqs = localrepo.newreporequirements(repo)
+
+    noremovereqs = (repo.requirements - newreqs -
+                    upgradesupportremovedrequirements(repo))
+    if noremovereqs:
+        raise error.Abort(_('cannot upgrade repository; requirement would be '
+                            'removed: %s') % _(', ').join(sorted(noremovereqs)))
+
+    noaddreqs = (newreqs - repo.requirements -
+                 upgradeallowednewrequirements(repo))
+    if noaddreqs:
+        raise error.Abort(_('cannot upgrade repository; do not support adding '
+                            'requirement: %s') %
+                          _(', ').join(sorted(noaddreqs)))
+
+    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
+    if unsupportedreqs:
+        raise error.Abort(_('cannot upgrade repository; do not support '
+                            'destination requirement: %s') %
+                          _(', ').join(sorted(unsupportedreqs)))
+
+    # Find and validate all improvements that can be made.
+    improvements = upgradefindimprovements(repo)
+    for i in improvements:
+        if i.type not in (deficiency, optimisation):
+            raise error.Abort(_('unexpected improvement type %s for %s') % (
+                i.type, i.name))
+
+    # Validate arguments.
+    unknownoptimize = optimize - set(i.name for i in improvements
+                                     if i.type == optimisation)
+    if unknownoptimize:
+        raise error.Abort(_('unknown optimization action requested: %s') %
+                          ', '.join(sorted(unknownoptimize)),
+                          hint=_('run without arguments to see valid '
+                                 'optimizations'))
+
+    actions = upgradedetermineactions(repo, improvements, repo.requirements,
+                                      newreqs, optimize)
+
+    def printrequirements():
+        ui.write(_('requirements\n'))
+        ui.write(_('   preserved: %s\n') %
+                 _(', ').join(sorted(newreqs & repo.requirements)))
+
+        if repo.requirements - newreqs:
+            ui.write(_('   removed: %s\n') %
+                     _(', ').join(sorted(repo.requirements - newreqs)))
+
+        if newreqs - repo.requirements:
+            ui.write(_('   added: %s\n') %
+                     _(', ').join(sorted(newreqs - repo.requirements)))
+
+        ui.write('\n')
+
+    def printupgradeactions():
+        for action in actions:
+            for i in improvements:
+                if i.name == action:
+                    ui.write('%s\n   %s\n\n' %
+                             (i.name, i.upgrademessage))
+
+    if not run:
+        fromdefault = []
+        fromconfig = []
+        optimizations = []
+
+        for i in improvements:
+            assert i.type in (deficiency, optimisation)
+            if i.type == deficiency:
+                if i.fromdefault:
+                    fromdefault.append(i)
+                if i.fromconfig:
+                    fromconfig.append(i)
+            else:
+                optimizations.append(i)
+
+        if fromdefault or fromconfig:
+            fromconfignames = set(x.name for x in fromconfig)
+            onlydefault = [i for i in fromdefault
+                           if i.name not in fromconfignames]
+
+            if fromconfig:
+                ui.write(_('repository lacks features recommended by '
+                           'current config options:\n\n'))
+                for i in fromconfig:
+                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
+
+            if onlydefault:
+                ui.write(_('repository lacks features used by the default '
+                           'config options:\n\n'))
+                for i in onlydefault:
+                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
+
+            ui.write('\n')
+        else:
+            ui.write(_('(no feature deficiencies found in existing '
+                       'repository)\n'))
+
+        ui.write(_('performing an upgrade with "--run" will make the following '
+                   'changes:\n\n'))
+
+        printrequirements()
+        printupgradeactions()
+
+        unusedoptimize = [i for i in improvements
+                          if i.name not in actions and i.type == optimisation]
+        if unusedoptimize:
+            ui.write(_('additional optimizations are available by specifying '
+                       '"--optimize <name>":\n\n'))
+            for i in unusedoptimize:
+                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
+        return
+
+    # Else we're in the run=true case.
+    ui.write(_('upgrade will perform the following actions:\n\n'))
+    printrequirements()
+    printupgradeactions()
+
+    ui.write(_('beginning upgrade...\n'))
+    with repo.wlock():
+        with repo.lock():
+            ui.write(_('repository locked and read-only\n'))
+            # Our strategy for upgrading the repository is to create a new,
+            # temporary repository, write data to it, then do a swap of the
+            # data. There are less heavyweight ways to do this, but it is easier
+            # to create a new repo object than to instantiate all the components
+            # (like the store) separately.
+            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
+            backuppath = None
+            try:
+                ui.write(_('creating temporary repository to stage migrated '
+                           'data: %s\n') % tmppath)
+                dstrepo = localrepo.localrepository(repo.baseui,
+                                                    path=tmppath,
+                                                    create=True)
+
+                with dstrepo.wlock():
+                    with dstrepo.lock():
+                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
+                                                  actions)
+
+            finally:
+                ui.write(_('removing temporary repository %s\n') % tmppath)
+                repo.vfs.rmtree(tmppath, forcibly=True)
+
+                if backuppath:
+                    ui.warn(_('copy of old repository backed up at %s\n') %
+                            backuppath)
+                    ui.warn(_('the old repository will not be deleted; remove '
+                              'it to free up disk space once the upgraded '
+                              'repository is verified\n'))
--- a/mercurial/revlog.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/revlog.py	Wed Jan 18 11:43:36 2017 -0500
@@ -39,8 +39,8 @@
 
 _pack = struct.pack
 _unpack = struct.unpack
-_compress = zlib.compress
-_decompress = zlib.decompress
+# Aliased for performance.
+_zlibdecompress = zlib.decompress
 
 # revlog header flags
 REVLOGV0 = 0
@@ -54,8 +54,16 @@
 
 # revlog index flags
 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
+REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
+REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
 REVIDX_DEFAULT_FLAGS = 0
-REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED
+# stable order in which flags need to be processed and their processors applied
+REVIDX_FLAGS_ORDER = [
+    REVIDX_ISCENSORED,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+]
+REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
 
 # max size of revlog with inline data
 _maxinline = 131072
@@ -64,6 +72,41 @@
 RevlogError = error.RevlogError
 LookupError = error.LookupError
 CensoredNodeError = error.CensoredNodeError
+ProgrammingError = error.ProgrammingError
+
+# Store flag processors (cf. 'addflagprocessor()' to register)
+_flagprocessors = {
+    REVIDX_ISCENSORED: None,
+}
+
+def addflagprocessor(flag, processor):
+    """Register a flag processor on a revision data flag.
+
+    Invariant:
+    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
+    - Only one flag processor can be registered on a specific flag.
+    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
+      following signatures:
+          - (read)  f(self, text) -> newtext, bool
+          - (write) f(self, text) -> newtext, bool
+          - (raw)   f(self, text) -> bool
+      The boolean returned by these transforms is used to determine whether
+      'newtext' can be used for hash integrity checking.
+
+      Note: The 'raw' transform is used for changegroup generation and in some
+      debug commands. In this case the transform only indicates whether the
+      contents can be used for hash integrity checks.
+    """
+    if not flag & REVIDX_KNOWN_FLAGS:
+        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
+        raise ProgrammingError(msg)
+    if flag not in REVIDX_FLAGS_ORDER:
+        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
+        raise ProgrammingError(msg)
+    if flag in _flagprocessors:
+        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
+        raise error.Abort(msg)
+    _flagprocessors[flag] = processor
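
A hedged sketch of a registration; the 3-tuple shape and REVIDX_EXTSTORED come from the code above, while the transform names and bodies are invented for illustration:

    # Hypothetical processor for externally stored revision data.
    def extread(rl, text):
        # Would resolve the stored pointer to the real contents here.
        return text, False   # substituted text is not hash-checkable

    def extwrite(rl, text):
        return text, False

    def extraw(rl, text):
        return False         # raw form cannot be hash-checked either

    # addflagprocessor(REVIDX_EXTSTORED, (extread, extwrite, extraw))
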
 
 def getoffset(q):
     return int(q >> 16)
@@ -72,6 +115,8 @@
     return int(q & 0xFFFF)
 
 def offset_type(offset, type):
+    if (type & ~REVIDX_KNOWN_FLAGS) != 0:
+        raise ValueError('unknown revlog index flags')
     return long(long(offset) << 16 | type)
 
 _nullhash = hashlib.sha1(nullid)
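
The first field of an index entry packs a 48-bit offset above 16 bits of flags, which is what offset_type() now guards. A standalone check of the arithmetic, with the flag constants restated from above (util.bitsfrom presumably just ORs the listed bits together):

    REVIDX_ISCENSORED = 1 << 15
    REVIDX_ELLIPSIS = 1 << 14
    REVIDX_EXTSTORED = 1 << 13
    KNOWN = REVIDX_ISCENSORED | REVIDX_ELLIPSIS | REVIDX_EXTSTORED
    assert KNOWN == 0xe000

    def offset_type(offset, flags):
        assert (flags & ~KNOWN) == 0   # reject unknown index flags
        return (offset << 16) | flags

    q = offset_type(1234, REVIDX_ISCENSORED)
    assert q >> 16 == 1234            # getoffset()
    assert q & 0xFFFF == 1 << 15      # gettype()
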
@@ -97,22 +142,6 @@
     s.update(text)
     return s.digest()
 
-def decompress(bin):
-    """ decompress the given input """
-    if not bin:
-        return bin
-    t = bin[0]
-    if t == '\0':
-        return bin
-    if t == 'x':
-        try:
-            return _decompress(bin)
-        except zlib.error as e:
-            raise RevlogError(_("revlog decompress error: %s") % str(e))
-    if t == 'u':
-        return util.buffer(bin, 1)
-    raise RevlogError(_("unknown compression type %r") % t)
-
 # index v0:
 #  4 bytes: offset
 #  4 bytes: compressed length
@@ -245,6 +274,7 @@
         # Mapping of revision integer to full node.
         self._nodecache = {nullid: nullrev}
         self._nodepos = None
+        self._compengine = 'zlib'
 
         v = REVLOG_DEFAULT_VERSION
         opts = getattr(opener, 'options', None)
@@ -261,6 +291,8 @@
             if 'aggressivemergedeltas' in opts:
                 self._aggressivemergedeltas = opts['aggressivemergedeltas']
             self._lazydeltabase = bool(opts.get('lazydeltabase', False))
+            if 'compengine' in opts:
+                self._compengine = opts['compengine']
 
         if self._chunkcachesize <= 0:
             raise RevlogError(_('revlog chunk cache size %r is not greater '
@@ -313,6 +345,12 @@
             self._chunkclear()
         # revnum -> (chain-length, sum-delta-length)
         self._chaininfocache = {}
+        # revlog header -> revlog compressor
+        self._decompressors = {}
+
+    @util.propertycache
+    def _compressor(self):
+        return util.compengines[self._compengine].revlogcompressor()
 
     def tip(self):
         return self.node(len(self.index) - 2)
@@ -380,22 +418,29 @@
                     return r
             raise LookupError(node, self.indexfile, _('no node'))
 
-    def node(self, rev):
-        return self.index[rev][7]
-    def linkrev(self, rev):
-        return self.index[rev][4]
-    def parents(self, node):
-        i = self.index
-        d = i[self.rev(node)]
-        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
-    def parentrevs(self, rev):
-        return self.index[rev][5:7]
+    # Accessors for index entries.
+
+    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
+    # are flags.
     def start(self, rev):
         return int(self.index[rev][0] >> 16)
-    def end(self, rev):
-        return self.start(rev) + self.length(rev)
+
+    def flags(self, rev):
+        return self.index[rev][0] & 0xFFFF
+
     def length(self, rev):
         return self.index[rev][1]
+
+    def rawsize(self, rev):
+        """return the length of the uncompressed text for a given revision"""
+        l = self.index[rev][2]
+        if l >= 0:
+            return l
+
+        t = self.revision(self.node(rev))
+        return len(t)
+    size = rawsize
+
     def chainbase(self, rev):
         base = self._chainbasecache.get(rev)
         if base is not None:
@@ -409,6 +454,26 @@
 
         self._chainbasecache[rev] = base
         return base
+
+    def linkrev(self, rev):
+        return self.index[rev][4]
+
+    def parentrevs(self, rev):
+        return self.index[rev][5:7]
+
+    def node(self, rev):
+        return self.index[rev][7]
+
+    # Derived from index values.
+
+    def end(self, rev):
+        return self.start(rev) + self.length(rev)
+
+    def parents(self, node):
+        i = self.index
+        d = i[self.rev(node)]
+        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
+
     def chainlen(self, rev):
         return self._chaininfo(rev)[0]
 
@@ -478,18 +543,6 @@
         chain.reverse()
         return chain, stopped
 
-    def flags(self, rev):
-        return self.index[rev][0] & 0xFFFF
-    def rawsize(self, rev):
-        """return the length of the uncompressed text for a given revision"""
-        l = self.index[rev][2]
-        if l >= 0:
-            return l
-
-        t = self.revision(self.node(rev))
-        return len(t)
-    size = rawsize
-
     def ancestors(self, revs, stoprev=0, inclusive=False):
         """Generate the ancestors of 'revs' in reverse topological order.
         Does not generate revs lower than stoprev.
@@ -582,7 +635,7 @@
                         visit.append(p)
         missing = list(missing)
         missing.sort()
-        return has, [self.node(r) for r in missing]
+        return has, [self.node(miss) for miss in missing]
 
     def incrementalmissingrevs(self, common=None):
         """Return an object that can be used to incrementally compute the
@@ -734,10 +787,10 @@
                 # include roots that aren't ancestors.
 
                 # Filter out roots that aren't ancestors of heads
-                roots = [n for n in roots if n in ancestors]
+                roots = [root for root in roots if root in ancestors]
                 # Recompute the lowest revision
                 if roots:
-                    lowestrev = min([self.rev(n) for n in roots])
+                    lowestrev = min([self.rev(root) for root in roots])
                 else:
                     # No more roots?  Return empty list
                     return nonodes
@@ -796,7 +849,7 @@
                     # But, obviously its parents aren't.
                     for p in self.parents(n):
                         heads.pop(p, None)
-        heads = [n for n, flag in heads.iteritems() if flag]
+        heads = [head for head, flag in heads.iteritems() if flag]
         roots = list(roots)
         assert orderedout
         assert roots
@@ -948,9 +1001,9 @@
 
     def _partialmatch(self, id):
         try:
-            n = self.index.partialmatch(id)
-            if n and self.hasnode(n):
-                return n
+            partial = self.index.partialmatch(id)
+            if partial and self.hasnode(partial):
+                return partial
             return None
         except RevlogError:
             # parsers.c radix tree lookup gave multiple matches
@@ -1094,8 +1147,17 @@
         Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
         to determine where each revision's data begins and ends.
         """
-        start = self.start(startrev)
-        end = self.end(endrev)
+        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
+        # (functions are expensive).
+        index = self.index
+        istart = index[startrev]
+        start = int(istart[0] >> 16)
+        if startrev == endrev:
+            end = start + istart[1]
+        else:
+            iend = index[endrev]
+            end = int(iend[0] >> 16) + iend[1]
+
         if self._inline:
             start += (startrev + 1) * self._io.size
             end += (endrev + 1) * self._io.size
@@ -1112,7 +1174,7 @@
 
         Returns a str holding uncompressed data for the requested revision.
         """
-        return decompress(self._chunkraw(rev, rev, df=df)[1])
+        return self.decompress(self._chunkraw(rev, rev, df=df)[1])
 
     def _chunks(self, revs, df=None):
         """Obtain decompressed chunks for the specified revisions.
@@ -1145,12 +1207,13 @@
             # 2G on Windows
             return [self._chunk(rev, df=df) for rev in revs]
 
+        decomp = self.decompress
         for rev in revs:
             chunkstart = start(rev)
             if inline:
                 chunkstart += (rev + 1) * iosize
             chunklength = length(rev)
-            ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
+            ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
 
         return l
 
@@ -1176,12 +1239,14 @@
         return mdiff.textdiff(self.revision(rev1),
                               self.revision(rev2))
 
-    def revision(self, nodeorrev, _df=None):
+    def revision(self, nodeorrev, _df=None, raw=False):
         """return an uncompressed revision of a given node or revision
         number.
 
-        _df is an existing file handle to read from. It is meant to only be
-        used internally.
+        _df - an existing file handle to read from. (internal-only)
+        raw - an optional argument specifying if the revision data is to be
+        treated as raw data when applying flag transforms. 'raw' should be set
+        to True when generating changegroups or in debug commands.
         """
         if isinstance(nodeorrev, int):
             rev = nodeorrev
@@ -1203,11 +1268,6 @@
         if rev is None:
             rev = self.rev(node)
 
-        # check rev flags
-        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
-            raise RevlogError(_('incompatible revision flag %x') %
-                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
-
         chain, stopped = self._deltachain(rev, stoprev=cachedrev)
         if stopped:
             text = self._cache[2]
@@ -1222,7 +1282,10 @@
 
         text = mdiff.patches(text, bins)
 
-        text = self._checkhash(text, node, rev)
+        text, validatehash = self._processflags(text, self.flags(rev), 'read',
+                                                raw=raw)
+        if validatehash:
+            self.checkhash(text, node, rev=rev)
 
         self._cache = (node, rev, text)
         return text
@@ -1235,12 +1298,73 @@
         """
         return hash(text, p1, p2)
 
-    def _checkhash(self, text, node, rev):
-        p1, p2 = self.parents(node)
-        self.checkhash(text, p1, p2, node, rev)
-        return text
+    def _processflags(self, text, flags, operation, raw=False):
+        """Inspect revision data flags and applies transforms defined by
+        registered flag processors.
+
+        ``text`` - the revision data to process
+        ``flags`` - the revision flags
+        ``operation`` - the operation being performed (read or write)
+        ``raw`` - an optional argument describing if the raw transform should be
+        applied.
+
+        This method processes the flags in the order (or reverse order if
+        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
+        flag processors registered for present flags. The order of flags defined
+        in REVIDX_FLAGS_ORDER needs to be stable because the transforms are not
+        necessarily commutative.
+
+        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
+        processed text and ``validatehash`` is a bool indicating whether the
+        returned text should be checked for hash integrity.
 
-    def checkhash(self, text, p1, p2, node, rev=None):
+        Note: If the ``raw`` argument is set, it has precedence over the
+        operation and will only update the value of ``validatehash``.
+        """
+        if operation not in ('read', 'write'):
+            raise ProgrammingError(_("invalid '%s' operation") % operation)
+        # Check all flags are known.
+        if flags & ~REVIDX_KNOWN_FLAGS:
+            raise RevlogError(_("incompatible revision flag '%#x'") %
+                              (flags & ~REVIDX_KNOWN_FLAGS))
+        validatehash = True
+        # Depending on the operation (read or write), the order might be
+        # reversed due to non-commutative transforms.
+        orderedflags = REVIDX_FLAGS_ORDER
+        if operation == 'write':
+            orderedflags = reversed(orderedflags)
+
+        for flag in orderedflags:
+            # If a flagprocessor has been registered for a known flag, apply the
+            # related operation transform and update result tuple.
+            if flag & flags:
+                vhash = True
+
+                if flag not in _flagprocessors:
+                    message = _("missing processor for flag '%#x'") % (flag)
+                    raise RevlogError(message)
+
+                processor = _flagprocessors[flag]
+                if processor is not None:
+                    readtransform, writetransform, rawtransform = processor
+
+                    if raw:
+                        vhash = rawtransform(self, text)
+                    elif operation == 'read':
+                        text, vhash = readtransform(self, text)
+                    else: # write operation
+                        text, vhash = writetransform(self, text)
+                validatehash = validatehash and vhash
+
+        return text, validatehash
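
Why the write order is the reverse of the read order: with non-commutative transforms, the last one applied on write must be the first one undone on read. A toy standalone illustration (the transforms are invented):

    import zlib

    def toywrite(text):
        text = zlib.compress(text)    # transform A, applied first
        return text[::-1]             # transform B, applied second

    def toyread(blob):
        blob = blob[::-1]             # undo B first...
        return zlib.decompress(blob)  # ...then undo A

    assert toyread(toywrite(b'revision data')) == b'revision data'
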
+
+    def checkhash(self, text, node, p1=None, p2=None, rev=None):
+        """Check node hash integrity.
+
+        Available as a function so that subclasses can extend hash mismatch
+        behaviors as needed.
+        """
+        if p1 is None and p2 is None:
+            p1, p2 = self.parents(node)
         if node != self.hash(text, p1, p2):
             revornode = rev
             if revornode is None:
@@ -1300,7 +1424,7 @@
         self._chunkclear()
 
     def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
-                    node=None):
+                    node=None, flags=REVIDX_DEFAULT_FLAGS):
         """add a revision to the log
 
         text - the revision data to add
@@ -1311,11 +1435,23 @@
         node - nodeid of revision; typically node is not specified, and it is
             computed by default as hash(text, p1, p2), however subclasses might
             use different hashing method (and override checkhash() in such case)
+        flags - the known flags to set on the revision
         """
         if link == nullrev:
             raise RevlogError(_("attempted to add linkrev -1 to %s")
                               % self.indexfile)
 
+        if flags:
+            node = node or self.hash(text, p1, p2)
+
+        newtext, validatehash = self._processflags(text, flags, 'write')
+
+        # If the flag processor modifies the revision data, ignore any provided
+        # cachedelta.
+        if newtext != text:
+            cachedelta = None
+        text = newtext
+
         if len(text) > _maxentrysize:
             raise RevlogError(
                 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
@@ -1325,46 +1461,90 @@
         if node in self.nodemap:
             return node
 
+        if validatehash:
+            self.checkhash(text, node, p1=p1, p2=p2)
+
         dfh = None
         if not self._inline:
             dfh = self.opener(self.datafile, "a+")
         ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
         try:
             return self._addrevision(node, text, transaction, link, p1, p2,
-                                     REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
+                                     flags, cachedelta, ifh, dfh)
         finally:
             if dfh:
                 dfh.close()
             ifh.close()
 
-    def compress(self, text):
-        """ generate a possibly-compressed representation of text """
-        if not text:
-            return ("", text)
-        l = len(text)
-        bin = None
-        if l < 44:
-            pass
-        elif l > 1000000:
-            # zlib makes an internal copy, thus doubling memory usage for
-            # large files, so lets do this in pieces
-            z = zlib.compressobj()
-            p = []
-            pos = 0
-            while pos < l:
-                pos2 = pos + 2**20
-                p.append(z.compress(text[pos:pos2]))
-                pos = pos2
-            p.append(z.flush())
-            if sum(map(len, p)) < l:
-                bin = "".join(p)
-        else:
-            bin = _compress(text)
-        if bin is None or len(bin) > l:
-            if text[0] == '\0':
-                return ("", text)
-            return ('u', text)
-        return ("", bin)
+    def compress(self, data):
+        """Generate a possibly-compressed representation of data."""
+        if not data:
+            return '', data
+
+        compressed = self._compressor.compress(data)
+
+        if compressed:
+            # The revlog compressor added the header in the returned data.
+            return '', compressed
+
+        if data[0] == '\0':
+            return '', data
+        return 'u', data
+
+    def decompress(self, data):
+        """Decompress a revlog chunk.
+
+        The chunk is expected to begin with a header identifying the
+        format type so it can be routed to an appropriate decompressor.
+        """
+        if not data:
+            return data
+
+        # Revlogs are read much more frequently than they are written and many
+        # chunks only take microseconds to decompress, so performance is
+        # important here.
+        #
+        # We can make a few assumptions about revlogs:
+        #
+        # 1) the majority of chunks will be compressed (as opposed to inline
+        #    raw data).
+        # 2) decompressing *any* data will likely be at least 10x slower than
+        #    returning raw inline data.
+        # 3) we want to prioritize common and officially supported compression
+        #    engines.
+        #
+        # It follows that we want to optimize for the "decompress compressed
+        # data encoded with common and officially supported compression
+        # engines" case over "raw data" and "data encoded by less common or
+        # non-official compression engines." That is why we have the inline
+        # lookup first followed by the compengines lookup.
+        #
+        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
+        # compressed chunks. And this matters for changelog and manifest reads.
+        t = data[0]
+
+        if t == 'x':
+            try:
+                return _zlibdecompress(data)
+            except zlib.error as e:
+                raise RevlogError(_('revlog decompress error: %s') % str(e))
+        # '\0' is more common than 'u' so it goes first.
+        elif t == '\0':
+            return data
+        elif t == 'u':
+            return util.buffer(data, 1)
+
+        try:
+            compressor = self._decompressors[t]
+        except KeyError:
+            try:
+                engine = util.compengines.forrevlogheader(t)
+                compressor = engine.revlogcompressor()
+                self._decompressors[t] = compressor
+            except KeyError:
+                raise RevlogError(_('unknown compression type %r') % t)
+
+        return compressor.decompress(data)
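
A toy standalone version of the dispatch described above, covering only the three inline cases ('x' for zlib, NUL for raw data that happens to start with a zero byte, 'u' for explicitly uncompressed):

    import zlib

    def toydecompress(data):
        if not data:
            return data
        t = data[0:1]
        if t == b'x':        # zlib streams start with 0x78, i.e. 'x'
            return zlib.decompress(data)
        elif t == b'\0':     # stored raw, no marker byte to strip
            return data
        elif t == b'u':      # stored raw behind an explicit marker
            return data[1:]
        raise ValueError('unknown compression type %r' % t)

    assert toydecompress(zlib.compress(b'hello')) == b'hello'
    assert toydecompress(b'uhello') == b'hello'
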
 
     def _isgooddelta(self, d, textlen):
         """Returns True if the given delta is good. Good means that it is within
@@ -1386,13 +1566,16 @@
         return True
 
     def _addrevision(self, node, text, transaction, link, p1, p2, flags,
-                     cachedelta, ifh, dfh, alwayscache=False):
+                     cachedelta, ifh, dfh, alwayscache=False, raw=False):
         """internal function to add revisions to the log
 
         see addrevision for argument descriptions.
         invariants:
         - text is optional (can be None); if not set, cachedelta must be set.
           if both are set, they must correspond to each other.
+        - raw is optional; if set to True, it indicates the revision data is to
+          be treated by _processflags() as raw. It is usually set by changegroup
+          generation and debug commands.
         """
         btext = [text]
         def buildtext():
@@ -1412,10 +1595,14 @@
                     fh = ifh
                 else:
                     fh = dfh
-                basetext = self.revision(self.node(baserev), _df=fh)
+                basetext = self.revision(self.node(baserev), _df=fh, raw=raw)
                 btext[0] = mdiff.patch(basetext, delta)
+
             try:
-                self.checkhash(btext[0], p1, p2, node)
+                res = self._processflags(btext[0], flags, 'read', raw=raw)
+                btext[0], validatehash = res
+                if validatehash:
+                    self.checkhash(btext[0], node, p1=p1, p2=p2)
                 if flags & REVIDX_ISCENSORED:
                     raise RevlogError(_('node %s is not censored') % node)
             except CensoredNodeError:
@@ -1642,10 +1829,14 @@
                 # the added revision, which will require a call to
                 # revision(). revision() will fast path if there is a cache
                 # hit. So, we tell _addrevision() to always cache in this case.
+                # We're only using addgroup() in the context of changegroup
+                # generation so the revision data can always be handled as raw
+                # by the flagprocessor.
                 chain = self._addrevision(node, None, transaction, link,
                                           p1, p2, flags, (baserev, delta),
                                           ifh, dfh,
-                                          alwayscache=bool(addrevisioncb))
+                                          alwayscache=bool(addrevisioncb),
+                                          raw=True)
 
                 if addrevisioncb:
                     addrevisioncb(self, chain)
@@ -1794,3 +1985,117 @@
         if not self._inline:
             res.append(self.datafile)
         return res
+
+    DELTAREUSEALWAYS = 'always'
+    DELTAREUSESAMEREVS = 'samerevs'
+    DELTAREUSENEVER = 'never'
+
+    DELTAREUSEALL = set(['always', 'samerevs', 'never'])
+
+    def clone(self, tr, destrevlog, addrevisioncb=None,
+              deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
+        """Copy this revlog to another, possibly with format changes.
+
+        The destination revlog will contain the same revisions and nodes.
+        However, it may not be bit-for-bit identical due to e.g. delta encoding
+        differences.
+
+        The ``deltareuse`` argument controls how deltas from the existing revlog
+        are preserved in the destination revlog. The argument can have the
+        following values:
+
+        DELTAREUSEALWAYS
+           Deltas will always be reused (if possible), even if the destination
+           revlog would not select the same revisions for the delta. This is the
+           fastest mode of operation.
+        DELTAREUSESAMEREVS
+           Deltas will be reused if the destination revlog would pick the same
+           revisions for the delta. This mode strikes a balance between speed
+           and optimization.
+        DELTAREUSENEVER
+           Deltas will never be reused. This is the slowest mode of execution.
+           This mode can be used to recompute deltas (e.g. if the diff/delta
+           algorithm changes).
+
+        Delta computation can be slow, so the choice of delta reuse policy can
+        significantly affect run time.
+
+        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
+        two extremes. Deltas will be reused when appropriate. But if the
+        destination revlog could choose a better delta base, it will do so.
+        This means that when converting a non-generaldelta revlog to a
+        generaldelta revlog, deltas will be recomputed if the delta's parent
+        isn't a parent of the revision.
+
+        In addition to the delta policy, the ``aggressivemergedeltas`` argument
+        controls whether to compute deltas against both parents for merges.
+        If not set, the destination revlog's existing setting is preserved.
+        """
+        if deltareuse not in self.DELTAREUSEALL:
+            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
+
+        if len(destrevlog):
+            raise ValueError(_('destination revlog is not empty'))
+
+        if getattr(self, 'filteredrevs', None):
+            raise ValueError(_('source revlog has filtered revisions'))
+        if getattr(destrevlog, 'filteredrevs', None):
+            raise ValueError(_('destination revlog has filtered revisions'))
+
+        # lazydeltabase controls whether to reuse a cached delta, if possible.
+        oldlazydeltabase = destrevlog._lazydeltabase
+        oldamd = destrevlog._aggressivemergedeltas
+
+        try:
+            if deltareuse == self.DELTAREUSEALWAYS:
+                destrevlog._lazydeltabase = True
+            elif deltareuse == self.DELTAREUSESAMEREVS:
+                destrevlog._lazydeltabase = False
+
+            destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd
+
+            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
+                                                self.DELTAREUSESAMEREVS)
+
+            index = self.index
+            for rev in self:
+                entry = index[rev]
+
+                # Some classes override linkrev to take filtered revs into
+                # account. Use raw entry from index.
+                flags = entry[0] & 0xffff
+                linkrev = entry[4]
+                p1 = index[entry[5]][7]
+                p2 = index[entry[6]][7]
+                node = entry[7]
+
+                # (Possibly) reuse the delta from the revlog if allowed and
+                # the revlog chunk is a delta.
+                cachedelta = None
+                text = None
+                if populatecachedelta:
+                    dp = self.deltaparent(rev)
+                    if dp != nullrev:
+                        cachedelta = (dp, str(self._chunk(rev)))
+
+                if not cachedelta:
+                    text = self.revision(rev)
+
+                ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
+                                        checkambig=False)
+                dfh = None
+                if not destrevlog._inline:
+                    dfh = destrevlog.opener(destrevlog.datafile, 'a+')
+                try:
+                    destrevlog._addrevision(node, text, tr, linkrev, p1, p2,
+                                            flags, cachedelta, ifh, dfh)
+                finally:
+                    if dfh:
+                        dfh.close()
+                    ifh.close()
+
+                if addrevisioncb:
+                    addrevisioncb(self, rev, node)
+        finally:
+            destrevlog._lazydeltabase = oldlazydeltabase
+            destrevlog._aggressivemergedeltas = oldamd
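
A hedged usage sketch of the clone API as the upgrade code above drives it; ``oldrl``, ``newrl`` and ``tr`` are assumed to already exist (a source revlog, an empty destination revlog, and an open transaction):

    # Recompute every delta while copying, e.g. after a delta algorithm
    # change. DELTAREUSENEVER is the slowest but most thorough policy.
    oldrl.clone(tr, newrl,
                deltareuse=oldrl.DELTAREUSENEVER,
                aggressivemergedeltas=True)
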
--- a/mercurial/revset.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/revset.py	Wed Jan 18 11:43:36 2017 -0500
@@ -302,6 +302,8 @@
 
 # helpers
 
+_notset = object()
+
 def getsymbol(x):
     if x and x[0] == 'symbol':
         return x[1]
@@ -312,6 +314,14 @@
         return x[1]
     raise error.ParseError(err)
 
+def getinteger(x, err, default=_notset):
+    if not x and default is not _notset:
+        return default
+    try:
+        return int(getstring(x, err))
+    except ValueError:
+        raise error.ParseError(err)
+
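The ``_notset`` sentinel lets callers pass any value, falsy ones included, as a legitimate default. A standalone sketch of the pattern (with a simplified stand-in for the parser's string nodes):

    _notset = object()   # unique sentinel, distinct from every real default

    def toygetinteger(x, err, default=_notset):
        if not x and default is not _notset:
            return default
        try:
            return int(x[1])   # x plays the role of ('string', value)
        except (TypeError, ValueError):
            raise ValueError(err)

    assert toygetinteger(None, 'expects a number', default=0) == 0
    assert toygetinteger(('string', '42'), 'expects a number') == 42
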
 def getlist(x):
     if not x:
         return []
@@ -319,6 +329,20 @@
         return list(x[1:])
     return [x]
 
+def getrange(x, err):
+    if not x:
+        raise error.ParseError(err)
+    op = x[0]
+    if op == 'range':
+        return x[1], x[2]
+    elif op == 'rangepre':
+        return None, x[1]
+    elif op == 'rangepost':
+        return x[1], None
+    elif op == 'rangeall':
+        return None, None
+    raise error.ParseError(err)
+
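For reference, the four node types correspond to the revset spellings 'a:b', ':b', 'a:' and ':'. A standalone mirror of the mapping (toy parse nodes, no real parser involved):

    def toygetrange(x):
        op = x[0]
        if op == 'range':          # 'a:b'
            return x[1], x[2]
        elif op == 'rangepre':     # ':b'
            return None, x[1]
        elif op == 'rangepost':    # 'a:'
            return x[1], None
        elif op == 'rangeall':     # ':'
            return None, None
        raise ValueError('not a range')

    assert toygetrange(('rangepre', 7)) == (None, 7)
    assert toygetrange(('rangeall', None)) == (None, None)
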
 def getargs(x, min, max, err):
     l = getlist(x)
     if len(l) < min or (max >= 0 and len(l) > max):
@@ -326,7 +350,7 @@
     return l
 
 def getargsdict(x, funcname, keys):
-    return parser.buildargsdict(getlist(x), funcname, keys.split(),
+    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
                                 keyvaluenode='keyvalue', keynode='symbol')
 
 def getset(repo, subset, x):
@@ -370,6 +394,10 @@
         return baseset()
     return _makerangeset(repo, subset, m.first(), n.last(), order)
 
+def rangeall(repo, subset, x, order):
+    assert x is None
+    return _makerangeset(repo, subset, 0, len(repo) - 1, order)
+
 def rangepre(repo, subset, y, order):
     # ':y' can't be rewritten to '0:y' since '0' may be hidden
     n = getset(repo, fullreposet(repo), y)
@@ -377,6 +405,12 @@
         return baseset()
     return _makerangeset(repo, subset, 0, n.last(), order)
 
+def rangepost(repo, subset, x, order):
+    m = getset(repo, fullreposet(repo), x)
+    if not m:
+        return baseset()
+    return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
+
 def _makerangeset(repo, subset, m, n, order):
     if m == n:
         r = baseset([m])
@@ -437,10 +471,10 @@
 def func(repo, subset, a, b, order):
     f = getsymbol(a)
     if f in symbols:
-        fn = symbols[f]
-        if getattr(fn, '_takeorder', False):
-            return fn(repo, subset, b, order)
-        return fn(repo, subset, b)
+        func = symbols[f]
+        if getattr(func, '_takeorder', False):
+            return func(repo, subset, b, order)
+        return func(repo, subset, b)
 
     keep = lambda fn: getattr(fn, '__doc__', None) is not None
 
@@ -539,10 +573,7 @@
     Changesets that are the Nth ancestor (first parents only) of a changeset
     in set.
     """
-    try:
-        n = int(n[1])
-    except (TypeError, ValueError):
-        raise error.ParseError(_("~ expects a number"))
+    n = getinteger(n, _("~ expects a number"))
     ps = set()
     cl = repo.changelog
     for r in getset(repo, fullreposet(repo), x):
@@ -556,9 +587,9 @@
     """Alias for ``user(string)``.
     """
     # i18n: "author" is a keyword
-    n = encoding.lower(getstring(x, _("author requires a string")))
-    kind, pattern, matcher = _substringmatcher(n)
-    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
+    n = getstring(x, _("author requires a string"))
+    kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
+    return subset.filter(lambda x: matcher(repo[x].user()),
                          condrepr=('<user %r>', n))
 
 @predicate('bisect(string)', safe=True)
@@ -588,9 +619,7 @@
 def bookmark(repo, subset, x):
     """The named bookmark or all bookmarks.
 
-    If `name` starts with `re:`, the remainder of the name is treated as
-    a regular expression. To match a bookmark that actually starts with `re:`,
-    use the prefix `literal:`.
+    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
     """
     # i18n: "bookmark" is a keyword
     args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
@@ -628,9 +657,8 @@
     All changesets belonging to the given branch or the branches of the given
     changesets.
 
-    If `string` starts with `re:`, the remainder of the name is treated as
-    a regular expression. To match a branch that actually starts with `re:`,
-    use the prefix `literal:`.
+    Pattern matching is supported for `string`. See
+    :hg:`help revisions.patterns`.
     """
     getbi = repo.revbranchcache().branchinfo
 
@@ -723,12 +751,15 @@
     cs = set()
     pr = repo.changelog.parentrevs
     minrev = parentset.min()
+    nullrev = node.nullrev
     for r in subset:
         if r <= minrev:
             continue
-        for p in pr(r):
-            if p in parentset:
-                cs.add(r)
+        p1, p2 = pr(r)
+        if p1 in parentset:
+            cs.add(r)
+        if p2 != nullrev and p2 in parentset:
+            cs.add(r)
     return baseset(cs)
 
 @predicate('children(set)', safe=True)
@@ -811,15 +842,17 @@
 @predicate('desc(string)', safe=True)
 def desc(repo, subset, x):
     """Search commit message for string. The match is case-insensitive.
+
+    Pattern matching is supported for `string`. See
+    :hg:`help revisions.patterns`.
     """
     # i18n: "desc" is a keyword
-    ds = encoding.lower(getstring(x, _("desc requires a string")))
-
-    def matches(x):
-        c = repo[x]
-        return ds in encoding.lower(c.description())
-
-    return subset.filter(matches, condrepr=('<desc %r>', ds))
+    ds = getstring(x, _("desc requires a string"))
+
+    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
+
+    return subset.filter(lambda r: matcher(repo[r].description()),
+                         condrepr=('<desc %r>', ds))
 
 def _descendants(repo, subset, x, followfirst=False):
     roots = getset(repo, fullreposet(repo), x)
@@ -921,9 +954,8 @@
     """Changesets with the given label in the extra metadata, with the given
     optional value.
 
-    If `value` starts with `re:`, the remainder of the value is treated as
-    a regular expression. To match a value that actually starts with `re:`,
-    use the prefix `literal:`.
+    Pattern matching is supported for `value`. See
+    :hg:`help revisions.patterns`.
     """
     args = getargsdict(x, 'extra', 'label value')
     if 'label' not in args:
@@ -1065,6 +1097,51 @@
     # of every revisions or files revisions.
     return _follow(repo, subset, x, '_followfirst', followfirst=True)
 
+@predicate('followlines(file, fromline:toline[, startrev=.])', safe=True)
+def followlines(repo, subset, x):
+    """Changesets modifying `file` in line range ('fromline', 'toline').
+
+    The line range corresponds to the content of 'file' at 'startrev', so its
+    bounds must stay within the file's line count. If startrev is not
+    specified, the working directory's parent is used.
+    """
+    from . import context  # avoid circular import issues
+
+    args = getargsdict(x, 'followlines', 'file *lines startrev')
+    if len(args['lines']) != 1:
+        raise error.ParseError(_("followlines requires a line range"))
+
+    rev = '.'
+    if 'startrev' in args:
+        revs = getset(repo, fullreposet(repo), args['startrev'])
+        if len(revs) != 1:
+            raise error.ParseError(
+                _("followlines expects exactly one revision"))
+        rev = revs.last()
+
+    pat = getstring(args['file'], _("followlines requires a pattern"))
+    if not matchmod.patkind(pat):
+        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
+    else:
+        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
+        files = [f for f in repo[rev] if m(f)]
+        if len(files) != 1:
+            raise error.ParseError(_("followlines expects exactly one file"))
+        fname = files[0]
+
+    lr = getrange(args['lines'][0], _("followlines expects a line range"))
+    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
+                        for a in lr]
+    if toline - fromline < 0:
+        raise error.ParseError(_("line range must be positive"))
+    if fromline < 1:
+        raise error.ParseError(_("fromline must be strictly positive"))
+    fromline -= 1
+
+    fctx = repo[rev].filectx(fname)
+    revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
+    return subset & generatorset(revs, iterasc=False)
+
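For example (the file name and line numbers are hypothetical), the changesets that touched lines 10 to 20 of a file could be selected with:

    hg log -r "followlines('mercurial/revlog.py', 10:20)"
    hg log -r "followlines('mercurial/revlog.py', 10:20, startrev=42)"
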
 @predicate('all()', safe=True)
 def getall(repo, subset, x):
     """All changesets, the same as ``0:tip``.
@@ -1204,6 +1281,9 @@
 def keyword(repo, subset, x):
     """Search commit message, user name, and names of changed files for
     string. The match is case-insensitive.
+
+    For a regular expression or case-sensitive search of these fields, use
+    ``grep(regex)``.
     """
     # i18n: "keyword" is a keyword
     kw = encoding.lower(getstring(x, _("keyword requires a string")))
@@ -1223,19 +1303,12 @@
     if 'set' not in args:
         # i18n: "limit" is a keyword
         raise error.ParseError(_("limit requires one to three arguments"))
-    try:
-        lim, ofs = 1, 0
-        if 'n' in args:
-            # i18n: "limit" is a keyword
-            lim = int(getstring(args['n'], _("limit requires a number")))
-        if 'offset' in args:
-            # i18n: "limit" is a keyword
-            ofs = int(getstring(args['offset'], _("limit requires a number")))
-        if ofs < 0:
-            raise error.ParseError(_("negative offset"))
-    except (TypeError, ValueError):
-        # i18n: "limit" is a keyword
-        raise error.ParseError(_("limit expects a number"))
+    # i18n: "limit" is a keyword
+    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
+    # i18n: "limit" is a keyword
+    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
+    if ofs < 0:
+        raise error.ParseError(_("negative offset"))
     os = getset(repo, fullreposet(repo), args['set'])
     result = []
     it = iter(os)
@@ -1258,14 +1331,10 @@
     """
     # i18n: "last" is a keyword
     l = getargs(x, 1, 2, _("last requires one or two arguments"))
-    try:
-        lim = 1
-        if len(l) == 2:
-            # i18n: "last" is a keyword
-            lim = int(getstring(l[1], _("last requires a number")))
-    except (TypeError, ValueError):
+    lim = 1
+    if len(l) == 2:
         # i18n: "last" is a keyword
-        raise error.ParseError(_("last expects a number"))
+        lim = getinteger(l[1], _("last expects a number"))
     os = getset(repo, fullreposet(repo), l[0])
     os.reverse()
     result = []
@@ -1354,9 +1423,8 @@
 def named(repo, subset, x):
     """The changesets in a given namespace.
 
-    If `namespace` starts with `re:`, the remainder of the string is treated as
-    a regular expression. To match a namespace that actually starts with `re:`,
-    use the prefix `literal:`.
+    Pattern matching is supported for `namespace`. See
+    :hg:`help revisions.patterns`.
     """
     # i18n: "named" is a keyword
     args = getargs(x, 1, 1, _('named requires a namespace argument'))
@@ -1443,7 +1511,7 @@
 
     results = set(cl.findmissingrevs(common=exclude, heads=include))
     # XXX we should turn this into a baseset instead of a set, smartset may do
-    # some optimisations from the fact this is a baseset.
+    # some optimizations from the fact this is a baseset.
     return subset & results
 
 @predicate('origin([set])', safe=True)
@@ -1475,7 +1543,7 @@
     o = set([_firstsrc(r) for r in dests])
     o -= set([None])
     # XXX we should turn this into a baseset instead of a set, smartset may do
-    # some optimisations from the fact this is a baseset.
+    # some optimizations from the fact this is a baseset.
     return subset & o
 
 @predicate('outgoing([path])', safe=True)
@@ -1521,7 +1589,7 @@
         ps.add(cl.parentrevs(r)[0])
     ps -= set([node.nullrev])
     # XXX we should turn this into a baseset instead of a set, smartset may do
-    # some optimisations from the fact this is a baseset.
+    # some optimizations from the fact this is a baseset.
     return subset & ps
 
 @predicate('p2([set])', safe=True)
@@ -1544,7 +1612,7 @@
         ps.add(cl.parentrevs(r)[1])
     ps -= set([node.nullrev])
     # XXX we should turn this into a baseset instead of a set, smartset may do
-    # some optimisations from the fact this is a baseset.
+    # some optimizations from the fact this is a baseset.
     return subset & ps
 
 def parentpost(repo, subset, x, order):
@@ -2197,19 +2265,23 @@
 
     return subset.filter(matches, condrepr=('<subrepo %r>', pat))
 
-def _substringmatcher(pattern):
-    kind, pattern, matcher = util.stringmatcher(pattern)
+def _substringmatcher(pattern, casesensitive=True):
+    kind, pattern, matcher = util.stringmatcher(pattern,
+                                                casesensitive=casesensitive)
     if kind == 'literal':
-        matcher = lambda s: pattern in s
+        if not casesensitive:
+            pattern = encoding.lower(pattern)
+            matcher = lambda s: pattern in encoding.lower(s)
+        else:
+            matcher = lambda s: pattern in s
     return kind, pattern, matcher
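
A standalone check of the case-insensitive literal path added here, approximating encoding.lower() with str.lower() for the sketch:

    def toymatcher(pattern, casesensitive=True):
        if not casesensitive:
            pattern = pattern.lower()
            return lambda s: pattern in s.lower()
        return lambda s: pattern in s

    m = toymatcher('Bob', casesensitive=False)
    assert m('bob@example.com') and m('BOBBY <b@example.com>')
    assert not toymatcher('Bob')('bob@example.com')
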
 
 @predicate('tag([name])', safe=True)
 def tag(repo, subset, x):
     """The specified tag by name, or all tagged revisions if no name is given.
 
-    If `name` starts with `re:`, the remainder of the name is treated as
-    a regular expression. To match a tag that actually starts with `re:`,
-    use the prefix `literal:`.
+    Pattern matching is supported for `name`. See
+    :hg:`help revisions.patterns`.
     """
     # i18n: "tag" is a keyword
     args = getargs(x, 0, 1, _("tag takes one or no arguments"))
@@ -2250,15 +2322,14 @@
 def user(repo, subset, x):
     """User name contains string. The match is case-insensitive.
 
-    If `string` starts with `re:`, the remainder of the string is treated as
-    a regular expression. To match a user that actually contains `re:`, use
-    the prefix `literal:`.
+    Pattern matching is supported for `string`. See
+    :hg:`help revisions.patterns`.
     """
     return author(repo, subset, x)
 
-# experimental
 @predicate('wdir', safe=True)
 def wdir(repo, subset, x):
+    """Working directory. (EXPERIMENTAL)"""
     # i18n: "wdir" is a keyword
     getargs(x, 0, 0, _("wdir takes no arguments"))
     if node.wdirrev in subset or isinstance(subset, fullreposet):
@@ -2339,7 +2410,9 @@
 
 methods = {
     "range": rangeset,
+    "rangeall": rangeall,
     "rangepre": rangepre,
+    "rangepost": rangepost,
     "dagrange": dagrange,
     "string": stringset,
     "symbol": stringset,
@@ -2454,10 +2527,6 @@
         return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
     elif op == 'dagrangepost':
         return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
-    elif op == 'rangeall':
-        return _analyze(('rangepre', ('string', 'tip')), order)
-    elif op == 'rangepost':
-        return _analyze(('range', x[1], ('string', 'tip')), order)
     elif op == 'negate':
         s = getstring(x[1], _("can't negate that"))
         return _analyze(('string', '-' + s), order)
@@ -2471,7 +2540,9 @@
         return (op, _analyze(x[1], order), order)
     elif op == 'not':
         return (op, _analyze(x[1], anyorder), order)
-    elif op in ('rangepre', 'parentpost'):
+    elif op == 'rangeall':
+        return (op, None, order)
+    elif op in ('rangepre', 'rangepost', 'parentpost'):
         return (op, _analyze(x[1], defineorder), order)
     elif op == 'group':
         return _analyze(x[1], order)
@@ -2576,7 +2647,9 @@
             o = _optimize(x[1], not small)
             order = x[2]
             return o[0], (op, o[1], order)
-    elif op in ('rangepre', 'parentpost'):
+    elif op == 'rangeall':
+        return smallbonus, x
+    elif op in ('rangepre', 'rangepost', 'parentpost'):
         o = _optimize(x[1], small)
         order = x[2]
         return o[0], (op, o[1], order)
@@ -2603,7 +2676,7 @@
             w = 100 # very slow
         elif f == "ancestor":
             w = 1 * smallbonus
-        elif f in ('reverse', 'limit', 'first', '_intlist'):
+        elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
             w = 0
         elif f == "sort":
             w = 10 # assume most sorts look at changelog
@@ -3198,7 +3271,7 @@
     def __len__(self):
         # Basic implementation to be changed in future patches.
         # until this gets improved, we use generator expression
-        # here, since list compr is free to call __len__ again
+        # here, since list comprehensions are free to call __len__ again
         # causing infinite recursion
         l = baseset(r for r in self)
         return len(l)
@@ -3811,17 +3884,6 @@
             # object.
             other = baseset(other - self._hiddenrevs)
 
-        # XXX As fullreposet is also used as bootstrap, this is wrong.
-        #
-        # With a giveme312() revset returning [3,1,2], this makes
-        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
-        # We cannot just drop it because other usage still need to sort it:
-        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
-        #
-        # There is also some faulty revset implementations that rely on it
-        # (eg: children as of its state in e8075329c5fb)
-        #
-        # When we fix the two points above we can move this into the if clause
         other.sort(reverse=self.isdescending())
         return other
 
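
With this change `:` (rangeall) and `x:` (rangepost) stop being rewritten against 'tip' in _analyze() and become first-class entries in the methods table, so the optimizer can give rangeall its smallbonus directly. A toy dispatch-table illustration (not Mercurial code; REVS stands in for a repository's revisions):

    # ':' and 'N:' handled as methods of their own instead of being
    # rewritten into ':tip' / 'N:tip' forms
    REVS = list(range(6))

    def rangeall(arg):
        return REVS              # ':' -> every revision

    def rangepost(arg):
        return REVS[arg:]        # 'N:' -> N and all later revisions

    def rangepre(arg):
        return REVS[:arg + 1]    # ':N' -> everything up to and including N

    methods = {'rangeall': rangeall, 'rangepost': rangepost,
               'rangepre': rangepre}

    print(methods['rangeall'](None))   # [0, 1, 2, 3, 4, 5]
    print(methods['rangepost'](3))     # [3, 4, 5]
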
--- a/mercurial/scmposix.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/scmposix.py	Wed Jan 18 11:43:36 2017 -0500
@@ -1,10 +1,15 @@
 from __future__ import absolute_import
 
+import array
+import errno
+import fcntl
 import os
 import sys
 
 from . import (
+    encoding,
     osutil,
+    pycompat,
 )
 
 def _rcfiles(path):
@@ -20,20 +25,48 @@
 
 def systemrcpath():
     path = []
-    if sys.platform == 'plan9':
+    if pycompat.sysplatform == 'plan9':
         root = 'lib/mercurial'
     else:
         root = 'etc/mercurial'
     # old mod_python does not set sys.argv
     if len(getattr(sys, 'argv', [])) > 0:
-        p = os.path.dirname(os.path.dirname(sys.argv[0]))
+        p = os.path.dirname(os.path.dirname(pycompat.sysargv[0]))
         if p != '/':
             path.extend(_rcfiles(os.path.join(p, root)))
     path.extend(_rcfiles('/' + root))
     return path
 
 def userrcpath():
-    if sys.platform == 'plan9':
-        return [os.environ['home'] + '/lib/hgrc']
+    if pycompat.sysplatform == 'plan9':
+        return [encoding.environ['home'] + '/lib/hgrc']
     else:
         return [os.path.expanduser('~/.hgrc')]
+
+def termsize(ui):
+    try:
+        import termios
+        TIOCGWINSZ = termios.TIOCGWINSZ  # unavailable on IRIX (issue3449)
+    except (AttributeError, ImportError):
+        return 80, 24
+
+    for dev in (ui.ferr, ui.fout, ui.fin):
+        try:
+            try:
+                fd = dev.fileno()
+            except AttributeError:
+                continue
+            if not os.isatty(fd):
+                continue
+            arri = fcntl.ioctl(fd, TIOCGWINSZ, '\0' * 8)
+            height, width = array.array('h', arri)[:2]
+            if width > 0 and height > 0:
+                return width, height
+        except ValueError:
+            pass
+        except IOError as e:
+            if e[0] == errno.EINVAL:
+                pass
+            else:
+                raise
+    return 80, 24
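
The new termsize() probes the ui's streams with the TIOCGWINSZ ioctl and falls back to 80x24. A standalone sketch of the same probe, assuming a POSIX platform (it walks the process's standard streams instead of taking a ui object):

    import array
    import fcntl
    import os
    import sys
    import termios

    def termsize():
        for fp in (sys.stderr, sys.stdout, sys.stdin):
            try:
                fd = fp.fileno()
            except (AttributeError, ValueError):
                continue
            if not os.isatty(fd):
                continue
            try:
                # the kernel fills struct winsize: rows, cols, xpixel, ypixel
                raw = fcntl.ioctl(fd, termios.TIOCGWINSZ, b'\0' * 8)
            except (IOError, OSError):
                continue
            height, width = array.array('h', raw)[:2]
            if width > 0 and height > 0:
                return width, height
        return 80, 24
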
--- a/mercurial/scmutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/scmutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -14,6 +14,7 @@
 import os
 import re
 import shutil
+import socket
 import stat
 import tempfile
 import threading
@@ -27,18 +28,20 @@
     osutil,
     pathutil,
     phases,
+    pycompat,
     revset,
     similar,
     util,
 )
 
-if os.name == 'nt':
+if pycompat.osname == 'nt':
     from . import scmwindows as scmplatform
 else:
     from . import scmposix as scmplatform
 
 systemrcpath = scmplatform.systemrcpath
 userrcpath = scmplatform.userrcpath
+termsize = scmplatform.termsize
 
 class status(tuple):
     '''Named tuple with a list of files per status. The 'deleted', 'unknown'
@@ -139,6 +142,108 @@
     else:
         ui.status(_("no changes found\n"))
 
+def callcatch(ui, func):
+    """call func() with global exception handling
+
+    return func() if no exception happens. otherwise do some error handling
+    and return an exit code accordingly. does not handle all exceptions.
+    """
+    try:
+        return func()
+    # Global exception handling, alphabetically
+    # Mercurial-specific first, followed by built-in and library exceptions
+    except error.LockHeld as inst:
+        if inst.errno == errno.ETIMEDOUT:
+            reason = _('timed out waiting for lock held by %s') % inst.locker
+        else:
+            reason = _('lock held by %s') % inst.locker
+        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
+    except error.LockUnavailable as inst:
+        ui.warn(_("abort: could not lock %s: %s\n") %
+               (inst.desc or inst.filename, inst.strerror))
+    except error.OutOfBandError as inst:
+        if inst.args:
+            msg = _("abort: remote error:\n")
+        else:
+            msg = _("abort: remote error\n")
+        ui.warn(msg)
+        if inst.args:
+            ui.warn(''.join(inst.args))
+        if inst.hint:
+            ui.warn('(%s)\n' % inst.hint)
+    except error.RepoError as inst:
+        ui.warn(_("abort: %s!\n") % inst)
+        if inst.hint:
+            ui.warn(_("(%s)\n") % inst.hint)
+    except error.ResponseError as inst:
+        ui.warn(_("abort: %s") % inst.args[0])
+        if not isinstance(inst.args[1], basestring):
+            ui.warn(" %r\n" % (inst.args[1],))
+        elif not inst.args[1]:
+            ui.warn(_(" empty string\n"))
+        else:
+            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
+    except error.CensoredNodeError as inst:
+        ui.warn(_("abort: file censored %s!\n") % inst)
+    except error.RevlogError as inst:
+        ui.warn(_("abort: %s!\n") % inst)
+    except error.SignalInterrupt:
+        ui.warn(_("killed!\n"))
+    except error.InterventionRequired as inst:
+        ui.warn("%s\n" % inst)
+        if inst.hint:
+            ui.warn(_("(%s)\n") % inst.hint)
+        return 1
+    except error.Abort as inst:
+        ui.warn(_("abort: %s\n") % inst)
+        if inst.hint:
+            ui.warn(_("(%s)\n") % inst.hint)
+    except ImportError as inst:
+        ui.warn(_("abort: %s!\n") % inst)
+        m = str(inst).split()[-1]
+        if m in "mpatch bdiff".split():
+            ui.warn(_("(did you forget to compile extensions?)\n"))
+        elif m in "zlib".split():
+            ui.warn(_("(is your Python install correct?)\n"))
+    except IOError as inst:
+        if util.safehasattr(inst, "code"):
+            ui.warn(_("abort: %s\n") % inst)
+        elif util.safehasattr(inst, "reason"):
+            try: # usually it is in the form (errno, strerror)
+                reason = inst.reason.args[1]
+            except (AttributeError, IndexError):
+                # it might be anything, for example a string
+                reason = inst.reason
+            if isinstance(reason, unicode):
+                # SSLError of Python 2.7.9 contains a unicode
+                reason = reason.encode(encoding.encoding, 'replace')
+            ui.warn(_("abort: error: %s\n") % reason)
+        elif (util.safehasattr(inst, "args")
+              and inst.args and inst.args[0] == errno.EPIPE):
+            pass
+        elif getattr(inst, "strerror", None):
+            if getattr(inst, "filename", None):
+                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
+            else:
+                ui.warn(_("abort: %s\n") % inst.strerror)
+        else:
+            raise
+    except OSError as inst:
+        if getattr(inst, "filename", None) is not None:
+            ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
+        else:
+            ui.warn(_("abort: %s\n") % inst.strerror)
+    except MemoryError:
+        ui.warn(_("abort: out of memory\n"))
+    except SystemExit as inst:
+        # Commands shouldn't sys.exit directly, but give a return code.
+        # Just in case catch this and pass exit code to caller.
+        return inst.code
+    except socket.error as inst:
+        ui.warn(_("abort: %s\n") % inst.args[-1])
+
+    return -1
+
 def checknewlabel(repo, lbl, kind):
     # Do not use the "kind" parameter in ui output.
     # It makes strings difficult to translate.
@@ -176,7 +281,7 @@
     val = ui.config('ui', 'portablefilenames', 'warn')
     lval = val.lower()
     bval = util.parsebool(val)
-    abort = os.name == 'nt' or lval == 'abort'
+    abort = pycompat.osname == 'nt' or lval == 'abort'
     warn = bval or lval == 'warn'
     if bval is None and not (warn or abort or lval == 'ignore'):
         raise error.ConfigError(
@@ -754,7 +859,7 @@
     if _rcpath is None:
         if 'HGRCPATH' in encoding.environ:
             _rcpath = []
-            for p in os.environ['HGRCPATH'].split(os.pathsep):
+            for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
                 if not p:
                     continue
                 p = util.expandpath(p)
@@ -1356,7 +1461,7 @@
 
         # Only Windows/NTFS has slow file closing. So only enable by default
         # on that platform. But allow to be enabled elsewhere for testing.
-        defaultenabled = os.name == 'nt'
+        defaultenabled = pycompat.osname == 'nt'
         enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
 
         if not enabled:
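
callcatch() centralizes the translation of Mercurial's many exception types into "abort: ..." messages plus an exit code, so top-level callers no longer need their own except ladders. A minimal sketch of a caller (runcommand here is a made-up function, not Mercurial's dispatch code):

    from mercurial import scmutil
    from mercurial import ui as uimod

    def main():
        ui = uimod.ui()

        def runcommand():
            ui.write('hello\n')
            return 0

        # returns runcommand()'s value, or an error code (-1 by default)
        # after printing a message if a handled exception escapes
        return scmutil.callcatch(ui, runcommand)
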
--- a/mercurial/scmwindows.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/scmwindows.py	Wed Jan 18 11:43:36 2017 -0500
@@ -3,8 +3,11 @@
 import os
 
 from . import (
+    encoding,
     osutil,
+    pycompat,
     util,
+    win32,
 )
 
 try:
@@ -32,7 +35,7 @@
     if not isinstance(value, str) or not value:
         return rcpath
     value = util.localpath(value)
-    for p in value.split(os.pathsep):
+    for p in value.split(pycompat.ospathsep):
         if p.lower().endswith('mercurial.ini'):
             rcpath.append(p)
         elif os.path.isdir(p):
@@ -46,8 +49,11 @@
     home = os.path.expanduser('~')
     path = [os.path.join(home, 'mercurial.ini'),
             os.path.join(home, '.hgrc')]
-    userprofile = os.environ.get('USERPROFILE')
+    userprofile = encoding.environ.get('USERPROFILE')
     if userprofile and userprofile != home:
         path.append(os.path.join(userprofile, 'mercurial.ini'))
         path.append(os.path.join(userprofile, '.hgrc'))
     return path
+
+def termsize(ui):
+    return win32.termsize()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/server.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,162 @@
+# server.py - utility and factory of server
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+import os
+import sys
+import tempfile
+
+from .i18n import _
+
+from . import (
+    chgserver,
+    commandserver,
+    error,
+    hgweb,
+    util,
+)
+
+def runservice(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
+               runargs=None, appendpid=False):
+    '''Run a command as a service.'''
+
+    def writepid(pid):
+        if opts['pid_file']:
+            if appendpid:
+                mode = 'a'
+            else:
+                mode = 'w'
+            fp = open(opts['pid_file'], mode)
+            fp.write(str(pid) + '\n')
+            fp.close()
+
+    if opts['daemon'] and not opts['daemon_postexec']:
+        # Signal child process startup with file removal
+        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
+        os.close(lockfd)
+        try:
+            if not runargs:
+                runargs = util.hgcmd() + sys.argv[1:]
+            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
+            # Don't pass --cwd to the child process, because we've already
+            # changed directory.
+            for i in xrange(1, len(runargs)):
+                if runargs[i].startswith('--cwd='):
+                    del runargs[i]
+                    break
+                elif runargs[i].startswith('--cwd'):
+                    del runargs[i:i + 2]
+                    break
+            def condfn():
+                return not os.path.exists(lockpath)
+            pid = util.rundetached(runargs, condfn)
+            if pid < 0:
+                raise error.Abort(_('child process failed to start'))
+            writepid(pid)
+        finally:
+            try:
+                os.unlink(lockpath)
+            except OSError as e:
+                if e.errno != errno.ENOENT:
+                    raise
+        if parentfn:
+            return parentfn(pid)
+        else:
+            return
+
+    if initfn:
+        initfn()
+
+    if not opts['daemon']:
+        writepid(util.getpid())
+
+    if opts['daemon_postexec']:
+        try:
+            os.setsid()
+        except AttributeError:
+            pass
+        for inst in opts['daemon_postexec']:
+            if inst.startswith('unlink:'):
+                lockpath = inst[7:]
+                os.unlink(lockpath)
+            elif inst.startswith('chdir:'):
+                os.chdir(inst[6:])
+            elif inst != 'none':
+                raise error.Abort(_('invalid value for --daemon-postexec: %s')
+                                  % inst)
+        util.hidewindow()
+        util.stdout.flush()
+        util.stderr.flush()
+
+        nullfd = os.open(os.devnull, os.O_RDWR)
+        logfilefd = nullfd
+        if logfile:
+            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
+        os.dup2(nullfd, 0)
+        os.dup2(logfilefd, 1)
+        os.dup2(logfilefd, 2)
+        if nullfd not in (0, 1, 2):
+            os.close(nullfd)
+        if logfile and logfilefd not in (0, 1, 2):
+            os.close(logfilefd)
+
+    if runfn:
+        return runfn()
+
+_cmdservicemap = {
+    'chgunix': chgserver.chgunixservice,
+    'pipe': commandserver.pipeservice,
+    'unix': commandserver.unixforkingservice,
+}
+
+def _createcmdservice(ui, repo, opts):
+    mode = opts['cmdserver']
+    try:
+        return _cmdservicemap[mode](ui, repo, opts)
+    except KeyError:
+        raise error.Abort(_('unknown mode %s') % mode)
+
+def _createhgwebservice(ui, repo, opts):
+    # this way we can check if something was given on the command line
+    if opts.get('port'):
+        opts['port'] = util.getport(opts.get('port'))
+
+    alluis = set([ui])
+    if repo:
+        baseui = repo.baseui
+        alluis.update([repo.baseui, repo.ui])
+    else:
+        baseui = ui
+    webconf = opts.get('web_conf') or opts.get('webdir_conf')
+    if webconf:
+        # load server settings (e.g. web.port) to "copied" ui, which allows
+        # hgwebdir to reload webconf cleanly
+        servui = ui.copy()
+        servui.readconfig(webconf, sections=['web'])
+        alluis.add(servui)
+    else:
+        servui = ui
+
+    optlist = ("name templates style address port prefix ipv6"
+               " accesslog errorlog certificate encoding")
+    for o in optlist.split():
+        val = opts.get(o, '')
+        if val in (None, ''): # should check against default options instead
+            continue
+        for u in alluis:
+            u.setconfig("web", o, val, 'serve')
+
+    app = hgweb.createapp(baseui, repo, webconf)
+    return hgweb.httpservice(servui, app, opts)
+
+def createservice(ui, repo, opts):
+    if opts["cmdserver"]:
+        return _createcmdservice(ui, repo, opts)
+    else:
+        return _createhgwebservice(ui, repo, opts)
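
createservice() is a small factory: --cmdserver MODE picks a command-server class out of _cmdservicemap, anything else builds an hgweb service. A hedged sketch of the expected consumer, assuming the returned objects expose init() and run() as the commandserver and hgweb service classes do:

    from mercurial import server

    def serve(ui, repo, **opts):
        service = server.createservice(ui, repo, opts)
        return server.runservice(opts, initfn=service.init,
                                 runfn=service.run)
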
--- a/mercurial/similar.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/similar.py	Wed Jan 18 11:43:36 2017 -0500
@@ -13,7 +13,6 @@
 from . import (
     bdiff,
     mdiff,
-    util,
 )
 
 def _findexactmatches(repo, added, removed):
@@ -43,6 +42,28 @@
     # Done
     repo.ui.progress(_('searching for exact renames'), None)
 
+def _ctxdata(fctx):
+    # lazily load text
+    orig = fctx.data()
+    return orig, mdiff.splitnewlines(orig)
+
+def _score(fctx, otherdata):
+    orig, lines = otherdata
+    text = fctx.data()
+    # bdiff.blocks() returns blocks of matching lines
+    # count the number of bytes in each
+    equal = 0
+    matches = bdiff.blocks(text, orig)
+    for x1, x2, y1, y2 in matches:
+        for line in lines[y1:y2]:
+            equal += len(line)
+
+    lengths = len(text) + len(orig)
+    return equal * 2.0 / lengths
+
+def score(fctx1, fctx2):
+    return _score(fctx1, _ctxdata(fctx2))
+
 def _findsimilarmatches(repo, added, removed, threshold):
     '''find potentially renamed files based on similar file content
 
@@ -54,35 +75,19 @@
         repo.ui.progress(_('searching for similar files'), i,
                          total=len(removed), unit=_('files'))
 
-        # lazily load text
-        @util.cachefunc
-        def data():
-            orig = r.data()
-            return orig, mdiff.splitnewlines(orig)
-
-        def score(text):
-            orig, lines = data()
-            # bdiff.blocks() returns blocks of matching lines
-            # count the number of bytes in each
-            equal = 0
-            matches = bdiff.blocks(text, orig)
-            for x1, x2, y1, y2 in matches:
-                for line in lines[y1:y2]:
-                    equal += len(line)
-
-            lengths = len(text) + len(orig)
-            return equal * 2.0 / lengths
-
+        data = None
         for a in added:
             bestscore = copies.get(a, (None, threshold))[1]
-            myscore = score(a.data())
+            if data is None:
+                data = _ctxdata(r)
+            myscore = _score(a, data)
             if myscore >= bestscore:
                 copies[a] = (r, myscore)
     repo.ui.progress(_('searching'), None)
 
     for dest, v in copies.iteritems():
-        source, score = v
-        yield source, dest, score
+        source, bscore = v
+        yield source, dest, bscore
 
 def findrenames(repo, added, removed, threshold):
     '''find renamed files -- yields (before, after, score) tuples'''
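
The refactor lifts the old nested closures into module-level _ctxdata()/_score() so a removed file's text is split into lines only once per candidate, and exposes a public score(fctx1, fctx2). The metric itself is the usual similarity ratio: twice the matched byte count over the combined length. A self-contained illustration with difflib standing in for bdiff (bdiff matches whole lines, difflib here matches characters; the ratio has the same shape):

    import difflib

    def score(a, b):
        # 2.0 * equal / (len(a) + len(b)), as in _score() above
        matcher = difflib.SequenceMatcher(None, a, b)
        equal = sum(m.size for m in matcher.get_matching_blocks())
        return equal * 2.0 / (len(a) + len(b))

    print(score('one\ntwo\nthree\n', 'one\ntwo\nthree!\n'))  # ~0.97
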
--- a/mercurial/simplemerge.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/simplemerge.py	Wed Jan 18 11:43:36 2017 -0500
@@ -19,7 +19,6 @@
 from __future__ import absolute_import
 
 import os
-import sys
 
 from .i18n import _
 from . import (
@@ -275,7 +274,7 @@
     def minimize(self, merge_regions):
         """Trim conflict regions of lines where A and B sides match.
 
-        Lines where both A and B have made the same changes at the begining
+        Lines where both A and B have made the same changes at the beginning
         or the end of each merge region are eliminated from the conflict
         region and are instead considered the same.
         """
@@ -441,7 +440,7 @@
         opener = scmutil.opener(os.path.dirname(local))
         out = opener(os.path.basename(local), "w", atomictemp=True)
     else:
-        out = sys.stdout
+        out = ui.fout
 
     m3 = Merge3Text(basetext, localtext, othertext)
     extrakwargs = {
--- a/mercurial/sshserver.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/sshserver.py	Wed Jan 18 11:43:36 2017 -0500
@@ -8,11 +8,11 @@
 
 from __future__ import absolute_import
 
-import os
 import sys
 
 from .i18n import _
 from . import (
+    encoding,
     error,
     hook,
     util,
@@ -26,6 +26,7 @@
         self.lock = None
         self.fin = ui.fin
         self.fout = ui.fout
+        self.name = 'ssh'
 
         hook.redirect(True)
         ui.fout = repo.ui.fout = ui.ferr
@@ -68,13 +69,6 @@
     def redirect(self):
         pass
 
-    def groupchunks(self, fh):
-        return iter(lambda: fh.read(4096), '')
-
-    def compresschunks(self, chunks):
-        for chunk in chunks:
-            yield chunk
-
     def sendresponse(self, v):
         self.fout.write("%d\n" % len(v))
         self.fout.write(v)
@@ -82,7 +76,13 @@
 
     def sendstream(self, source):
         write = self.fout.write
-        for chunk in source.gen:
+
+        if source.reader:
+            gen = iter(lambda: source.reader.read(4096), '')
+        else:
+            gen = source.gen
+
+        for chunk in gen:
             write(chunk)
         self.fout.flush()
 
@@ -131,5 +131,5 @@
         return cmd != ''
 
     def _client(self):
-        client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
+        client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
         return 'remote:ssh:' + client
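
sendstream() now prefers a file-like source.reader over a pregenerated chunk iterator, which is why the groupchunks()/compresschunks() hooks above could be deleted. The iter(callable, sentinel) idiom it uses reads bounded 4096-byte blocks until the empty sentinel arrives:

    import io

    reader = io.BytesIO(b'x' * 10000)
    for chunk in iter(lambda: reader.read(4096), b''):
        print(len(chunk))   # 4096, 4096, 1808
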
--- a/mercurial/sslutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/sslutil.py	Wed Jan 18 11:43:36 2017 -0500
@@ -18,6 +18,7 @@
 from .i18n import _
 from . import (
     error,
+    pycompat,
     util,
 )
 
@@ -638,7 +639,7 @@
                 # According to RFC 2818 the most specific Common Name must
                 # be used.
                 if key == 'commonName':
-                    # 'subject' entries are unicide.
+                    # 'subject' entries are unicode.
                     try:
                         value = value.encode('ascii')
                     except UnicodeEncodeError:
@@ -667,9 +668,10 @@
       for using system certificate store CAs in addition to the provided
       cacerts file
     """
-    if sys.platform != 'darwin' or util.mainfrozen() or not sys.executable:
+    if (pycompat.sysplatform != 'darwin' or
+                        util.mainfrozen() or not pycompat.sysexecutable):
         return False
-    exe = os.path.realpath(sys.executable).lower()
+    exe = os.path.realpath(pycompat.sysexecutable).lower()
     return (exe.startswith('/usr/bin/python') or
             exe.startswith('/system/library/frameworks/python.framework/'))
 
@@ -706,7 +708,7 @@
     # because we'll get a certificate verification error later and the lack
     # of loaded CA certificates will be the reason why.
     # Assertion: this code is only called if certificates are being verified.
-    if os.name == 'nt':
+    if pycompat.osname == 'nt':
         if not _canloaddefaultcerts:
             ui.warn(_('(unable to load Windows CA certificates; see '
                       'https://mercurial-scm.org/wiki/SecureConnections for '
@@ -724,7 +726,7 @@
 
     # The Apple OpenSSL trick isn't available to us. If Python isn't able to
     # load system certs, we're out of luck.
-    if sys.platform == 'darwin':
+    if pycompat.sysplatform == 'darwin':
         # FUTURE Consider looking for Homebrew or MacPorts installed certs
         # files. Also consider exporting the keychain certs to a file during
         # Mercurial install.
@@ -737,7 +739,7 @@
     # / is writable on Windows. Out of an abundance of caution make sure
     # we're not on Windows because paths from _systemcacerts could be installed
     # by non-admin users.
-    assert os.name != 'nt'
+    assert pycompat.osname != 'nt'
 
     # Try to find CA certificates in well-known locations. We print a warning
     # when using a found file because we don't want too much silent magic
@@ -764,7 +766,7 @@
     return None
 
 def validatesocket(sock):
-    """Validate a socket meets security requiremnets.
+    """Validate a socket meets security requirements.
 
     The passed socket must have been created with ``wrapsocket()``.
     """
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/statprof.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,810 @@
+#!/usr/bin/env python
+## statprof.py
+## Copyright (C) 2012 Bryan O'Sullivan <bos@serpentine.com>
+## Copyright (C) 2011 Alex Fraser <alex at phatcore dot com>
+## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
+## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
+
+## This library is free software; you can redistribute it and/or
+## modify it under the terms of the GNU Lesser General Public
+## License as published by the Free Software Foundation; either
+## version 2.1 of the License, or (at your option) any later version.
+##
+## This library is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+## Lesser General Public License for more details.
+##
+## You should have received a copy of the GNU Lesser General Public
+## License along with this program; if not, contact:
+##
+## Free Software Foundation           Voice:  +1-617-542-5942
+## 59 Temple Place - Suite 330        Fax:    +1-617-542-2652
+## Boston, MA  02111-1307,  USA       gnu@gnu.org
+
+"""
+statprof is intended to be a fairly simple statistical profiler for
+python. It was ported directly from a statistical profiler for guile,
+also named statprof, available from guile-lib [0].
+
+[0] http://wingolog.org/software/guile-lib/statprof/
+
+To start profiling, call statprof.start():
+>>> start()
+
+Then run whatever it is that you want to profile, for example:
+>>> import test.pystone; test.pystone.pystones()
+
+Then stop the profiling and print out the results:
+>>> stop()
+>>> display()
+  %   cumulative      self
+ time    seconds   seconds  name
+ 26.72      1.40      0.37  pystone.py:79:Proc0
+ 13.79      0.56      0.19  pystone.py:133:Proc1
+ 13.79      0.19      0.19  pystone.py:208:Proc8
+ 10.34      0.16      0.14  pystone.py:229:Func2
+  6.90      0.10      0.10  pystone.py:45:__init__
+  4.31      0.16      0.06  pystone.py:53:copy
+    ...
+
+All of the numerical data is statistically approximate. In the
+following column descriptions, and in all of statprof, "time" refers
+to execution time (both user and system), not wall clock time.
+
+% time
+    The percent of the time spent inside the procedure itself (not
+    counting children).
+
+cumulative seconds
+    The total number of seconds spent in the procedure, including
+    children.
+
+self seconds
+    The total number of seconds spent in the procedure itself (not
+    counting children).
+
+name
+    The name of the procedure.
+
+By default statprof keeps the data collected from previous runs. If you
+want to clear the collected data, call reset():
+>>> reset()
+
+reset() can also be used to change the sampling frequency from the
+default of 1000 Hz. For example, to tell statprof to sample 50 times a
+second:
+>>> reset(50)
+
+This means that statprof will sample the call stack after every 1/50 of
+a second of user + system time spent running on behalf of the python
+process. When your process is idle (for example, blocking in a read(),
+as is the case at the listener), the clock does not advance. For this
+reason statprof is not currently not suitable for profiling io-bound
+operations.
+
+The profiler uses the hash of the code object itself to identify the
+procedures, so it won't confuse different procedures with the same name.
+They will show up as two different rows in the output.
+
+Right now the profiler is quite simplistic.  It cannot provide
+call-graphs or other higher level information.  What you see in the
+table is pretty much all there is. Patches are welcome :-)
+
+
+Threading
+---------
+
+Because signals only get delivered to the main thread in Python,
+statprof only profiles the main thread. However, because the time
+reporting function uses per-process timers, the results can be
+significantly off if other threads' work patterns differ from the
+main thread's.
+"""
+# no-check-code
+from __future__ import absolute_import, division, print_function
+
+import collections
+import contextlib
+import getopt
+import inspect
+import json
+import os
+import signal
+import sys
+import tempfile
+import threading
+import time
+
+from . import (
+    encoding,
+    pycompat,
+)
+
+defaultdict = collections.defaultdict
+contextmanager = contextlib.contextmanager
+
+__all__ = ['start', 'stop', 'reset', 'display', 'profile']
+
+skips = set(["util.py:check", "extensions.py:closure",
+             "color.py:colorcmd", "dispatch.py:checkargs",
+             "dispatch.py:<lambda>", "dispatch.py:_runcatch",
+             "dispatch.py:_dispatch", "dispatch.py:_runcommand",
+             "pager.py:pagecmd", "dispatch.py:run",
+             "dispatch.py:dispatch", "dispatch.py:runcommand",
+             "hg.py:<module>", "evolve.py:warnobserrors",
+         ])
+
+###########################################################################
+## Utils
+
+def clock():
+    times = os.times()
+    return times[0] + times[1]
+
+
+###########################################################################
+## Collection data structures
+
+class ProfileState(object):
+    def __init__(self, frequency=None):
+        self.reset(frequency)
+
+    def reset(self, frequency=None):
+        # total so far
+        self.accumulated_time = 0.0
+        # start_time when timer is active
+        self.last_start_time = None
+        # a float
+        if frequency:
+            self.sample_interval = 1.0 / frequency
+        elif not hasattr(self, 'sample_interval'):
+            # default to 1000 Hz
+            self.sample_interval = 1.0 / 1000.0
+        else:
+            # leave the frequency as it was
+            pass
+        self.remaining_prof_time = None
+        # for user start/stop nesting
+        self.profile_level = 0
+
+        self.samples = []
+
+    def accumulate_time(self, stop_time):
+        self.accumulated_time += stop_time - self.last_start_time
+
+    def seconds_per_sample(self):
+        return self.accumulated_time / len(self.samples)
+
+state = ProfileState()
+
+
+class CodeSite(object):
+    cache = {}
+
+    __slots__ = (u'path', u'lineno', u'function', u'source')
+
+    def __init__(self, path, lineno, function):
+        self.path = path
+        self.lineno = lineno
+        self.function = function
+        self.source = None
+
+    def __eq__(self, other):
+        try:
+            return (self.lineno == other.lineno and
+                    self.path == other.path)
+        except:
+            return False
+
+    def __hash__(self):
+        return hash((self.lineno, self.path))
+
+    @classmethod
+    def get(cls, path, lineno, function):
+        k = (path, lineno)
+        try:
+            return cls.cache[k]
+        except KeyError:
+            v = cls(path, lineno, function)
+            cls.cache[k] = v
+            return v
+
+    def getsource(self, length):
+        if self.source is None:
+            lineno = self.lineno - 1
+            fp = None
+            try:
+                fp = open(self.path)
+                for i, line in enumerate(fp):
+                    if i == lineno:
+                        self.source = line.strip()
+                        break
+            except:
+                pass
+            finally:
+                if fp:
+                    fp.close()
+            if self.source is None:
+                self.source = ''
+
+        source = self.source
+        if len(source) > length:
+            source = source[:(length - 3)] + "..."
+        return source
+
+    def filename(self):
+        return os.path.basename(self.path)
+
+class Sample(object):
+    __slots__ = (u'stack', u'time')
+
+    def __init__(self, stack, time):
+        self.stack = stack
+        self.time = time
+
+    @classmethod
+    def from_frame(cls, frame, time):
+        stack = []
+
+        while frame:
+            stack.append(CodeSite.get(frame.f_code.co_filename, frame.f_lineno,
+                                      frame.f_code.co_name))
+            frame = frame.f_back
+
+        return Sample(stack, time)
+
+###########################################################################
+## SIGPROF handler
+
+def profile_signal_handler(signum, frame):
+    if state.profile_level > 0:
+        now = clock()
+        state.accumulate_time(now)
+
+        state.samples.append(Sample.from_frame(frame, state.accumulated_time))
+
+        signal.setitimer(signal.ITIMER_PROF,
+            state.sample_interval, 0.0)
+        state.last_start_time = now
+
+stopthread = threading.Event()
+def samplerthread(tid):
+    while not stopthread.is_set():
+        now = clock()
+        state.accumulate_time(now)
+
+        frame = sys._current_frames()[tid]
+        state.samples.append(Sample.from_frame(frame, state.accumulated_time))
+
+        state.last_start_time = now
+        time.sleep(state.sample_interval)
+
+    stopthread.clear()
+
+###########################################################################
+## Profiling API
+
+def is_active():
+    return state.profile_level > 0
+
+lastmechanism = None
+def start(mechanism='thread'):
+    '''Install the profiling signal handler, and start profiling.'''
+    state.profile_level += 1
+    if state.profile_level == 1:
+        state.last_start_time = clock()
+        rpt = state.remaining_prof_time
+        state.remaining_prof_time = None
+
+        global lastmechanism
+        lastmechanism = mechanism
+
+        if mechanism == 'signal':
+            signal.signal(signal.SIGPROF, profile_signal_handler)
+            signal.setitimer(signal.ITIMER_PROF,
+                rpt or state.sample_interval, 0.0)
+        elif mechanism == 'thread':
+            frame = inspect.currentframe()
+            tid = [k for k, f in sys._current_frames().items() if f == frame][0]
+            state.thread = threading.Thread(target=samplerthread,
+                                 args=(tid,), name="samplerthread")
+            state.thread.start()
+
+def stop():
+    '''Stop profiling, and uninstall the profiling signal handler.'''
+    state.profile_level -= 1
+    if state.profile_level == 0:
+        if lastmechanism == 'signal':
+            rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0)
+            signal.signal(signal.SIGPROF, signal.SIG_IGN)
+            state.remaining_prof_time = rpt[0]
+        elif lastmechanism == 'thread':
+            stopthread.set()
+            state.thread.join()
+
+        state.accumulate_time(clock())
+        state.last_start_time = None
+        statprofpath = encoding.environ.get('STATPROF_DEST')
+        if statprofpath:
+            save_data(statprofpath)
+
+    return state
+
+def save_data(path):
+    with open(path, 'w+') as file:
+        file.write(str(state.accumulated_time) + '\n')
+        for sample in state.samples:
+            time = str(sample.time)
+            stack = sample.stack
+            sites = ['\1'.join([s.path, str(s.lineno), s.function])
+                     for s in stack]
+            file.write(time + '\0' + '\0'.join(sites) + '\n')
+
+def load_data(path):
+    lines = open(path, 'r').read().splitlines()
+
+    state.accumulated_time = float(lines[0])
+    state.samples = []
+    for line in lines[1:]:
+        parts = line.split('\0')
+        time = float(parts[0])
+        rawsites = parts[1:]
+        sites = []
+        for rawsite in rawsites:
+            siteparts = rawsite.split('\1')
+            sites.append(CodeSite.get(siteparts[0], int(siteparts[1]),
+                        siteparts[2]))
+
+        state.samples.append(Sample(sites, time))
+
+
+def reset(frequency=None):
+    '''Clear out the state of the profiler.  Do not call while the
+    profiler is running.
+
+    The optional frequency argument specifies the number of samples to
+    collect per second.'''
+    assert state.profile_level == 0, "Can't reset() while statprof is running"
+    CodeSite.cache.clear()
+    state.reset(frequency)
+
+
+@contextmanager
+def profile():
+    start()
+    try:
+        yield
+    finally:
+        stop()
+        display()
+
+
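
profile() pairs start() and stop() and prints the report on exit, so ad-hoc measurements need only a with-block. Assumed usage from a Mercurial checkout (the default format is the hotpath view):

    from mercurial import statprof

    with statprof.profile():
        sum(i * i for i in xrange(10 ** 6))
    # on exit stop() runs, then display() prints the hotpath report
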
+###########################################################################
+## Reporting API
+
+class SiteStats(object):
+    def __init__(self, site):
+        self.site = site
+        self.selfcount = 0
+        self.totalcount = 0
+
+    def addself(self):
+        self.selfcount += 1
+
+    def addtotal(self):
+        self.totalcount += 1
+
+    def selfpercent(self):
+        return self.selfcount / len(state.samples) * 100
+
+    def totalpercent(self):
+        return self.totalcount / len(state.samples) * 100
+
+    def selfseconds(self):
+        return self.selfcount * state.seconds_per_sample()
+
+    def totalseconds(self):
+        return self.totalcount * state.seconds_per_sample()
+
+    @classmethod
+    def buildstats(cls, samples):
+        stats = {}
+
+        for sample in samples:
+            for i, site in enumerate(sample.stack):
+                sitestat = stats.get(site)
+                if not sitestat:
+                    sitestat = SiteStats(site)
+                    stats[site] = sitestat
+
+                sitestat.addtotal()
+
+                if i == 0:
+                    sitestat.addself()
+
+        return [s for s in stats.itervalues()]
+
+class DisplayFormats:
+    ByLine = 0
+    ByMethod = 1
+    AboutMethod = 2
+    Hotpath = 3
+    FlameGraph = 4
+    Json = 5
+
+def display(fp=None, format=3, data=None, **kwargs):
+    '''Print statistics, either to stdout or the given file object.'''
+    data = data or state
+
+    if fp is None:
+        import sys
+        fp = sys.stdout
+    if len(data.samples) == 0:
+        print('No samples recorded.', file=fp)
+        return
+
+    if format == DisplayFormats.ByLine:
+        display_by_line(data, fp)
+    elif format == DisplayFormats.ByMethod:
+        display_by_method(data, fp)
+    elif format == DisplayFormats.AboutMethod:
+        display_about_method(data, fp, **kwargs)
+    elif format == DisplayFormats.Hotpath:
+        display_hotpath(data, fp, **kwargs)
+    elif format == DisplayFormats.FlameGraph:
+        write_to_flame(data, fp, **kwargs)
+    elif format == DisplayFormats.Json:
+        write_to_json(data, fp)
+    else:
+        raise Exception("Invalid display format")
+
+    if format != DisplayFormats.Json:
+        print('---', file=fp)
+        print('Sample count: %d' % len(data.samples), file=fp)
+        print('Total time: %f seconds' % data.accumulated_time, file=fp)
+
+def display_by_line(data, fp):
+    '''Print the profiler data with each sample line represented
+    as one row in a table.  Sorted by self-time per line.'''
+    stats = SiteStats.buildstats(data.samples)
+    stats.sort(reverse=True, key=lambda x: x.selfseconds())
+
+    print('%5.5s %10.10s   %7.7s  %-8.8s' %
+          ('%  ', 'cumulative', 'self', ''), file=fp)
+    print('%5.5s  %9.9s  %8.8s  %-8.8s' %
+          ("time", "seconds", "seconds", "name"), file=fp)
+
+    for stat in stats:
+        site = stat.site
+        sitelabel = '%s:%d:%s' % (site.filename(), site.lineno, site.function)
+        print('%6.2f %9.2f %9.2f  %s' % (stat.selfpercent(),
+                                         stat.totalseconds(),
+                                         stat.selfseconds(),
+                                         sitelabel),
+              file=fp)
+
+def display_by_method(data, fp):
+    '''Print the profiler data with each sample function represented
+    as one row in a table.  Important lines within that function are
+    output as nested rows.  Sorted by self-time per line.'''
+    print('%5.5s %10.10s   %7.7s  %-8.8s' %
+          ('%  ', 'cumulative', 'self', ''), file=fp)
+    print('%5.5s  %9.9s  %8.8s  %-8.8s' %
+          ("time", "seconds", "seconds", "name"), file=fp)
+
+    stats = SiteStats.buildstats(data.samples)
+
+    grouped = defaultdict(list)
+    for stat in stats:
+        grouped[stat.site.filename() + ":" + stat.site.function].append(stat)
+
+    # compute sums for each function
+    functiondata = []
+    for fname, sitestats in grouped.iteritems():
+        total_cum_sec = 0
+        total_self_sec = 0
+        total_percent = 0
+        for stat in sitestats:
+            total_cum_sec += stat.totalseconds()
+            total_self_sec += stat.selfseconds()
+            total_percent += stat.selfpercent()
+
+        functiondata.append((fname,
+                             total_cum_sec,
+                             total_self_sec,
+                             total_percent,
+                             sitestats))
+
+    # sort by total self sec
+    functiondata.sort(reverse=True, key=lambda x: x[2])
+
+    for function in functiondata:
+        if function[3] < 0.05:
+            continue
+        print('%6.2f %9.2f %9.2f  %s' % (function[3], # total percent
+                                         function[1], # total cum sec
+                                         function[2], # total self sec
+                                         function[0]), # file:function
+              file=fp)
+        function[4].sort(reverse=True, key=lambda i: i.selfseconds())
+        for stat in function[4]:
+            # only show line numbers for significant locations (>1% time spent)
+            if stat.selfpercent() > 1:
+                source = stat.site.getsource(25)
+                stattuple = (stat.selfpercent(), stat.selfseconds(),
+                             stat.site.lineno, source)
+
+                print('%33.0f%% %6.2f   line %s: %s' % (stattuple), file=fp)
+
+def display_about_method(data, fp, function=None, **kwargs):
+    if function is None:
+        raise Exception("Invalid function")
+
+    filename = None
+    if ':' in function:
+        filename, function = function.split(':')
+
+    relevant_samples = 0
+    parents = {}
+    children = {}
+
+    for sample in data.samples:
+        for i, site in enumerate(sample.stack):
+            if site.function == function and (not filename
+                or site.filename() == filename):
+                relevant_samples += 1
+                if i != len(sample.stack) - 1:
+                    parent = sample.stack[i + 1]
+                    if parent in parents:
+                        parents[parent] = parents[parent] + 1
+                    else:
+                        parents[parent] = 1
+
+                if site in children:
+                    children[site] = children[site] + 1
+                else:
+                    children[site] = 1
+
+    parents = [(parent, count) for parent, count in parents.iteritems()]
+    parents.sort(reverse=True, key=lambda x: x[1])
+    for parent, count in parents:
+        print('%6.2f%%   %s:%s   line %s: %s' %
+            (count / relevant_samples * 100, parent.filename(),
+            parent.function, parent.lineno, parent.getsource(50)), file=fp)
+
+    stats = SiteStats.buildstats(data.samples)
+    stats = [s for s in stats
+               if s.site.function == function and
+               (not filename or s.site.filename() == filename)]
+
+    total_cum_sec = 0
+    total_self_sec = 0
+    total_self_percent = 0
+    total_cum_percent = 0
+    for stat in stats:
+        total_cum_sec += stat.totalseconds()
+        total_self_sec += stat.selfseconds()
+        total_self_percent += stat.selfpercent()
+        total_cum_percent += stat.totalpercent()
+
+    print(
+        '\n    %s:%s    Total: %0.2fs (%0.2f%%)    Self: %0.2fs (%0.2f%%)\n' %
+        (
+        filename or '___',
+        function,
+        total_cum_sec,
+        total_cum_percent,
+        total_self_sec,
+        total_self_percent
+        ), file=fp)
+
+    children = [(child, count) for child, count in children.iteritems()]
+    children.sort(reverse=True, key=lambda x: x[1])
+    for child, count in children:
+        print('        %6.2f%%   line %s: %s' %
+              (count / relevant_samples * 100, child.lineno,
+               child.getsource(50)), file=fp)
+
+def display_hotpath(data, fp, limit=0.05, **kwargs):
+    class HotNode(object):
+        def __init__(self, site):
+            self.site = site
+            self.count = 0
+            self.children = {}
+
+        def add(self, stack, time):
+            self.count += time
+            site = stack[0]
+            child = self.children.get(site)
+            if not child:
+                child = HotNode(site)
+                self.children[site] = child
+
+            if len(stack) > 1:
+                i = 1
+                # Skip boilerplate parts of the stack
+                while i < len(stack) and '%s:%s' % (stack[i].filename(), stack[i].function) in skips:
+                    i += 1
+                if i < len(stack):
+                    child.add(stack[i:], time)
+
+    root = HotNode(None)
+    lasttime = data.samples[0].time
+    for sample in data.samples:
+        root.add(sample.stack[::-1], sample.time - lasttime)
+        lasttime = sample.time
+
+    def _write(node, depth, multiple_siblings):
+        site = node.site
+        visiblechildren = [c for c in node.children.itervalues()
+                             if c.count >= (limit * root.count)]
+        if site:
+            indent = depth * 2 - 1
+            filename = ''
+            function = ''
+            if len(node.children) > 0:
+                childsite = list(node.children.itervalues())[0].site
+                filename = (childsite.filename() + ':').ljust(15)
+                function = childsite.function
+
+            # lots of string formatting
+            listpattern = ''.ljust(indent) +\
+                          ('\\' if multiple_siblings else '|') +\
+                          ' %4.1f%%  %s %s'
+            liststring = listpattern % (node.count / root.count * 100,
+                                        filename, function)
+            codepattern = '%' + str(55 - len(liststring)) + 's %s:  %s'
+            codestring = codepattern % ('line', site.lineno, site.getsource(30))
+
+            finalstring = liststring + codestring
+            childrensamples = sum([c.count for c in node.children.itervalues()])
+            # Make frames that performed more than 10% of the operation red
+            if node.count - childrensamples > (0.1 * root.count):
+                finalstring = '\033[91m' + finalstring + '\033[0m'
+            # Make frames that didn't actually perform work dark grey
+            elif node.count - childrensamples == 0:
+                finalstring = '\033[90m' + finalstring + '\033[0m'
+            print(finalstring, file=fp)
+
+        newdepth = depth
+        if len(visiblechildren) > 1 or multiple_siblings:
+            newdepth += 1
+
+        visiblechildren.sort(reverse=True, key=lambda x: x.count)
+        for child in visiblechildren:
+            _write(child, newdepth, len(visiblechildren) > 1)
+
+    if root.count > 0:
+        _write(root, 0, False)
+
+def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs):
+    if scriptpath is None:
+        scriptpath = encoding.environ['HOME'] + '/flamegraph.pl'
+    if not os.path.exists(scriptpath):
+        print("error: missing %s" % scriptpath, file=fp)
+        print("get it here: https://github.com/brendangregg/FlameGraph",
+              file=fp)
+        return
+
+    fd, path = tempfile.mkstemp()
+
+    file = os.fdopen(fd, "w+")  # reuse the mkstemp fd instead of leaking it
+
+    lines = {}
+    for sample in data.samples:
+        sites = [s.function for s in sample.stack]
+        sites.reverse()
+        line = ';'.join(sites)
+        if line in lines:
+            lines[line] = lines[line] + 1
+        else:
+            lines[line] = 1
+
+    for line, count in lines.iteritems():
+        file.write("%s %s\n" % (line, count))
+
+    file.close()
+
+    if outputfile is None:
+        outputfile = '~/flamegraph.svg'
+
+    os.system("perl %s %s > %s" % (scriptpath, path, outputfile))
+    print("Written to %s" % outputfile, file=fp)
+
+def write_to_json(data, fp):
+    samples = []
+
+    for sample in data.samples:
+        stack = []
+
+        for frame in sample.stack:
+            stack.append((frame.path, frame.lineno, frame.function))
+
+        samples.append((sample.time, stack))
+
+    print(json.dumps(samples), file=fp)
+
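
The JSON emitted above is just a list of [time, stack] pairs, each stack frame a [path, lineno, function] triple. A hand-constructed illustration (not real profiler output):

    [[0.002, [["example.py", 12, "inner"], ["example.py", 20, "outer"]]],
     [0.004, [["example.py", 12, "inner"], ["example.py", 20, "outer"]]]]
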
+def printusage():
+    print("""
+The statprof command line allows you to inspect the last profile's results in
+the following forms:
+
+usage:
+    hotpath [-l --limit percent]
+        Shows a graph of calls with the percent of time each takes.
+        Red calls take over 10%% of the total time themselves.
+    lines
+        Shows the actual sampled lines.
+    functions
+        Shows the samples grouped by function.
+    function [filename:]functionname
+        Shows the callers and callees of a particular function.
+    flame [-s --script-path] [-o --output-file path]
+        Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg)
+        Requires that ~/flamegraph.pl exist.
+        (Specify alternate script path with --script-path.)""")
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    if len(argv) == 1:
+        printusage()
+        return 0
+
+    displayargs = {}
+
+    optstart = 2
+    displayargs['function'] = None
+    if argv[1] == 'hotpath':
+        displayargs['format'] = DisplayFormats.Hotpath
+    elif argv[1] == 'lines':
+        displayargs['format'] = DisplayFormats.ByLine
+    elif argv[1] == 'functions':
+        displayargs['format'] = DisplayFormats.ByMethod
+    elif argv[1] == 'function':
+        displayargs['format'] = DisplayFormats.AboutMethod
+        displayargs['function'] = argv[2]
+        optstart = 3
+    elif argv[1] == 'flame':
+        displayargs['format'] = DisplayFormats.FlameGraph
+    else:
+        printusage()
+        return 0
+
+    # process options
+    try:
+        opts, args = pycompat.getoptb(sys.argv[optstart:], "hl:f:o:p:",
+                                   ["help", "limit=", "file=", "output-file=", "script-path="])
+    except getopt.error as msg:
+        print(msg)
+        printusage()
+        return 2
+
+    displayargs['limit'] = 0.05
+    path = None
+    for o, value in opts:
+        if o in ("-l", "--limit"):
+            displayargs['limit'] = float(value)
+        elif o in ("-f", "--file"):
+            path = value
+        elif o in ("-o", "--output-file"):
+            displayargs['outputfile'] = value
+        elif o in ("-p", "--script-path"):
+            displayargs['scriptpath'] = value
+        elif o in ("-h", "--help"):
+            printusage()
+            return 0
+        else:
+            assert False, "unhandled option %s" % o
+
+    load_data(path=path)
+
+    display(**displayargs)
+
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
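
main() turns the module into a small report viewer over previously saved samples (see STATPROF_DEST in stop() above). Plausible invocations, assuming the package is importable so that python -m reaches this file:

    $ python -m mercurial.statprof hotpath -f /tmp/hg.prof -l 0.1
    $ python -m mercurial.statprof flame -f /tmp/hg.prof -o /tmp/hg.svg
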
--- a/mercurial/store.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/store.py	Wed Jan 18 11:43:36 2017 -0500
@@ -436,7 +436,7 @@
         self.entries = set(decodedir(fp.read()).splitlines())
         if '' in self.entries:
             fp.seek(0)
-            for n, line in enumerate(fp):
+            for n, line in enumerate(util.iterfile(fp)):
                 if not line.rstrip('\n'):
                     t = _('invalid entry in fncache, line %d') % (n + 1)
                     raise error.Abort(t)
--- a/mercurial/streamclone.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/streamclone.py	Wed Jan 18 11:43:36 2017 -0500
@@ -286,11 +286,11 @@
 def consumev1(repo, fp, filecount, bytecount):
     """Apply the contents from version 1 of a streaming clone file handle.
 
-    This takes the output from "streamout" and applies it to the specified
+    This takes the output from "stream_out" and applies it to the specified
     repository.
 
-    Like "streamout," the status line added by the wire protocol is not handled
-    by this function.
+    Like "stream_out," the status line added by the wire protocol is not
+    handled by this function.
     """
     with repo.lock():
         repo.ui.status(_('%d files to transfer, %s of data\n') %
--- a/mercurial/strutil.py	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-# strutil.py - string utilities for Mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-def findall(haystack, needle, start=0, end=None):
-    if end is None:
-        end = len(haystack)
-    if end < 0:
-        end += len(haystack)
-    if start < 0:
-        start += len(haystack)
-    while start < end:
-        c = haystack.find(needle, start, end)
-        if c == -1:
-            break
-        yield c
-        start = c + 1
-
-def rfindall(haystack, needle, start=0, end=None):
-    if end is None:
-        end = len(haystack)
-    if end < 0:
-        end += len(haystack)
-    if start < 0:
-        start += len(haystack)
-    while end >= 0:
-        c = haystack.rfind(needle, start, end)
-        if c == -1:
-            break
-        yield c
-        end = c - 1
--- a/mercurial/subrepo.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/subrepo.py	Wed Jan 18 11:43:36 2017 -0500
@@ -24,6 +24,7 @@
 from . import (
     cmdutil,
     config,
+    encoding,
     error,
     exchange,
     filemerge,
@@ -31,6 +32,7 @@
     node,
     pathutil,
     phases,
+    pycompat,
     scmutil,
     util,
 )
@@ -462,12 +464,12 @@
             return _("uncommitted changes in subrepository '%s'"
                      ) % subrelpath(self)
 
-    def bailifchanged(self, ignoreupdate=False):
+    def bailifchanged(self, ignoreupdate=False, hint=None):
         """raise Abort if subrepository is ``dirty()``
         """
         dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate)
         if dirtyreason:
-            raise error.Abort(dirtyreason)
+            raise error.Abort(dirtyreason, hint=hint)
 
     def basestate(self):
         """current working directory base state, disregarding .hgsubstate
@@ -1101,7 +1103,7 @@
             path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                      self._path, filename)
             cmd.append(path)
-        env = dict(os.environ)
+        env = dict(encoding.environ)
         # Avoid localized output, preserve current locale for everything else.
         lc_all = env.get('LC_ALL')
         if lc_all:
@@ -1172,7 +1174,7 @@
                 changes.append(path)
         for path in changes:
             for ext in externals:
-                if path == ext or path.startswith(ext + os.sep):
+                if path == ext or path.startswith(ext + pycompat.ossep):
                     return True, True, bool(missing)
         return bool(changes), False, bool(missing)
 
@@ -1311,7 +1313,7 @@
             notfoundhint = _("check git is installed and in your PATH")
             if e.errno != errno.ENOENT:
                 raise error.Abort(genericerror % (self._path, e.strerror))
-            elif os.name == 'nt':
+            elif pycompat.osname == 'nt':
                 try:
                     self._gitexecutable = 'git.cmd'
                     out, err = self._gitnodir(['--version'])
@@ -1397,7 +1399,7 @@
         """
         self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
         if env is None:
-            env = os.environ.copy()
+            env = encoding.environ.copy()
         # disable localization for Git output (issue5176)
         env['LC_ALL'] = 'C'
         # fix for Git CVE-2015-7545
@@ -1632,7 +1634,7 @@
         if self._gitmissing():
             raise error.Abort(_("subrepo %s is missing") % self._relpath)
         cmd = ['commit', '-a', '-m', text]
-        env = os.environ.copy()
+        env = encoding.environ.copy()
         if user:
             cmd += ['--author', user]
         if date:
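
bailifchanged() now threads an optional hint through to error.Abort, letting callers append a "(...)" suggestion to the abort message. A hypothetical call (the hint text is illustrative):

    sub.bailifchanged(hint=_('commit or revert the subrepository '
                             'changes first'))
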
--- a/mercurial/templatekw.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templatekw.py	Wed Jan 18 11:43:36 2017 -0500
@@ -299,10 +299,22 @@
     """String. Statistics of changes with the following format:
     "modified files: +added/-removed lines"
     """
-    stats = patch.diffstatdata(util.iterlines(ctx.diff()))
+    stats = patch.diffstatdata(util.iterlines(ctx.diff(noprefix=False)))
     maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
     return '%s: +%s/-%s' % (len(stats), adds, removes)
 
+@templatekeyword('envvars')
+def showenvvars(repo, **args):
+    """A dictionary of environment variables. (EXPERIMENTAL)"""
+
+    env = repo.ui.exportableenviron()
+    env = util.sortdict((k, env[k]) for k in sorted(env))
+    makemap = lambda k: {'key': k, 'value': env[k]}
+    c = [makemap(k) for k in env]
+    f = _showlist('envvar', c, plural='envvars', **args)
+    return _hybrid(f, env, makemap,
+                   lambda x: '%s=%s' % (x['key'], x['value']))
+
 @templatekeyword('extras')
 def showextras(**args):
     """List of dicts with key, value entries of the 'extras'
@@ -458,7 +470,8 @@
         # just avoid crash, we might want to use the 'ff...' hash in future
         return
     args = args.copy()
-    args.update({'rev': repo.manifest.rev(mnode), 'node': hex(mnode)})
+    args.update({'rev': repo.manifestlog._revlog.rev(mnode),
+                 'node': hex(mnode)})
     return templ('manifest', **args)
 
 def shownames(namespace, **args):
@@ -594,5 +607,13 @@
     """Integer. The width of the current terminal."""
     return repo.ui.termwidth()
 
+@templatekeyword('troubles')
+def showtroubles(**args):
+    """List of strings. Evolution troubles affecting the changeset.
+
+    (EXPERIMENTAL)
+    """
+    return showlist('trouble', args['ctx'].troubles(), **args)
+
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = keywords.values()
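Like other list keywords, troubles composes with join() and %-expansion; the
map-cmdline.default change further down relies on exactly that. A sketch
(troubles only show up with the experimental evolution feature enabled;
typical values are 'unstable', 'bumped' and 'divergent'):

    $ hg log -r . -T 'troubles: {join(troubles, ", ")}\n'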
--- a/mercurial/templater.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templater.py	Wed Jan 18 11:43:36 2017 -0500
@@ -17,6 +17,7 @@
     error,
     minirst,
     parser,
+    pycompat,
     registrar,
     revset as revsetmod,
     templatefilters,
@@ -792,7 +793,7 @@
 
 @templatefunc('rstdoc(text, style)')
 def rstdoc(context, mapping, args):
-    """Format ReStructuredText."""
+    """Format reStructuredText."""
     if len(args) != 2:
         # i18n: "rstdoc" is a keyword
         raise error.ParseError(_("rstdoc expects two arguments"))
@@ -1243,8 +1244,8 @@
         # only plain name is allowed to honor template paths
         if (not style
             or style in (os.curdir, os.pardir)
-            or os.sep in style
-            or os.altsep and os.altsep in style):
+            or pycompat.ossep in style
+            or pycompat.osaltsep and pycompat.osaltsep in style):
             continue
         locations = [os.path.join(style, 'map'), 'map-' + style]
         locations.append('map')
--- a/mercurial/templates/gitweb/fileannotate.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/gitweb/fileannotate.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -28,7 +28,7 @@
 annotate |
 <a href="{url|urlescape}diff/{symrev}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
 <a href="{url|urlescape}comparison/{symrev}/{file|urlescape}{sessionvars%urlparameter}">comparison</a> |
-<a href="{url|urlescape}raw-annotate/{symrev}/{file|urlescape}">raw</a> |
+<a href="{url|urlescape}raw-file/{symrev}/{file|urlescape}">raw</a> |
 <a href="{url|urlescape}help{sessionvars%urlparameter}">help</a>
 <br/>
 </div>
--- a/mercurial/templates/gitweb/footer.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/gitweb/footer.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -1,4 +1,3 @@
-<script type="text/javascript">process_dates()</script>
 <div class="page_footer">
 <div class="page_footer_text">{repo|escape}</div>
 <div class="rss_logo">
--- a/mercurial/templates/gitweb/graph.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/gitweb/graph.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -45,7 +45,7 @@
 <ul id="graphnodes"></ul>
 </div>
 
-<script>
+<script{if(nonce, ' nonce="{nonce}"')}>
 <!-- hide script content
 
 var data = {jsdata|json};
@@ -108,7 +108,7 @@
 | {changenav%navgraph}
 </div>
 
-<script type="text/javascript">
+<script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
     ajaxScrollInit(
             '{url|urlescape}graph/{rev}?revcount=%next%&style={style}',
             {revcount}+60,
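The recurring {if(nonce, ' nonce="{nonce}"')} edits in these templates let
hgweb serve a Content-Security-Policy header without breaking its inline
scripts: the per-request nonce in the header has to match the attribute on
each script tag. A hand-written illustration of the intended pairing (the
nonce value is invented):

    Content-Security-Policy: script-src 'nonce-2726c7f26c'

    <script type="text/javascript" nonce="2726c7f26c">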
--- a/mercurial/templates/gitweb/index.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/gitweb/index.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -22,6 +22,5 @@
 <div class="page_footer">
 {motd}
 </div>
-<script type="text/javascript">process_dates()</script>
 </body>
 </html>
--- a/mercurial/templates/gitweb/shortlog.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/gitweb/shortlog.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -40,7 +40,7 @@
 {changenav%navshort}
 </div>
 
-<script type="text/javascript">
+<script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
     ajaxScrollInit(
             '{url|urlescape}shortlog/%next%{sessionvars%urlparameter}',
             '{nextentry%"{node}"}', <!-- NEXTHASH
--- a/mercurial/templates/map-cmdline.default	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/map-cmdline.default	Wed Jan 18 11:43:36 2017 -0500
@@ -1,9 +1,9 @@
 # Base templates. Due to name clashes with existing keywords, we have
 # to replace some keywords with 'lkeyword', for 'labelled keyword'
-changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{summary}\n'
 changeset_quiet = '{lnode}'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
-changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lfiles}{lfile_copies_switch}{description}\n'
+changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{ltroubles}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
 
 # File templates
 lfiles = '{if(files,
@@ -28,13 +28,15 @@
                                                % ' {name} ({source})'}\n"))}'
 
 # General templates
-cset = '{label("log.changeset changeset.{phase}",
+_trouble_label = 'trouble.{trouble}'
+_cset_labels = 'log.changeset changeset.{phase}{if(troubles, " changeset.troubled {troubles%_trouble_label}")}'
+cset = '{label("{_cset_labels}",
                "changeset:   {rev}:{node|short}")}\n'
 
 lphase = '{label("log.phase",
                  "phase:       {phase}")}\n'
 
-fullcset = '{label("log.changeset changeset.{phase}",
+fullcset = '{label("{_cset_labels}",
                    "changeset:   {rev}:{node}")}\n'
 
 parent = '{label("log.parent changeset.{phase}",
@@ -64,6 +66,9 @@
 ldate = '{label("log.date",
                 "date:        {date|date}")}\n'
 
+ltroubles = '{if(troubles, "{label('log.trouble',
+                                   'trouble:     {join(troubles, ", ")}')}\n")}'
+
 extra = '{label("ui.debug log.extra",
                 "extra:       {key}={value|stringescape}")}\n'
 
--- a/mercurial/templates/monoblue/fileannotate.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/monoblue/fileannotate.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -37,7 +37,7 @@
         <li class="current">annotate</li>
         <li><a href="{url|urlescape}diff/{symrev}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
         <li><a href="{url|urlescape}comparison/{symrev}/{file|urlescape}{sessionvars%urlparameter}">comparison</a></li>
-        <li><a href="{url|urlescape}raw-annotate/{symrev}/{file|urlescape}">raw</a></li>
+        <li><a href="{url|urlescape}raw-file/{symrev}/{file|urlescape}">raw</a></li>
     </ul>
 
     <h2 class="no-link no-border">{file|escape}@{node|short} (annotated)</h2>
--- a/mercurial/templates/monoblue/footer.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/monoblue/footer.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -1,4 +1,3 @@
-    <script type="text/javascript">process_dates()</script>
     <div class="page-footer">
         <p>Mercurial Repository: {repo|escape}</p>
         <ul class="rss-logo">
--- a/mercurial/templates/monoblue/graph.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/monoblue/graph.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -40,7 +40,7 @@
         <ul id="graphnodes"></ul>
     </div>
 
-    <script>
+    <script{if(nonce, ' nonce="{nonce}"')}>
     <!-- hide script content
 
     document.getElementById('noscript').style.display = 'none';
@@ -104,7 +104,7 @@
         | {changenav%navgraph}
     </div>
 
-    <script type="text/javascript">
+    <script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
     ajaxScrollInit(
             '{url|urlescape}graph/{rev}?revcount=%next%&style={style}',
             {revcount}+60,
--- a/mercurial/templates/monoblue/index.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/monoblue/index.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -30,6 +30,5 @@
     </div>
 
 </div>
-<script type="text/javascript">process_dates()</script>
 </body>
 </html>
--- a/mercurial/templates/monoblue/shortlog.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/monoblue/shortlog.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -41,7 +41,7 @@
     {changenav%navshort}
     </div>
 
-    <script type="text/javascript">
+    <script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
     ajaxScrollInit(
             '{url|urlescape}shortlog/%next%{sessionvars%urlparameter}',
             '{nextentry%"{node}"}', <!-- NEXTHASH
--- a/mercurial/templates/paper/fileannotate.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/paper/fileannotate.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -28,7 +28,7 @@
 <li><a href="{url|urlescape}comparison/{symrev}/{file|urlescape}{sessionvars%urlparameter}">comparison</a></li>
 <li class="active">annotate</li>
 <li><a href="{url|urlescape}log/{symrev}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li>
-<li><a href="{url|urlescape}raw-annotate/{symrev}/{file|urlescape}">raw</a></li>
+<li><a href="{url|urlescape}raw-file/{symrev}/{file|urlescape}">raw</a></li>
 </ul>
 <ul>
 <li><a href="{url|urlescape}help{sessionvars%urlparameter}">help</a></li>
--- a/mercurial/templates/paper/footer.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/paper/footer.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -1,4 +1,3 @@
-<script type="text/javascript">process_dates()</script>
 {motd}
 
 </body>
--- a/mercurial/templates/paper/graph.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/paper/graph.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -59,7 +59,7 @@
 <ul id="graphnodes"></ul>
 </div>
 
-<script type="text/javascript">
+<script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
 <!-- hide script content
 
 var data = {jsdata|json};
@@ -121,7 +121,7 @@
 | rev {rev}: {changenav%navgraph}
 </div>
 
-<script type="text/javascript">
+<script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
     ajaxScrollInit(
             '{url|urlescape}graph/{rev}?revcount=%next%&style={style}',
             {revcount}+60,
--- a/mercurial/templates/paper/shortlog.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/paper/shortlog.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -72,7 +72,7 @@
 | rev {rev}: {changenav%navshort}
 </div>
 
-<script type="text/javascript">
+<script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
     ajaxScrollInit(
             '{url|urlescape}shortlog/%next%{sessionvars%urlparameter}',
             '{nextentry%"{node}"}', <!-- NEXTHASH
--- a/mercurial/templates/spartan/fileannotate.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/spartan/fileannotate.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -13,7 +13,7 @@
 <a href="{url|urlescape}file/{symrev}{path|urlescape}{sessionvars%urlparameter}">files</a>
 <a href="{url|urlescape}file/{symrev}/{file|urlescape}{sessionvars%urlparameter}">file</a>
 <a href="{url|urlescape}log/{symrev}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
-<a href="{url|urlescape}raw-annotate/{symrev}/{file|urlescape}">raw</a>
+<a href="{url|urlescape}raw-file/{symrev}/{file|urlescape}">raw</a>
 <a href="{url|urlescape}help{sessionvars%urlparameter}">help</a>
 </div>
 
--- a/mercurial/templates/spartan/footer.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/spartan/footer.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -1,4 +1,3 @@
-<script type="text/javascript">process_dates()</script>
 {motd}
 <div class="logo">
 <a href="{logourl}">
--- a/mercurial/templates/spartan/graph.tmpl	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/spartan/graph.tmpl	Wed Jan 18 11:43:36 2017 -0500
@@ -36,7 +36,7 @@
 <ul id="graphnodes"></ul>
 </div>
 
-<script type="text/javascript">
+<script type="text/javascript"{if(nonce, ' nonce="{nonce}"')}>
 <!-- hide script content
 
 var data = {jsdata|json};
--- a/mercurial/templates/static/mercurial.js	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/templates/static/mercurial.js	Wed Jan 18 11:43:36 2017 -0500
@@ -433,3 +433,7 @@
     window.addEventListener('resize', scrollHandler);
     scrollHandler();
 }
+
+document.addEventListener('DOMContentLoaded', function() {
+   process_dates();
+}, false);
--- a/mercurial/ui.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/ui.py	Wed Jan 18 11:43:36 2017 -0500
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+import contextlib
 import errno
 import getpass
 import inspect
@@ -22,9 +23,11 @@
 
 from . import (
     config,
+    encoding,
     error,
     formatter,
     progress,
+    pycompat,
     scmutil,
     util,
 )
@@ -93,6 +96,12 @@
 
 class ui(object):
     def __init__(self, src=None):
+        """Create a fresh new ui object if no src given
+
+        Use uimod.ui.load() to create a ui which knows global and user configs.
+        In most cases, you should use ui.copy() to create a copy of an existing
+        ui object.
+        """
         # _buffers: used for temporary capture of output
         self._buffers = []
         # 3-tuple describing how each buffer in the stack behaves.
@@ -129,18 +138,33 @@
 
             self.httppasswordmgrdb = src.httppasswordmgrdb
         else:
-            self.fout = sys.stdout
-            self.ferr = sys.stderr
-            self.fin = sys.stdin
+            self.fout = util.stdout
+            self.ferr = util.stderr
+            self.fin = util.stdin
 
             # shared read-only environment
-            self.environ = os.environ
-            # we always trust global config files
-            for f in scmutil.rcpath():
-                self.readconfig(f, trust=True)
+            self.environ = encoding.environ
 
             self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
 
+        allowed = self.configlist('experimental', 'exportableenviron')
+        if '*' in allowed:
+            self._exportableenviron = self.environ
+        else:
+            self._exportableenviron = {}
+            for k in allowed:
+                if k in self.environ:
+                    self._exportableenviron[k] = self.environ[k]
+
+    @classmethod
+    def load(cls):
+        """Create a ui and load global and user configs"""
+        u = cls()
+        # we always trust global config files
+        for f in scmutil.rcpath():
+            u.readconfig(f, trust=True)
+        return u
+
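A consequence of this split is that a bare ui() now starts with no config
files read at all; callers that relied on the old constructor behavior must
go through the new classmethod. A minimal sketch:

    from mercurial import ui as uimod

    lui = uimod.ui.load()  # reads the global and per-user hgrc files
    sub = lui.copy()       # derived uis share the loaded configuration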
     def copy(self):
         return self.__class__(self)
 
@@ -175,7 +199,7 @@
     def readconfig(self, filename, root=None, trust=False,
                    sections=None, remap=None):
         try:
-            fp = open(filename)
+            fp = open(filename, u'rb')
         except IOError:
             if not sections: # ignore unless we were looking for something
                 return
@@ -225,7 +249,7 @@
         if section in (None, 'paths'):
             # expand vars and ~
             # translate paths relative to root (or home) into absolute paths
-            root = root or os.getcwd()
+            root = root or pycompat.getcwd()
             for c in self._tcfg, self._ucfg, self._ocfg:
                 for n, p in c.items('paths'):
                     # Ignore sub-options.
@@ -234,8 +258,9 @@
                     if not p:
                         continue
                     if '%%' in p:
+                        s = self.configsource('paths', n) or 'none'
                         self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
-                                  % (n, p, self.configsource('paths', n)))
+                                  % (n, p, s))
                         p = p.replace('%%', '%')
                     p = util.expandpath(p)
                     if not util.hasscheme(p) and not os.path.isabs(p):
@@ -276,7 +301,7 @@
         return untrusted and self._ucfg or self._tcfg
 
     def configsource(self, section, name, untrusted=False):
-        return self._data(untrusted).source(section, name) or 'none'
+        return self._data(untrusted).source(section, name)
 
     def config(self, section, name, default=None, untrusted=False):
         if isinstance(name, list):
@@ -520,7 +545,7 @@
         result = self.config(section, name, untrusted=untrusted)
         if result is None:
             result = default or []
-        if isinstance(result, basestring):
+        if isinstance(result, bytes):
             result = _configlist(result.lstrip(' ,\n'))
             if result is None:
                 result = default or []
@@ -569,9 +594,11 @@
         - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
         - True otherwise
         '''
-        if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
+        if ('HGPLAIN' not in encoding.environ and
+                'HGPLAINEXCEPT' not in encoding.environ):
             return False
-        exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
+        exceptions = encoding.environ.get('HGPLAINEXCEPT',
+                '').strip().split(',')
         if feature and exceptions:
             return feature not in exceptions
         return True
@@ -584,13 +611,13 @@
         If not found and ui.askusername is True, ask the user, else use
         ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
         """
-        user = os.environ.get("HGUSER")
+        user = encoding.environ.get("HGUSER")
         if user is None:
             user = self.config("ui", ["username", "user"])
             if user is not None:
                 user = os.path.expandvars(user)
         if user is None:
-            user = os.environ.get("EMAIL")
+            user = encoding.environ.get("EMAIL")
         if user is None and self.configbool("ui", "askusername"):
             user = self.prompt(_("enter a commit username:"), default=None)
         if user is None and not self.interactive():
@@ -733,7 +760,7 @@
         is curses, the interface for histedit is text and the interface for
         selecting chunk is crecord (the best curses interface available).
 
-        Consider the following exemple:
+        Consider the following example:
         ui.interface = curses
         ui.interface.histedit = text
 
@@ -814,12 +841,12 @@
     def termwidth(self):
         '''how wide is the terminal in columns?
         '''
-        if 'COLUMNS' in os.environ:
+        if 'COLUMNS' in encoding.environ:
             try:
-                return int(os.environ['COLUMNS'])
+                return int(encoding.environ['COLUMNS'])
             except ValueError:
                 pass
-        return util.termwidth()
+        return scmutil.termsize(self)[0]
 
     def formatted(self):
         '''should formatted output be used?
@@ -954,7 +981,10 @@
             # disable getpass() only if explicitly specified. it's still valid
             # to interact with tty even if fin is not a tty.
             if self.configbool('ui', 'nontty'):
-                return self.fin.readline().rstrip('\n')
+                l = self.fin.readline()
+                if not l:
+                    raise EOFError
+                return l.rstrip('\n')
             else:
                 return getpass.getpass('')
         except EOFError:
@@ -991,7 +1021,8 @@
             opts['label'] = opts.get('label', '') + ' ui.debug'
             self.write(*msg, **opts)
 
-    def edit(self, text, user, extra=None, editform=None, pending=None):
+    def edit(self, text, user, extra=None, editform=None, pending=None,
+             tmpdir=None):
         extra_defaults = {
             'prefix': 'editor',
             'suffix': '.txt',
@@ -999,8 +1030,13 @@
         if extra is not None:
             extra_defaults.update(extra)
         extra = extra_defaults
+
+        tdir = None
+        if self.configbool('experimental', 'editortmpinhg'):
+            tdir = tmpdir
         (fd, name) = tempfile.mkstemp(prefix='hg-' + extra['prefix'] + '-',
-                                      suffix=extra['suffix'], text=True)
+                                      suffix=extra['suffix'], text=True,
+                                      dir=tdir)
         try:
             f = os.fdopen(fd, "w")
             f.write(text)
@@ -1068,17 +1104,17 @@
 
     def geteditor(self):
         '''return editor to use'''
-        if sys.platform == 'plan9':
+        if pycompat.sysplatform == 'plan9':
             # vi is the MIPS instruction simulator on Plan 9. We
             # instead default to E to plumb commit messages to
             # avoid confusion.
             editor = 'E'
         else:
             editor = 'vi'
-        return (os.environ.get("HGEDITOR") or
+        return (encoding.environ.get("HGEDITOR") or
                 self.config("ui", "editor") or
-                os.environ.get("VISUAL") or
-                os.environ.get("EDITOR", editor))
+                encoding.environ.get("VISUAL") or
+                encoding.environ.get("EDITOR", editor))
 
     @util.propertycache
     def _progbar(self):
@@ -1190,6 +1226,31 @@
                 " update your code.)") % version
         self.develwarn(msg, stacklevel=2, config='deprec-warn')
 
+    def exportableenviron(self):
+        """The environment variables that are safe to export, e.g. through
+        hgweb.
+        """
+        return self._exportableenviron
+
+    @contextlib.contextmanager
+    def configoverride(self, overrides, source=""):
+        """Context manager for temporary config overrides
+        `overrides` must be a dict of the following structure:
+        {(section, name) : value}"""
+        backups = {}
+        try:
+            for (section, name), value in overrides.items():
+                backups[(section, name)] = self.backupconfig(section, name)
+                self.setconfig(section, name, value, source)
+            yield
+        finally:
+            for __, backup in backups.items():
+                self.restoreconfig(backup)
+            # just restoring the ui.quiet config to its previous value is not
+            # enough, as it does not update the ui.quiet class member
+            if ('ui', 'quiet') in overrides:
+                self.fixconfig(section='ui')
+
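A sketch of how a caller might use the new context manager (repo and the
'myext' source tag are placeholders):

    # temporarily force quiet output; everything is restored on exit,
    # including the ui.quiet member thanks to the fixconfig() call above
    with repo.ui.configoverride({('ui', 'quiet'): True}, source='myext'):
        runsomething(repo)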
 class paths(dict):
     """Represents a collection of paths and their configs.
 
--- a/mercurial/unionrepo.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/unionrepo.py	Wed Jan 18 11:43:36 2017 -0500
@@ -13,8 +13,6 @@
 
 from __future__ import absolute_import
 
-import os
-
 from .i18n import _
 from .node import nullid
 
@@ -27,6 +25,7 @@
     manifest,
     mdiff,
     pathutil,
+    pycompat,
     revlog,
     scmutil,
     util,
@@ -94,7 +93,7 @@
         return mdiff.textdiff(self.revision(self.node(rev1)),
                               self.revision(self.node(rev2)))
 
-    def revision(self, nodeorrev):
+    def revision(self, nodeorrev, raw=False):
         """return an uncompressed revision of a given node or revision
         number.
         """
@@ -152,18 +151,18 @@
     def baserevdiff(self, rev1, rev2):
         return changelog.changelog.revdiff(self, rev1, rev2)
 
-class unionmanifest(unionrevlog, manifest.manifest):
+class unionmanifest(unionrevlog, manifest.manifestrevlog):
     def __init__(self, opener, opener2, linkmapper):
-        manifest.manifest.__init__(self, opener)
-        manifest2 = manifest.manifest(opener2)
+        manifest.manifestrevlog.__init__(self, opener)
+        manifest2 = manifest.manifestrevlog(opener2)
         unionrevlog.__init__(self, opener, self.indexfile, manifest2,
                              linkmapper)
 
     def baserevision(self, nodeorrev):
-        return manifest.manifest.revision(self, nodeorrev)
+        return manifest.manifestrevlog.revision(self, nodeorrev)
 
     def baserevdiff(self, rev1, rev2):
-        return manifest.manifest.revdiff(self, rev1, rev2)
+        return manifest.manifestrevlog.revdiff(self, rev1, rev2)
 
 class unionfilelog(unionrevlog, filelog.filelog):
     def __init__(self, opener, path, opener2, linkmapper, repo):
@@ -229,7 +228,7 @@
         return unionpeer(self)
 
     def getcwd(self):
-        return os.getcwd() # always outside the repo
+        return pycompat.getcwd() # always outside the repo
 
 def instance(ui, path, create):
     if create:
@@ -237,13 +236,13 @@
     parentpath = ui.config("bundle", "mainreporoot", "")
     if not parentpath:
         # try to find the correct path to the working directory repo
-        parentpath = cmdutil.findrepo(os.getcwd())
+        parentpath = cmdutil.findrepo(pycompat.getcwd())
         if parentpath is None:
             parentpath = ''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
-        cwd = os.getcwd()
+        cwd = pycompat.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
--- a/mercurial/url.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/url.py	Wed Jan 18 11:43:36 2017 -0500
@@ -15,6 +15,7 @@
 
 from .i18n import _
 from . import (
+    encoding,
     error,
     httpconnection as httpconnectionmod,
     keepalive,
@@ -79,7 +80,8 @@
 
 class proxyhandler(urlreq.proxyhandler):
     def __init__(self, ui):
-        proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
+        proxyurl = (ui.config("http_proxy", "host") or
+                        encoding.environ.get('http_proxy'))
         # XXX proxyauthinfo = None
 
         if proxyurl:
@@ -97,7 +99,7 @@
             no_list.extend([p.lower() for
                             p in ui.configlist("http_proxy", "no")])
             no_list.extend([p.strip().lower() for
-                            p in os.getenv("no_proxy", '').split(',')
+                            p in encoding.environ.get("no_proxy", '').split(',')
                             if p.strip()])
             # "http_proxy.always" config is for running tests on localhost
             if ui.configbool("http_proxy", "always"):
@@ -112,17 +114,6 @@
         else:
             proxies = {}
 
-        # urllib2 takes proxy values from the environment and those
-        # will take precedence if found. So, if there's a config entry
-        # defining a proxy, drop the environment ones
-        if ui.config("http_proxy", "host"):
-            for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
-                try:
-                    if env in os.environ:
-                        del os.environ[env]
-                except OSError:
-                    pass
-
         urlreq.proxyhandler.__init__(self, proxies)
         self.ui = ui
 
@@ -475,6 +466,11 @@
     # user agent they deem appropriate.
     agent = 'mercurial/proto-1.0 (Mercurial %s)' % util.version()
     opener.addheaders = [('User-agent', agent)]
+
+    # This header should only be needed by wire protocol requests. But it has
+    # been sent on all requests since forever. We keep sending it for backwards
+    # compatibility reasons. Modern versions of the wire protocol use
+    # X-HgProto-<N> for advertising client support.
     opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
     return opener
 
--- a/mercurial/util.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/util.py	Wed Jan 18 11:43:36 2017 -0500
@@ -24,10 +24,12 @@
 import hashlib
 import imp
 import os
+import platform as pyplatform
 import re as remod
 import shutil
 import signal
 import socket
+import stat
 import string
 import subprocess
 import sys
@@ -46,28 +48,24 @@
     pycompat,
 )
 
-for attr in (
-    'empty',
-    'httplib',
-    'httpserver',
-    'pickle',
-    'queue',
-    'urlerr',
-    'urlparse',
-    # we do import urlreq, but we do it outside the loop
-    #'urlreq',
-    'stringio',
-    'socketserver',
-    'xmlrpclib',
-):
-    a = pycompat.sysstr(attr)
-    globals()[a] = getattr(pycompat, a)
-
-# This line is to make pyflakes happy:
+empty = pycompat.empty
+httplib = pycompat.httplib
+httpserver = pycompat.httpserver
+pickle = pycompat.pickle
+queue = pycompat.queue
+socketserver = pycompat.socketserver
+stderr = pycompat.stderr
+stdin = pycompat.stdin
+stdout = pycompat.stdout
+stringio = pycompat.stringio
+urlerr = pycompat.urlerr
+urlparse = pycompat.urlparse
 urlreq = pycompat.urlreq
-
-if os.name == 'nt':
+xmlrpclib = pycompat.xmlrpclib
+
+if pycompat.osname == 'nt':
     from . import windows as platform
+    stdout = platform.winstdout(pycompat.stdout)
 else:
     from . import posix as platform
 
@@ -122,7 +120,6 @@
 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
 statisexec = platform.statisexec
 statislink = platform.statislink
-termwidth = platform.termwidth
 testpid = platform.testpid
 umask = platform.umask
 unlink = platform.unlink
@@ -141,6 +138,12 @@
 def safehasattr(thing, attr):
     return getattr(thing, attr, _notset) is not _notset
 
+def bitsfrom(container):
+    bits = 0
+    for bit in container:
+        bits |= bit
+    return bits
+
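bitsfrom() simply ORs every element of a container together; with stat now
imported above, for example:

    # equivalent to stat.S_IRWXU (0o700)
    mode = bitsfrom([stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR])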
 DIGESTS = {
     'md5': hashlib.md5,
     'sha1': hashlib.sha1,
@@ -235,13 +238,17 @@
     buffer = buffer
 except NameError:
     if not pycompat.ispy3:
-        def buffer(sliceable, offset=0):
+        def buffer(sliceable, offset=0, length=None):
+            if length is not None:
+                return sliceable[offset:offset + length]
             return sliceable[offset:]
     else:
-        def buffer(sliceable, offset=0):
+        def buffer(sliceable, offset=0, length=None):
+            if length is not None:
+                return memoryview(sliceable)[offset:offset + length]
             return memoryview(sliceable)[offset:]
 
-closefds = os.name == 'posix'
+closefds = pycompat.osname == 'posix'
 
 _chunksize = 4096
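With the new optional length argument, buffer() above yields a bounded slice
(Python 2) or memoryview (Python 3) instead of always running to the end of
the input:

    data = b'abcdef'
    buffer(data, 1)     # covers b'bcdef'
    buffer(data, 1, 3)  # covers b'bcd': offset 1, length 3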
 
@@ -798,7 +805,7 @@
         cmd = cmd.replace('INFILE', inname)
         cmd = cmd.replace('OUTFILE', outname)
         code = os.system(cmd)
-        if sys.platform == 'OpenVMS' and code & 1:
+        if pycompat.sysplatform == 'OpenVMS' and code & 1:
             code = 0
         if code:
             raise Abort(_("command '%s' failed: %s") %
@@ -919,7 +926,7 @@
         a.pop()
         b.pop()
     b.reverse()
-    return os.sep.join((['..'] * len(a)) + b) or '.'
+    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
 
 def mainfrozen():
     """return True if we are a frozen executable.
@@ -934,10 +941,13 @@
 # the location of data files matching the source code
 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
     # executable version (py2exe) doesn't support __file__
-    datapath = os.path.dirname(sys.executable)
+    datapath = os.path.dirname(pycompat.sysexecutable)
 else:
     datapath = os.path.dirname(__file__)
 
+if not isinstance(datapath, bytes):
+    datapath = pycompat.fsencode(datapath)
+
 i18n.setdatapath(datapath)
 
 _hgexecutable = None
@@ -948,16 +958,16 @@
     Defaults to $HG or 'hg' in the search path.
     """
     if _hgexecutable is None:
-        hg = os.environ.get('HG')
+        hg = encoding.environ.get('HG')
         mainmod = sys.modules['__main__']
         if hg:
             _sethgexecutable(hg)
         elif mainfrozen():
             if getattr(sys, 'frozen', None) == 'macosx_app':
                 # Env variable set by py2app
-                _sethgexecutable(os.environ['EXECUTABLEPATH'])
+                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
             else:
-                _sethgexecutable(sys.executable)
+                _sethgexecutable(pycompat.sysexecutable)
         elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
             _sethgexecutable(mainmod.__file__)
         else:
@@ -974,6 +984,21 @@
     fileno = getattr(f, 'fileno', None)
     return fileno and fileno() == sys.__stdout__.fileno()
 
+def shellenviron(environ=None):
+    """return environ with optional override, useful for shelling out"""
+    def py2shell(val):
+        'convert a python object into a string that is useful to the shell'
+        if val is None or val is False:
+            return '0'
+        if val is True:
+            return '1'
+        return str(val)
+    env = dict(encoding.environ)
+    if environ:
+        env.update((k, py2shell(v)) for k, v in environ.iteritems())
+    env['HG'] = hgexecutable()
+    return env
+
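Factoring shellenviron() out of system() lets other callers build a
hook-style child environment without actually spawning anything. A sketch
with placeholder variables:

    # None/False flatten to '0', True to '1'; HG always points at hg itself
    env = shellenviron({'HG_NODE': '0' * 40, 'HG_DRYRUN': True})
    assert env['HG_DRYRUN'] == '1'
    assert env['HG'] == hgexecutable()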
 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
     '''enhanced shell command execution.
     run with the environment possibly modified, possibly in a different dir.
@@ -983,22 +1008,13 @@
 
     if out is specified, it is assumed to be a file-like object that has a
     write() method. stdout and stderr will be redirected to out.'''
-    if environ is None:
-        environ = {}
     try:
-        sys.stdout.flush()
+        stdout.flush()
     except Exception:
         pass
-    def py2shell(val):
-        'convert python object into string that is useful to shell'
-        if val is None or val is False:
-            return '0'
-        if val is True:
-            return '1'
-        return str(val)
     origcmd = cmd
     cmd = quotecommand(cmd)
-    if sys.platform == 'plan9' and (sys.version_info[0] == 2
+    if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
                                     and sys.version_info[1] < 7):
         # subprocess kludge to work around issues in half-baked Python
         # ports, notably bichued/python:
@@ -1006,9 +1022,7 @@
             os.chdir(cwd)
         rc = os.system(cmd)
     else:
-        env = dict(os.environ)
-        env.update((k, py2shell(v)) for k, v in environ.iteritems())
-        env['HG'] = hgexecutable()
+        env = shellenviron(environ)
         if out is None or _isstdout(out):
             rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                  env=env, cwd=cwd)
@@ -1020,7 +1034,7 @@
                 out.write(line)
             proc.wait()
             rc = proc.returncode
-        if sys.platform == 'OpenVMS' and rc & 1:
+        if pycompat.sysplatform == 'OpenVMS' and rc & 1:
             rc = 0
     if rc and onerr:
         errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
@@ -1175,7 +1189,7 @@
             return _("filename ends with '%s', which is not allowed "
                      "on Windows") % t
 
-if os.name == 'nt':
+if pycompat.osname == 'nt':
     checkosfilename = checkwinfilename
 else:
     checkosfilename = platform.checkosfilename
@@ -1303,9 +1317,9 @@
     def _makefspathcacheentry(dir):
         return dict((normcase(n), n) for n in os.listdir(dir))
 
-    seps = os.sep
-    if os.altsep:
-        seps = seps + os.altsep
+    seps = pycompat.ossep
+    if pycompat.osaltsep:
+        seps = seps + pycompat.osaltsep
     # Protect backslashes. This gets silly very quickly.
     seps.replace('\\','\\\\')
     pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
@@ -1370,7 +1384,8 @@
 
 def endswithsep(path):
     '''Check path ends with os.sep or os.altsep.'''
-    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
+    return (path.endswith(pycompat.ossep)
+            or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
 
 def splitpath(path):
     '''Split path by os.sep.
@@ -1378,12 +1393,12 @@
     an alternative to the simple "xxx.split(os.sep)".
     It is recommended to use os.path.normpath() before using this
     function if needed.'''
-    return path.split(os.sep)
+    return path.split(pycompat.ossep)
 
 def gui():
     '''Are we running in a GUI?'''
-    if sys.platform == 'darwin':
-        if 'SSH_CONNECTION' in os.environ:
+    if pycompat.sysplatform == 'darwin':
+        if 'SSH_CONNECTION' in encoding.environ:
             # handle SSH access to a box where the user is logged in
             return False
         elif getattr(osutil, 'isgui', None):
@@ -1393,7 +1408,7 @@
             # pure build; use a safe default
             return True
     else:
-        return os.name == "nt" or os.environ.get("DISPLAY")
+        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
 
 def mktempcopy(name, emptyok=False, createmode=None):
     """Create a temporary file with the same contents from name
@@ -1454,7 +1469,7 @@
     def __eq__(self, old):
         try:
             # if ambiguity between stat of new and old file is
-            # avoided, comparision of size, ctime and mtime is enough
+            # avoided, comparison of size, ctime and mtime is enough
             # to exactly detect change of a file regardless of platform
             return (self.stat.st_size == old.stat.st_size and
                     self.stat.st_ctime == old.stat.st_ctime and
@@ -1985,7 +2000,7 @@
         start, stop = lower(date), upper(date)
         return lambda x: x >= start and x <= stop
 
-def stringmatcher(pattern):
+def stringmatcher(pattern, casesensitive=True):
     """
     accepts a string, possibly starting with 're:' or 'literal:' prefix.
     returns the matcher name, pattern, and matcher function.
@@ -1995,6 +2010,9 @@
     >>> def test(pattern, *tests):
     ...     kind, pattern, matcher = stringmatcher(pattern)
     ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
+    >>> def itest(pattern, *tests):
+    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
+    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
 
     exact matching (no prefix):
     >>> test('abcdefg', 'abc', 'def', 'abcdefg')
@@ -2011,18 +2029,35 @@
     unknown prefixes are ignored and treated as literals
     >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
     ('literal', 'foo:bar', [False, False, True])
+
+    case insensitive regex matches
+    >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
+    ('re', 'A.+b', [False, False, True])
+
+    case insensitive literal matches
+    >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
+    ('literal', 'ABCDEFG', [False, False, True])
     """
     if pattern.startswith('re:'):
         pattern = pattern[3:]
         try:
-            regex = remod.compile(pattern)
+            flags = 0
+            if not casesensitive:
+                flags = remod.I
+            regex = remod.compile(pattern, flags)
         except remod.error as e:
             raise error.ParseError(_('invalid regular expression: %s')
                                    % e)
         return 're', pattern, regex.search
     elif pattern.startswith('literal:'):
         pattern = pattern[8:]
-    return 'literal', pattern, pattern.__eq__
+
+    match = pattern.__eq__
+
+    if not casesensitive:
+        ipat = encoding.lower(pattern)
+        match = lambda s: ipat == encoding.lower(s)
+    return 'literal', pattern, match
 
 def shortuser(user):
     """Return a short representation of a user name or email address."""
@@ -2206,6 +2241,78 @@
                             subsequent_indent=hangindent)
     return wrapper.fill(line).encode(encoding.encoding)
 
+if (pyplatform.python_implementation() == 'CPython' and
+    sys.version_info < (3, 0)):
+    # There is an issue in CPython that some IO methods do not handle EINTR
+    # correctly. The following table shows what CPython version (and functions)
+    # are affected (buggy: has the EINTR bug, okay: otherwise):
+    #
+    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
+    #   --------------------------------------------------
+    #    fp.__iter__ | buggy   | buggy           | okay
+    #    fp.read*    | buggy   | okay [1]        | okay
+    #
+    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
+    #
+    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
+    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
+    #
+    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
+    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
+    # CPython 2, because CPython 2 maintains an internal readahead buffer for
+    # fp.__iter__ but not other fp.read* methods.
+    #
+    # On modern systems like Linux, the "read" syscall cannot be interrupted
+    # when reading "fast" files like on-disk files. So the EINTR issue only
+    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
+    # files approximately as "fast" files and use the fast (unsafe) code path,
+    # to minimize the performance impact.
+    if sys.version_info >= (2, 7, 4):
+        # fp.readline deals with EINTR correctly, use it as a workaround.
+        def _safeiterfile(fp):
+            return iter(fp.readline, '')
+    else:
+        # fp.read* are broken too, manually deal with EINTR in a stupid way.
+        # note: this may block longer than necessary because of bufsize.
+        def _safeiterfile(fp, bufsize=4096):
+            fd = fp.fileno()
+            line = ''
+            while True:
+                try:
+                    buf = os.read(fd, bufsize)
+                except OSError as ex:
+                    # os.read only raises EINTR before any data is read
+                    if ex.errno == errno.EINTR:
+                        continue
+                    else:
+                        raise
+                line += buf
+                if '\n' in buf:
+                    splitted = line.splitlines(True)
+                    line = ''
+                    for l in splitted:
+                        if l[-1] == '\n':
+                            yield l
+                        else:
+                            line = l
+                if not buf:
+                    break
+            if line:
+                yield line
+
+    def iterfile(fp):
+        fastpath = True
+        if type(fp) is file:
+            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
+        if fastpath:
+            return fp
+        else:
+            return _safeiterfile(fp)
+else:
+    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
+    def iterfile(fp):
+        return fp
+
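iterfile() is the entry point for the workaround above: regular on-disk files
keep the fast native iterator, while pipes, sockets and ttys on affected
CPython 2 builds take the EINTR-safe path. A usage sketch (handle is a
placeholder):

    import subprocess
    proc = subprocess.Popen(['some-command'], stdout=subprocess.PIPE)
    for line in iterfile(proc.stdout):
        handle(line)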
 def iterlines(iterator):
     for chunk in iterator:
         for line in chunk.splitlines():
@@ -2224,9 +2331,9 @@
     if mainfrozen():
         if getattr(sys, 'frozen', None) == 'macosx_app':
             # Env variable set by py2app
-            return [os.environ['EXECUTABLEPATH']]
+            return [encoding.environ['EXECUTABLEPATH']]
         else:
-            return [sys.executable]
+            return [pycompat.sysexecutable]
     return gethgcmd()
 
 def rundetached(args, condfn):
@@ -2396,7 +2503,7 @@
 
     _safechars = "!~*'()+"
     _safepchars = "/!~*'()+:\\"
-    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
+    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
 
     def __init__(self, path, parsequery=True, parsefragment=True):
         # We slowly chomp away at path until we have only the path left
@@ -2410,7 +2517,7 @@
             path, self.fragment = path.split('#', 1)
 
         # special case for Windows drive letters and UNC paths
-        if hasdriveletter(path) or path.startswith(r'\\'):
+        if hasdriveletter(path) or path.startswith('\\\\'):
             self.path = path
             return
 
@@ -2488,7 +2595,7 @@
                   'path', 'fragment'):
             v = getattr(self, a)
             if v is not None:
-                setattr(self, a, pycompat.urlparse.unquote(v))
+                setattr(self, a, pycompat.urlunquote(v))
 
     def __repr__(self):
         attrs = []
@@ -2687,9 +2794,9 @@
         finally:
             elapsed = time.time() - start
             _timenesting[0] -= indent
-            sys.stderr.write('%s%s: %s\n' %
-                             (' ' * _timenesting[0], func.__name__,
-                              timecount(elapsed)))
+            stderr.write('%s%s: %s\n' %
+                         (' ' * _timenesting[0], func.__name__,
+                          timecount(elapsed)))
     return wrapper
 
 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
@@ -2754,7 +2861,7 @@
             else:
                 yield line % (fnmax, fnln, func)
 
-def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
+def debugstacktrace(msg='stacktrace', skip=0, f=stderr, otherf=stdout):
     '''Writes a message to f (stderr) with a nicely formatted stacktrace.
     Skips the 'skip' last entries. By default it will flush stdout first.
     It can be used everywhere and intentionally does not require an ui object.
@@ -2812,32 +2919,6 @@
         yield path[:pos]
         pos = path.rfind('/', 0, pos)
 
-# compression utility
-
-class nocompress(object):
-    def compress(self, x):
-        return x
-    def flush(self):
-        return ""
-
-compressors = {
-    None: nocompress,
-    # lambda to prevent early import
-    'BZ': lambda: bz2.BZ2Compressor(),
-    'GZ': lambda: zlib.compressobj(),
-    }
-# also support the old form by courtesies
-compressors['UN'] = compressors[None]
-
-def _makedecompressor(decompcls):
-    def generator(f):
-        d = decompcls()
-        for chunk in filechunkiter(f):
-            yield d.decompress(chunk)
-    def func(fh):
-        return chunkbuffer(generator(fh))
-    return func
-
 class ctxmanager(object):
     '''A context manager for use in 'with' blocks to allow multiple
     contexts to be entered at once.  This is both safer and more
@@ -2898,20 +2979,567 @@
             raise exc_val
         return received and suppressed
 
-def _bz2():
-    d = bz2.BZ2Decompressor()
-    # Bzip2 stream start with BZ, but we stripped it.
-    # we put it back for good measure.
-    d.decompress('BZ')
-    return d
-
-decompressors = {None: lambda fh: fh,
-                 '_truncatedBZ': _makedecompressor(_bz2),
-                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
-                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
-                 }
-# also support the old form by courtesies
-decompressors['UN'] = decompressors[None]
+# compression code
+
+SERVERROLE = 'server'
+CLIENTROLE = 'client'
+
+compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
+                                               (u'name', u'serverpriority',
+                                                u'clientpriority'))
+
+class compressormanager(object):
+    """Holds registrations of various compression engines.
+
+    This class essentially abstracts the differences between compression
+    engines to allow new compression formats to be added easily, possibly from
+    extensions.
+
+    Compressors are registered against the global instance by calling its
+    ``register()`` method.
+    """
+    def __init__(self):
+        self._engines = {}
+        # Bundle spec human name to engine name.
+        self._bundlenames = {}
+        # Internal bundle identifier to engine name.
+        self._bundletypes = {}
+        # Revlog header to engine name.
+        self._revlogheaders = {}
+        # Wire proto identifier to engine name.
+        self._wiretypes = {}
+
+    def __getitem__(self, key):
+        return self._engines[key]
+
+    def __contains__(self, key):
+        return key in self._engines
+
+    def __iter__(self):
+        return iter(self._engines.keys())
+
+    def register(self, engine):
+        """Register a compression engine with the manager.
+
+        The argument must be a ``compressionengine`` instance.
+        """
+        if not isinstance(engine, compressionengine):
+            raise ValueError(_('argument must be a compressionengine'))
+
+        name = engine.name()
+
+        if name in self._engines:
+            raise error.Abort(_('compression engine %s already registered') %
+                              name)
+
+        bundleinfo = engine.bundletype()
+        if bundleinfo:
+            bundlename, bundletype = bundleinfo
+
+            if bundlename in self._bundlenames:
+                raise error.Abort(_('bundle name %s already registered') %
+                                  bundlename)
+            if bundletype in self._bundletypes:
+                raise error.Abort(_('bundle type %s already registered by %s') %
+                                  (bundletype, self._bundletypes[bundletype]))
+
+            # No external facing name declared.
+            if bundlename:
+                self._bundlenames[bundlename] = name
+
+            self._bundletypes[bundletype] = name
+
+        wiresupport = engine.wireprotosupport()
+        if wiresupport:
+            wiretype = wiresupport.name
+            if wiretype in self._wiretypes:
+                raise error.Abort(_('wire protocol compression %s already '
+                                    'registered by %s') %
+                                  (wiretype, self._wiretypes[wiretype]))
+
+            self._wiretypes[wiretype] = name
+
+        revlogheader = engine.revlogheader()
+        if revlogheader and revlogheader in self._revlogheaders:
+            raise error.Abort(_('revlog header %s already registered by %s') %
+                              (revlogheader, self._revlogheaders[revlogheader]))
+
+        if revlogheader:
+            self._revlogheaders[revlogheader] = name
+
+        self._engines[name] = engine
+
+    @property
+    def supportedbundlenames(self):
+        return set(self._bundlenames.keys())
+
+    @property
+    def supportedbundletypes(self):
+        return set(self._bundletypes.keys())
+
+    def forbundlename(self, bundlename):
+        """Obtain a compression engine registered to a bundle name.
+
+        Will raise KeyError if the bundle type isn't registered.
+
+        Will abort if the engine is known but not available.
+        """
+        engine = self._engines[self._bundlenames[bundlename]]
+        if not engine.available():
+            raise error.Abort(_('compression engine %s could not be loaded') %
+                              engine.name())
+        return engine
+
+    def forbundletype(self, bundletype):
+        """Obtain a compression engine registered to a bundle type.
+
+        Will raise KeyError if the bundle type isn't registered.
+
+        Will abort if the engine is known but not available.
+        """
+        engine = self._engines[self._bundletypes[bundletype]]
+        if not engine.available():
+            raise error.Abort(_('compression engine %s could not be loaded') %
+                              engine.name())
+        return engine
+
+    def supportedwireengines(self, role, onlyavailable=True):
+        """Obtain compression engines that support the wire protocol.
+
+        Returns a list of engines in prioritized order, most desired first.
+
+        If ``onlyavailable`` is set, filter out engines that can't be
+        loaded.
+        """
+        assert role in (SERVERROLE, CLIENTROLE)
+
+        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
+
+        engines = [self._engines[e] for e in self._wiretypes.values()]
+        if onlyavailable:
+            engines = [e for e in engines if e.available()]
+
+        def getkey(e):
+            # Sort first by priority, highest first. In case of tie, sort
+            # alphabetically. This is arbitrary, but ensures output is
+            # stable.
+            w = e.wireprotosupport()
+            return -1 * getattr(w, attr), w.name
+
+        return list(sorted(engines, key=getkey))
+
+    def forwiretype(self, wiretype):
+        engine = self._engines[self._wiretypes[wiretype]]
+        if not engine.available():
+            raise error.Abort(_('compression engine %s could not be loaded') %
+                              engine.name())
+        return engine
+
+    def forrevlogheader(self, header):
+        """Obtain a compression engine registered to a revlog header.
+
+        Will raise KeyError if the revlog header value isn't registered.
+        """
+        return self._engines[self._revlogheaders[header]]
+
+compengines = compressormanager()
+
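A sketch of resolving an engine through the global instance and
round-tripping a payload (via the 'GZ' bundle type registered by the zlib
engine below):

    import io
    eng = compengines.forbundletype('GZ')
    blob = b''.join(eng.compressstream(iter([b'some payload'])))
    fh = eng.decompressorreader(io.BytesIO(blob))
    assert fh.read(1024) == b'some payload'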
+class compressionengine(object):
+    """Base class for compression engines.
+
+    Compression engines must implement the interface defined by this class.
+    """
+    def name(self):
+        """Returns the name of the compression engine.
+
+        This is the key the engine is registered under.
+
+        This method must be implemented.
+        """
+        raise NotImplementedError()
+
+    def available(self):
+        """Whether the compression engine is available.
+
+        The intent of this method is to allow optional compression engines
+        that may not be available in all installations (such as engines relying
+        on C extensions that may not be present).
+        """
+        return True
+
+    def bundletype(self):
+        """Describes bundle identifiers for this engine.
+
+        If this compression engine isn't supported for bundles, returns None.
+
+        If this engine can be used for bundles, returns a 2-tuple of strings of
+        the user-facing "bundle spec" compression name and an internal
+        identifier used to denote the compression format within bundles. To
+        exclude the name from external usage, set the first element to ``None``.
+
+        If bundle compression is supported, the class must also implement
+        ``compressstream`` and ``decompressorreader``.
+        """
+        return None
+
+    def wireprotosupport(self):
+        """Declare support for this compression format on the wire protocol.
+
+        If this compression engine isn't supported for compressing wire
+        protocol payloads, returns None.
+
+        Otherwise, returns ``compenginewireprotosupport`` with the following
+        fields:
+
+        * String format identifier
+        * Integer priority for the server
+        * Integer priority for the client
+
+        The integer priorities are used to order the advertisement of format
+        support by server and client. The highest integer is advertised
+        first. Integers with non-positive values aren't advertised.
+
+        The priority values are somewhat arbitrary and only used for default
+        ordering. The relative order can be changed via config options.
+
+        If wire protocol compression is supported, the class must also implement
+        ``compressstream`` and ``decompressorreader``.
+        """
+        return None
+
+    def revlogheader(self):
+        """Header added to revlog chunks that identifies this engine.
+
+        If this engine can be used to compress revlogs, this method should
+        return the bytes used to identify chunks compressed with this engine.
+        Else, the method should return ``None`` to indicate it does not
+        participate in revlog compression.
+        """
+        return None
+
+    def compressstream(self, it, opts=None):
+        """Compress an iterator of chunks.
+
+        The method receives an iterator (ideally a generator) of chunks of
+        bytes to be compressed. It returns an iterator (ideally a generator)
+        of chunks of bytes representing the compressed output.
+
+        Optionally accepts an argument defining how to perform compression.
+        Each engine treats this argument differently.
+        """
+        raise NotImplementedError()
+
+    def decompressorreader(self, fh):
+        """Perform decompression on a file object.
+
+        Argument is an object with a ``read(size)`` method that returns
+        compressed data. Return value is an object with a ``read(size)`` that
+        returns uncompressed data.
+        """
+        raise NotImplementedError()
+
+    def revlogcompressor(self, opts=None):
+        """Obtain an object that can be used to compress revlog entries.
+
+        The object has a ``compress(data)`` method that compresses binary
+        data. This method returns compressed binary data or ``None`` if
+        the data could not be compressed (too small, not compressible, etc).
+        The returned data should have a header uniquely identifying this
+        compression format so decompression can be routed to this engine.
+        This header should be identified by the ``revlogheader()`` return
+        value.
+
+        The object has a ``decompress(data)`` method that decompresses
+        data. The method will only be called if ``data`` begins with
+        ``revlogheader()``. The method should return the raw, uncompressed
+        data or raise a ``RevlogError``.
+
+        The object is reusable but is not thread safe.
+        """
+        raise NotImplementedError()
+
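Extensions register new formats by subclassing this interface and handing an
instance to compengines.register(). A minimal, hypothetical pass-through
engine (the 'identity'/'ID' names are invented for illustration):

    class _identityengine(compressionengine):
        def name(self):
            return 'identity'

        def bundletype(self):
            # (user-facing bundle spec name, internal identifier)
            return 'identity', 'ID'

        def compressstream(self, it, opts=None):
            return it  # no-op "compression"

        def decompressorreader(self, fh):
            return fh  # a file object already satisfies read(size)

    compengines.register(_identityengine())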
+class _zlibengine(compressionengine):
+    def name(self):
+        return 'zlib'
+
+    def bundletype(self):
+        return 'gzip', 'GZ'
+
+    def wireprotosupport(self):
+        return compewireprotosupport('zlib', 20, 20)
+
+    def revlogheader(self):
+        return 'x'
+
+    def compressstream(self, it, opts=None):
+        opts = opts or {}
+
+        z = zlib.compressobj(opts.get('level', -1))
+        for chunk in it:
+            data = z.compress(chunk)
+            # Not all calls to compress emit data. It is cheaper to inspect
+            # here than to feed empty chunks through generator.
+            if data:
+                yield data
+
+        yield z.flush()
+
+    def decompressorreader(self, fh):
+        def gen():
+            d = zlib.decompressobj()
+            for chunk in filechunkiter(fh):
+                while chunk:
+                    # Limit output size to limit memory.
+                    yield d.decompress(chunk, 2 ** 18)
+                    chunk = d.unconsumed_tail
+
+        return chunkbuffer(gen())
+
+    class zlibrevlogcompressor(object):
+        def compress(self, data):
+            insize = len(data)
+            # Caller handles empty input case.
+            assert insize > 0
+
+            if insize < 44:
+                return None
+
+            elif insize <= 1000000:
+                compressed = zlib.compress(data)
+                if len(compressed) < insize:
+                    return compressed
+                return None
+
+            # zlib makes an internal copy of the input buffer, doubling
+            # memory usage for large inputs. So do streaming compression
+            # on large inputs.
+            else:
+                z = zlib.compressobj()
+                parts = []
+                pos = 0
+                while pos < insize:
+                    pos2 = pos + 2**20
+                    parts.append(z.compress(data[pos:pos2]))
+                    pos = pos2
+                parts.append(z.flush())
+
+                if sum(map(len, parts)) < insize:
+                    return ''.join(parts)
+                return None
+
+        def decompress(self, data):
+            try:
+                return zlib.decompress(data)
+            except zlib.error as e:
+                raise error.RevlogError(_('revlog decompress error: %s') %
+                                        str(e))
+
+    def revlogcompressor(self, opts=None):
+        return self.zlibrevlogcompressor()
+
+compengines.register(_zlibengine())
+
+class _bz2engine(compressionengine):
+    def name(self):
+        return 'bz2'
+
+    def bundletype(self):
+        return 'bzip2', 'BZ'
+
+    # We declare a protocol name but don't advertise by default because
+    # it is slow.
+    def wireprotosupport(self):
+        return compewireprotosupport('bzip2', 0, 0)
+
+    def compressstream(self, it, opts=None):
+        opts = opts or {}
+        z = bz2.BZ2Compressor(opts.get('level', 9))
+        for chunk in it:
+            data = z.compress(chunk)
+            if data:
+                yield data
+
+        yield z.flush()
+
+    def decompressorreader(self, fh):
+        def gen():
+            d = bz2.BZ2Decompressor()
+            for chunk in filechunkiter(fh):
+                yield d.decompress(chunk)
+
+        return chunkbuffer(gen())
+
+compengines.register(_bz2engine())
+
+class _truncatedbz2engine(compressionengine):
+    def name(self):
+        return 'bz2truncated'
+
+    def bundletype(self):
+        return None, '_truncatedBZ'
+
+    # We don't implement compressstream because it is hackily handled elsewhere.
+
+    def decompressorreader(self, fh):
+        def gen():
+            # The input stream doesn't have the 'BZ' header. So add it back.
+            d = bz2.BZ2Decompressor()
+            d.decompress('BZ')
+            for chunk in filechunkiter(fh):
+                yield d.decompress(chunk)
+
+        return chunkbuffer(gen())
+
+compengines.register(_truncatedbz2engine())
+
+class _noopengine(compressionengine):
+    def name(self):
+        return 'none'
+
+    def bundletype(self):
+        return 'none', 'UN'
+
+    # Clients always support uncompressed payloads. Servers don't because
+    # unless you are on a fast network, uncompressed payloads can easily
+    # saturate your network pipe.
+    def wireprotosupport(self):
+        return compewireprotosupport('none', 0, 10)
+
+    # We don't implement revlogheader because it is handled specially
+    # in the revlog class.
+
+    def compressstream(self, it, opts=None):
+        return it
+
+    def decompressorreader(self, fh):
+        return fh
+
+    class nooprevlogcompressor(object):
+        def compress(self, data):
+            return None
+
+    def revlogcompressor(self, opts=None):
+        return self.nooprevlogcompressor()
+
+compengines.register(_noopengine())
+
+class _zstdengine(compressionengine):
+    def name(self):
+        return 'zstd'
+
+    @propertycache
+    def _module(self):
+        # Not all installs have the zstd module available. So defer importing
+        # until first access.
+        try:
+            from . import zstd
+            # Force delayed import.
+            zstd.__version__
+            return zstd
+        except ImportError:
+            return None
+
+    def available(self):
+        return bool(self._module)
+
+    def bundletype(self):
+        return 'zstd', 'ZS'
+
+    def wireprotosupport(self):
+        return compewireprotosupport('zstd', 50, 50)
+
+    def revlogheader(self):
+        return '\x28'
+
+    def compressstream(self, it, opts=None):
+        opts = opts or {}
+        # zstd level 3 is almost always significantly faster than zlib
+        # while providing no worse compression. It strikes a good balance
+        # between speed and compression.
+        level = opts.get('level', 3)
+
+        zstd = self._module
+        z = zstd.ZstdCompressor(level=level).compressobj()
+        for chunk in it:
+            data = z.compress(chunk)
+            if data:
+                yield data
+
+        yield z.flush()
+
+    def decompressorreader(self, fh):
+        zstd = self._module
+        dctx = zstd.ZstdDecompressor()
+        return chunkbuffer(dctx.read_from(fh))
+
+    class zstdrevlogcompressor(object):
+        def __init__(self, zstd, level=3):
+            # Writing the content size adds a few bytes to the output. However,
+            # it allows decompression to be more optimal since we can
+            # pre-allocate a buffer to hold the result.
+            self._cctx = zstd.ZstdCompressor(level=level,
+                                             write_content_size=True)
+            self._dctx = zstd.ZstdDecompressor()
+            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
+            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+
+        def compress(self, data):
+            insize = len(data)
+            # Caller handles empty input case.
+            assert insize > 0
+
+            if insize < 50:
+                return None
+
+            elif insize <= 1000000:
+                compressed = self._cctx.compress(data)
+                if len(compressed) < insize:
+                    return compressed
+                return None
+            else:
+                z = self._cctx.compressobj()
+                chunks = []
+                pos = 0
+                while pos < insize:
+                    pos2 = pos + self._compinsize
+                    chunk = z.compress(data[pos:pos2])
+                    if chunk:
+                        chunks.append(chunk)
+                    pos = pos2
+                chunks.append(z.flush())
+
+                if sum(map(len, chunks)) < insize:
+                    return ''.join(chunks)
+                return None
+
+        def decompress(self, data):
+            insize = len(data)
+
+            try:
+                # This was measured to be faster than other streaming
+                # decompressors.
+                dobj = self._dctx.decompressobj()
+                chunks = []
+                pos = 0
+                while pos < insize:
+                    pos2 = pos + self._decompinsize
+                    chunk = dobj.decompress(data[pos:pos2])
+                    if chunk:
+                        chunks.append(chunk)
+                    pos = pos2
+                # Frame should be exhausted, so no finish() API.
+
+                return ''.join(chunks)
+            except Exception as e:
+                raise error.RevlogError(_('revlog decompress error: %s') %
+                                        str(e))
+
+    def revlogcompressor(self, opts=None):
+        opts = opts or {}
+        return self.zstdrevlogcompressor(self._module,
+                                         level=opts.get('level', 3))
+
+compengines.register(_zstdengine())
 
 # convenient shortcut
 dst = debugstacktrace
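
The compression engine API documented above is pluggable: anything
implementing ``compressionengine`` can be registered with ``compengines``.
As a hedged sketch (not part of this changeset; the 'identity' engine and
all of its names are hypothetical), a minimal pass-through engine built on
this API could look like:

from mercurial import util

class _identityengine(util.compressionengine):
    """Hypothetical engine whose "compression" passes data through."""

    def name(self):
        return 'identity'

    def bundletype(self):
        # No user-facing bundle spec name, only an internal identifier,
        # mirroring _truncatedbz2engine above.
        return None, '_identity'

    def compressstream(self, it, opts=None):
        # Return the chunks unchanged, as _noopengine does.
        return it

    def decompressorreader(self, fh):
        return fh

util.compengines.register(_identityengine())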
--- a/mercurial/verify.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/verify.py	Wed Jan 18 11:43:36 2017 -0500
@@ -51,7 +51,7 @@
         self.errors = 0
         self.warnings = 0
         self.havecl = len(repo.changelog) > 0
-        self.havemf = len(repo.manifest) > 0
+        self.havemf = len(repo.manifestlog._revlog) > 0
         self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
         self.lrugetctx = util.lrucachefunc(repo.changectx)
         self.refersmf = False
@@ -201,7 +201,8 @@
                         progress=None):
         repo = self.repo
         ui = self.ui
-        mf = self.repo.manifest.dirlog(dir)
+        mfl = self.repo.manifestlog
+        mf = mfl._revlog.dirlog(dir)
 
         if not dir:
             self.ui.status(_("checking manifests\n"))
@@ -235,7 +236,8 @@
                 self.err(lr, _("%s not in changesets") % short(n), label)
 
             try:
-                for f, fn, fl in mf.readshallowdelta(n).iterentries():
+                mfdelta = mfl.get(dir, n).readdelta(shallow=True)
+                for f, fn, fl in mfdelta.iterentries():
                     if not f:
                         self.err(lr, _("entry without name in manifest"))
                     elif f == "/dev/null":  # ignore this in very old repos
@@ -423,7 +425,7 @@
 
             # cross-check
             if f in filenodes:
-                fns = [(lr, n) for n, lr in filenodes[f].iteritems()]
+                fns = [(v, k) for k, v in filenodes[f].iteritems()]
                 for lr, node in sorted(fns):
                     self.err(lr, _("manifest refers to unknown revision %s") %
                              short(node), f)
--- a/mercurial/win32.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/win32.py	Wed Jan 18 11:43:36 2017 -0500
@@ -14,6 +14,11 @@
 import random
 import subprocess
 
+from . import (
+    encoding,
+    pycompat,
+)
+
 _kernel32 = ctypes.windll.kernel32
 _advapi32 = ctypes.windll.advapi32
 _user32 = ctypes.windll.user32
@@ -347,23 +352,25 @@
     pid = _kernel32.GetCurrentProcessId()
     _user32.EnumWindows(_WNDENUMPROC(callback), pid)
 
-def termwidth():
+def termsize():
     # cmd.exe does not handle CR like a unix console, the CR is
     # counted in the line length. On 80 columns consoles, if 80
     # characters are written, the following CR won't apply on the
     # current line but on the new one. Keep room for it.
-    width = 79
+    width = 80 - 1
+    height = 25
     # Query stderr to avoid problems with redirections
     screenbuf = _kernel32.GetStdHandle(
                   _STD_ERROR_HANDLE) # don't close the handle returned
     if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
-        return width
+        return width, height
     csbi = _CONSOLE_SCREEN_BUFFER_INFO()
     if not _kernel32.GetConsoleScreenBufferInfo(
                         screenbuf, ctypes.byref(csbi)):
-        return width
-    width = csbi.srWindow.Right - csbi.srWindow.Left
-    return width
+        return width, height
+    width = csbi.srWindow.Right - csbi.srWindow.Left  # don't '+ 1'
+    height = csbi.srWindow.Bottom - csbi.srWindow.Top + 1
+    return width, height
 
 def _1stchild(pid):
     '''return the 1st found child of the given pid
@@ -422,8 +429,8 @@
     pi = _PROCESS_INFORMATION()
 
     env = ''
-    for k in os.environ:
-        env += "%s=%s\0" % (k, os.environ[k])
+    for k in encoding.environ:
+        env += "%s=%s\0" % (k, encoding.environ[k])
     if not env:
         env = '\0'
     env += '\0'
@@ -431,12 +438,12 @@
     args = subprocess.list2cmdline(args)
     # Not running the command in shell mode makes Python 2.6 hang when
     # writing to hgweb output socket.
-    comspec = os.environ.get("COMSPEC", "cmd.exe")
+    comspec = encoding.environ.get("COMSPEC", "cmd.exe")
     args = comspec + " /c " + args
 
     res = _kernel32.CreateProcessA(
         None, args, None, None, False, _CREATE_NO_WINDOW,
-        env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi))
+        env, pycompat.getcwd(), ctypes.byref(si), ctypes.byref(pi))
     if not res:
         raise ctypes.WinError()
 
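
A hedged usage sketch of the new ``termsize()`` API (assuming a Windows
host where ``mercurial.win32`` imports cleanly); callers that previously
consumed a bare ``termwidth()`` integer now unpack both dimensions:

from mercurial import win32

width, height = win32.termsize()
print('console: %d columns x %d rows' % (width, height))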
--- a/mercurial/windows.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/windows.py	Wed Jan 18 11:43:36 2017 -0500
@@ -18,6 +18,7 @@
 from . import (
     encoding,
     osutil,
+    pycompat,
     win32,
 )
 
@@ -38,7 +39,6 @@
 setsignalhandler = win32.setsignalhandler
 spawndetached = win32.spawndetached
 split = os.path.split
-termwidth = win32.termwidth
 testpid = win32.testpid
 unlink = win32.unlink
 
@@ -172,14 +172,12 @@
             self.close()
             raise IOError(errno.EPIPE, 'Broken pipe')
 
-sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
-
 def _is_win_9x():
     '''return true if run on windows 95, 98 or me.'''
     try:
         return sys.getwindowsversion()[3] == 1
     except AttributeError:
-        return 'command' in os.environ.get('comspec', '')
+        return 'command' in encoding.environ.get('comspec', '')
 
 def openhardlinks():
     return not _is_win_9x()
@@ -217,7 +215,7 @@
         msvcrt.setmode(fno(), os.O_BINARY)
 
 def pconvert(path):
-    return path.replace(os.sep, '/')
+    return path.replace(pycompat.ossep, '/')
 
 def localpath(path):
     return path.replace('/', '\\')
@@ -305,8 +303,8 @@
     PATH isn't searched if command is an absolute or relative path.
     An extension from PATHEXT is found and added if not present.
     If command isn't found None is returned.'''
-    pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
-    pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
+    pathext = encoding.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
+    pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)]
     if os.path.splitext(command)[1].lower() in pathexts:
         pathexts = ['']
 
@@ -318,10 +316,10 @@
                 return executable
         return None
 
-    if os.sep in command:
+    if pycompat.ossep in command:
         return findexisting(command)
 
-    for path in os.environ.get('PATH', '').split(os.pathsep):
+    for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
         executable = findexisting(os.path.join(path, command))
         if executable is not None:
             return executable
--- a/mercurial/wireproto.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/wireproto.py	Wed Jan 18 11:43:36 2017 -0500
@@ -10,7 +10,6 @@
 import hashlib
 import itertools
 import os
-import sys
 import tempfile
 
 from .i18n import _
@@ -78,21 +77,6 @@
     #    """
     #    raise NotImplementedError()
 
-    def groupchunks(self, fh):
-        """Generator of chunks to send to the client.
-
-        Some protocols may have compressed the contents.
-        """
-        raise NotImplementedError()
-
-    def compresschunks(self, chunks):
-        """Generator of possible compressed chunks to send to the client.
-
-        This is like ``groupchunks()`` except it accepts a generator as
-        its argument.
-        """
-        raise NotImplementedError()
-
 class remotebatch(peer.batcher):
     '''batches the queued calls; uses as few roundtrips as possible'''
     def __init__(self, remote):
@@ -529,10 +513,19 @@
     """wireproto reply: binary stream
 
     The call was successful and the result is a stream.
-    Iterate on the `self.gen` attribute to retrieve chunks.
+
+    Accepts either a generator or an object with a ``read(size)`` method.
+
+    ``v1compressible`` indicates whether this data can be compressed for
+    "version 1" clients (technically: HTTP peers using
+    application/mercurial-0.1 media type). This flag should NOT be used on
+    new commands because new clients should support a more modern compression
+    mechanism.
     """
-    def __init__(self, gen):
+    def __init__(self, gen=None, reader=None, v1compressible=False):
         self.gen = gen
+        self.reader = reader
+        self.v1compressible = v1compressible
 
 class pushres(object):
     """wireproto reply: success with simple integer return
@@ -581,8 +574,8 @@
             opts[k] = others[k]
             del others[k]
     if others:
-        sys.stderr.write("warning: %s ignored unexpected arguments %s\n"
-                         % (cmd, ",".join(others)))
+        util.stderr.write("warning: %s ignored unexpected arguments %s\n"
+                          % (cmd, ",".join(others)))
     return opts
 
 def bundle1allowed(repo, action):
@@ -614,6 +607,55 @@
 
     return ui.configbool('server', 'bundle1', True)
 
+def supportedcompengines(ui, proto, role):
+    """Obtain the list of supported compression engines for a request."""
+    assert role in (util.CLIENTROLE, util.SERVERROLE)
+
+    compengines = util.compengines.supportedwireengines(role)
+
+    # Allow config to override default list and ordering.
+    if role == util.SERVERROLE:
+        configengines = ui.configlist('server', 'compressionengines')
+        config = 'server.compressionengines'
+    else:
+        # This is currently implemented mainly to facilitate testing. In most
+        # cases, the server should be in charge of choosing a compression
+        # engine because a server has the most to lose from a sub-optimal
+        # choice (e.g. CPU DoS due to an expensive engine or a network DoS
+        # due to poor compression ratio).
+        configengines = ui.configlist('experimental',
+                                      'clientcompressionengines')
+        config = 'experimental.clientcompressionengines'
+
+    # No explicit config. Filter out the ones that aren't supposed to be
+    # advertised and return default ordering.
+    if not configengines:
+        attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
+        return [e for e in compengines
+                if getattr(e.wireprotosupport(), attr) > 0]
+
+    # If compression engines are listed in the config, assume there is a good
+    # reason for it (like server operators wanting to achieve specific
+    # performance characteristics). So fail fast if the config references
+    # unusable compression engines.
+    validnames = set(e.name() for e in compengines)
+    invalidnames = set(e for e in configengines if e not in validnames)
+    if invalidnames:
+        raise error.Abort(_('invalid compression engine defined in %s: %s') %
+                          (config, ', '.join(sorted(invalidnames))))
+
+    compengines = [e for e in compengines if e.name() in configengines]
+    compengines = sorted(compengines,
+                         key=lambda e: configengines.index(e.name()))
+
+    if not compengines:
+        raise error.Abort(_('%s config option does not specify any known '
+                            'compression engines') % config,
+                          hint=_('usable compression engines: %s') %
+                          ', '.join(sorted(validnames)))
+
+    return compengines
+
 # list of commands
 commands = {}
 
@@ -723,10 +765,23 @@
         capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
         caps.append('bundle2=' + urlreq.quote(capsblob))
     caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
-    caps.append(
-        'httpheader=%d' % repo.ui.configint('server', 'maxhttpheaderlen', 1024))
-    if repo.ui.configbool('experimental', 'httppostargs', False):
-        caps.append('httppostargs')
+
+    if proto.name == 'http':
+        caps.append('httpheader=%d' %
+                    repo.ui.configint('server', 'maxhttpheaderlen', 1024))
+        if repo.ui.configbool('experimental', 'httppostargs', False):
+            caps.append('httppostargs')
+
+        # FUTURE advertise 0.2rx once support is implemented
+        # FUTURE advertise minrx and mintx after consulting config option
+        caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
+
+        compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
+        if compengines:
+            comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
+                                 for e in compengines)
+            caps.append('compression=%s' % comptypes)
+
     return caps
 
 # If you are writing an extension and consider wrapping this function. Wrap
@@ -739,14 +794,14 @@
 def changegroup(repo, proto, roots):
     nodes = decodelist(roots)
     cg = changegroupmod.changegroup(repo, nodes, 'serve')
-    return streamres(proto.groupchunks(cg))
+    return streamres(reader=cg, v1compressible=True)
 
 @wireprotocommand('changegroupsubset', 'bases heads')
 def changegroupsubset(repo, proto, bases, heads):
     bases = decodelist(bases)
     heads = decodelist(heads)
     cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
-    return streamres(proto.groupchunks(cg))
+    return streamres(reader=cg, v1compressible=True)
 
 @wireprotocommand('debugwireargs', 'one two *')
 def debugwireargs(repo, proto, one, two, others):
@@ -781,7 +836,7 @@
             return ooberror(bundle2required)
 
     chunks = exchange.getbundlechunks(repo, 'serve', **opts)
-    return streamres(proto.compresschunks(chunks))
+    return streamres(gen=chunks, v1compressible=True)
 
 @wireprotocommand('heads')
 def heads(repo, proto):
@@ -870,7 +925,7 @@
         # LockError may be raised before the first result is yielded. Don't
         # emit output until we're sure we got the lock successfully.
         it = streamclone.generatev1wireproto(repo)
-        return streamres(getstream(it))
+        return streamres(gen=getstream(it))
     except error.LockError:
         return '2\n'
 
@@ -900,7 +955,7 @@
             if util.safehasattr(r, 'addpart'):
                 # The return looks streamable, we are in the bundle2 case and
                 # should return a stream.
-                return streamres(r.getchunks())
+                return streamres(gen=r.getchunks())
             return pushres(r)
 
         finally:
@@ -913,11 +968,11 @@
             try:
                 raise
             except error.Abort:
-                # The old code we moved used sys.stderr directly.
+                # The old code we moved used util.stderr directly.
                 # We did not change it to minimise code change.
                # This needs to be moved to something proper.
                 # Feel free to do it.
-                sys.stderr.write("abort: %s\n" % exc)
+                util.stderr.write("abort: %s\n" % exc)
                 return pushres(0)
             except error.PushRaced:
                 return pusherr(str(exc))
@@ -962,4 +1017,4 @@
                                                manargs, advargs))
         except error.PushRaced as exc:
             bundler.newpart('error:pushraced', [('message', str(exc))])
-        return streamres(bundler.getchunks())
+        return streamres(gen=bundler.getchunks())
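
To make the reworked ``streamres`` constructor concrete, here is a hedged
sketch of a wire protocol command (the 'examplestream' name and payload
are hypothetical) that returns a generator via ``gen``; legacy commands
like ``changegroup`` above instead pass ``reader=`` together with
``v1compressible=True``:

from mercurial import wireproto

@wireproto.wireprotocommand('examplestream', '')
def examplestream(repo, proto):
    def chunks():
        yield 'hello '
        yield 'world\n'
    return wireproto.streamres(gen=chunks())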
--- a/mercurial/worker.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/mercurial/worker.py	Wed Jan 18 11:43:36 2017 -0500
@@ -11,10 +11,15 @@
 import os
 import signal
 import sys
-import threading
 
 from .i18n import _
-from . import error
+from . import (
+    encoding,
+    error,
+    pycompat,
+    scmutil,
+    util,
+)
 
 def countcpus():
     '''try to count the number of CPUs on the system'''
@@ -29,7 +34,7 @@
 
     # windows
     try:
-        n = int(os.environ['NUMBER_OF_PROCESSORS'])
+        n = int(encoding.environ['NUMBER_OF_PROCESSORS'])
         if n > 0:
             return n
     except (KeyError, ValueError):
@@ -48,7 +53,7 @@
             raise error.Abort(_('number of cpus must be an integer'))
     return min(max(countcpus(), 4), 32)
 
-if os.name == 'posix':
+if pycompat.osname == 'posix':
     _startupcost = 0.01
 else:
     _startupcost = 1e30
@@ -85,25 +90,12 @@
     workers = _numworkers(ui)
     oldhandler = signal.getsignal(signal.SIGINT)
     signal.signal(signal.SIGINT, signal.SIG_IGN)
-    pids, problem = [], [0]
-    for pargs in partition(args, workers):
-        pid = os.fork()
-        if pid == 0:
-            signal.signal(signal.SIGINT, oldhandler)
-            try:
-                os.close(rfd)
-                for i, item in func(*(staticargs + (pargs,))):
-                    os.write(wfd, '%d %s\n' % (i, item))
-                os._exit(0)
-            except KeyboardInterrupt:
-                os._exit(255)
-                # other exceptions are allowed to propagate, we rely
-                # on lock.py's pid checks to avoid release callbacks
-        pids.append(pid)
-    pids.reverse()
-    os.close(wfd)
-    fp = os.fdopen(rfd, 'rb', 0)
+    pids, problem = set(), [0]
     def killworkers():
+        # unregister SIGCHLD handler as all children will be killed. This
+        # function shouldn't be interrupted by another SIGCHLD; otherwise pids
+        # could be updated while iterating, which would cause inconsistency.
+        signal.signal(signal.SIGCHLD, oldchldhandler)
         # if one worker bails, there's no good reason to wait for the rest
         for p in pids:
             try:
@@ -111,24 +103,72 @@
             except OSError as err:
                 if err.errno != errno.ESRCH:
                     raise
-    def waitforworkers():
-        for _pid in pids:
-            st = _exitstatus(os.wait()[1])
+    def waitforworkers(blocking=True):
+        for pid in pids.copy():
+            p = st = 0
+            while True:
+                try:
+                    p, st = os.waitpid(pid, (0 if blocking else os.WNOHANG))
+                    break
+                except OSError as e:
+                    if e.errno == errno.EINTR:
+                        continue
+                    elif e.errno == errno.ECHILD:
+                        # child may already be reaped, but pids not yet
+                        # updated (maybe interrupted just after waitpid)
+                        pids.discard(pid)
+                        break
+                    else:
+                        raise
+            if p:
+                pids.discard(p)
+                st = _exitstatus(st)
             if st and not problem[0]:
                 problem[0] = st
-                killworkers()
-    t = threading.Thread(target=waitforworkers)
-    t.start()
+    def sigchldhandler(signum, frame):
+        waitforworkers(blocking=False)
+        if problem[0]:
+            killworkers()
+    oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
+    for pargs in partition(args, workers):
+        pid = os.fork()
+        if pid == 0:
+            signal.signal(signal.SIGINT, oldhandler)
+            signal.signal(signal.SIGCHLD, oldchldhandler)
+
+            def workerfunc():
+                os.close(rfd)
+                for i, item in func(*(staticargs + (pargs,))):
+                    os.write(wfd, '%d %s\n' % (i, item))
+
+            # make sure we use os._exit in all code paths. otherwise the worker
+            # may do some clean-ups which could cause surprises like deadlock.
+            # see sshpeer.cleanup for example.
+            try:
+                scmutil.callcatch(ui, workerfunc)
+            except KeyboardInterrupt:
+                os._exit(255)
+            except: # never return, therefore no re-raises
+                try:
+                    ui.traceback()
+                finally:
+                    os._exit(255)
+            else:
+                os._exit(0)
+        pids.add(pid)
+    os.close(wfd)
+    fp = os.fdopen(rfd, 'rb', 0)
     def cleanup():
         signal.signal(signal.SIGINT, oldhandler)
-        t.join()
+        waitforworkers()
+        signal.signal(signal.SIGCHLD, oldchldhandler)
         status = problem[0]
         if status:
             if status < 0:
                 os.kill(os.getpid(), -status)
             sys.exit(status)
     try:
-        for line in fp:
+        for line in util.iterfile(fp):
             l = line.split(' ', 1)
             yield int(l[0]), l[1][:-1]
     except: # re-raises
@@ -147,7 +187,7 @@
     elif os.WIFSIGNALED(code):
         return -os.WTERMSIG(code)
 
-if os.name != 'nt':
+if pycompat.osname != 'nt':
     _platformworker = _posixworker
     _exitstatus = _posixexitstatus
 
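
The SIGCHLD-driven reaping above can be read in isolation. This is a
self-contained sketch of the pattern, simplified (no EINTR retry loop and
no exit-status decoding), not the exact worker code:

import errno
import os
import signal

pids = set()      # live worker pids
problem = [0]     # first bad exit status, if any

def waitforworkers(blocking=True):
    # Reap every known child. With WNOHANG this never blocks, so it is
    # safe to call from the signal handler itself.
    for pid in pids.copy():
        try:
            p, st = os.waitpid(pid, 0 if blocking else os.WNOHANG)
        except OSError as e:
            if e.errno == errno.ECHILD:
                # child already reaped elsewhere; forget it
                pids.discard(pid)
            continue
        if p:
            pids.discard(p)
            if st and not problem[0]:
                problem[0] = st

def sigchldhandler(signum, frame):
    waitforworkers(blocking=False)

oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)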
--- a/setup.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/setup.py	Wed Jan 18 11:43:36 2017 -0500
@@ -67,6 +67,7 @@
     from setuptools import setup
 else:
     from distutils.core import setup
+from distutils.ccompiler import new_compiler
 from distutils.core import Command, Extension
 from distutils.dist import Distribution
 from distutils.command.build import build
@@ -167,7 +168,8 @@
 # to not use any hgrc files and do no localization.
 env = {'HGMODULEPOLICY': 'py',
        'HGRCPATH': '',
-       'LANGUAGE': 'C'}
+       'LANGUAGE': 'C',
+       'PATH': ''} # make pypi modules that use os.environ['PATH'] happy
 if 'LD_LIBRARY_PATH' in os.environ:
     env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
 if 'SystemRoot' in os.environ:
@@ -275,7 +277,30 @@
         # too late for some cases
         return not self.pure and Distribution.has_ext_modules(self)
 
+# This is ugly as a one-liner. So use a variable.
+buildextnegops = dict(getattr(build_ext, 'negative_options', {}))
+buildextnegops['no-zstd'] = 'zstd'
+
 class hgbuildext(build_ext):
+    user_options = build_ext.user_options + [
+        ('zstd', None, 'compile zstd bindings [default]'),
+        ('no-zstd', None, 'do not compile zstd bindings'),
+    ]
+
+    boolean_options = build_ext.boolean_options + ['zstd']
+    negative_opt = buildextnegops
+
+    def initialize_options(self):
+        self.zstd = True
+        return build_ext.initialize_options(self)
+
+    def build_extensions(self):
+        # Filter out zstd if disabled via argument.
+        if not self.zstd:
+            self.extensions = [e for e in self.extensions
+                               if e.name != 'mercurial.zstd']
+
+        return build_ext.build_extensions(self)
 
     def build_extension(self, ext):
         try:
@@ -318,14 +343,16 @@
         if self.distribution.pure:
             self.distribution.ext_modules = []
         elif self.distribution.cffi:
-            import setup_mpatch_cffi
-            import setup_bdiff_cffi
-            exts = [setup_mpatch_cffi.ffi.distutils_extension(),
-                    setup_bdiff_cffi.ffi.distutils_extension()]
+            from mercurial.cffi import (
+                bdiff,
+                mpatch,
+            )
+            exts = [mpatch.ffi.distutils_extension(),
+                    bdiff.ffi.distutils_extension()]
             # cffi modules go here
             if sys.platform == 'darwin':
-                import setup_osutil_cffi
-                exts.append(setup_osutil_cffi.ffi.distutils_extension())
+                from mercurial.cffi import osutil
+                exts.append(osutil.ffi.distutils_extension())
             self.distribution.ext_modules = exts
         else:
             h = os.path.join(get_python_inc(), 'Python.h')
@@ -551,8 +578,14 @@
                   'mercurial/compat.h',
                   'mercurial/util.h']
 
+osutil_cflags = []
 osutil_ldflags = []
 
+# platform specific macros: HAVE_SETPROCTITLE
+for plat, func in [(re.compile('freebsd'), 'setproctitle')]:
+    if plat.search(sys.platform) and hasfunction(new_compiler(), func):
+        osutil_cflags.append('-DHAVE_%s' % func.upper())
+
 if sys.platform == 'darwin':
     osutil_ldflags += ['-framework', 'ApplicationServices']
 
@@ -573,12 +606,17 @@
                                     'mercurial/pathencode.c'],
               depends=common_depends),
     Extension('mercurial.osutil', ['mercurial/osutil.c'],
+              extra_compile_args=osutil_cflags,
               extra_link_args=osutil_ldflags,
               depends=common_depends),
     Extension('hgext.fsmonitor.pywatchman.bser',
               ['hgext/fsmonitor/pywatchman/bser.c']),
     ]
 
+sys.path.insert(0, 'contrib/python-zstandard')
+import setup_zstd
+extmodules.append(setup_zstd.get_c_extension(name='mercurial.zstd'))
+
 try:
     from distutils import cygwinccompiler
 
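
The new ``--zstd``/``--no-zstd`` switches make the bundled zstd bindings
optional at build time; assuming a normal source checkout, opting out
looks like:

  $ python setup.py build_ext --no-zstd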
--- a/setup_bdiff_cffi.py	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-from __future__ import absolute_import
-
-import cffi
-import os
-
-ffi = cffi.FFI()
-ffi.set_source("_bdiff_cffi",
-    open(os.path.join(os.path.join(os.path.dirname(__file__), 'mercurial'),
-        'bdiff.c')).read(), include_dirs=['mercurial'])
-ffi.cdef("""
-struct bdiff_line {
-    int hash, n, e;
-    ssize_t len;
-    const char *l;
-};
-
-struct bdiff_hunk;
-struct bdiff_hunk {
-    int a1, a2, b1, b2;
-    struct bdiff_hunk *next;
-};
-
-int bdiff_splitlines(const char *a, ssize_t len, struct bdiff_line **lr);
-int bdiff_diff(struct bdiff_line *a, int an, struct bdiff_line *b, int bn,
-    struct bdiff_hunk *base);
-void bdiff_freehunks(struct bdiff_hunk *l);
-void free(void*);
-""")
-
-if __name__ == '__main__':
-    ffi.compile()
--- a/setup_mpatch_cffi.py	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-from __future__ import absolute_import
-
-import cffi
-import os
-
-ffi = cffi.FFI()
-mpatch_c = os.path.join(os.path.join(os.path.dirname(__file__), 'mercurial',
-                                     'mpatch.c'))
-ffi.set_source("_mpatch_cffi", open(mpatch_c).read(),
-               include_dirs=["mercurial"])
-ffi.cdef("""
-
-struct mpatch_frag {
-       int start, end, len;
-       const char *data;
-};
-
-struct mpatch_flist {
-       struct mpatch_frag *base, *head, *tail;
-};
-
-extern "Python" struct mpatch_flist* cffi_get_next_item(void*, ssize_t);
-
-int mpatch_decode(const char *bin, ssize_t len, struct mpatch_flist** res);
-ssize_t mpatch_calcsize(size_t len, struct mpatch_flist *l);
-void mpatch_lfree(struct mpatch_flist *a);
-static int mpatch_apply(char *buf, const char *orig, size_t len,
-                        struct mpatch_flist *l);
-struct mpatch_flist *mpatch_fold(void *bins,
-                       struct mpatch_flist* (*get_next_item)(void*, ssize_t),
-                       ssize_t start, ssize_t end);
-""")
-
-if __name__ == '__main__':
-    ffi.compile()
--- a/setup_osutil_cffi.py	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-from __future__ import absolute_import
-
-import cffi
-
-ffi = cffi.FFI()
-ffi.set_source("_osutil_cffi", """
-#include <sys/attr.h>
-#include <sys/vnode.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <time.h>
-
-typedef struct val_attrs {
-    uint32_t          length;
-    attribute_set_t   returned;
-    attrreference_t   name_info;
-    fsobj_type_t      obj_type;
-    struct timespec   mtime;
-    uint32_t          accessmask;
-    off_t             datalength;
-} __attribute__((aligned(4), packed)) val_attrs_t;
-""", include_dirs=['mercurial'])
-ffi.cdef('''
-
-typedef uint32_t attrgroup_t;
-
-typedef struct attrlist {
-    uint16_t     bitmapcount; /* number of attr. bit sets in list */
-    uint16_t   reserved;    /* (to maintain 4-byte alignment) */
-    attrgroup_t commonattr;  /* common attribute group */
-    attrgroup_t volattr;     /* volume attribute group */
-    attrgroup_t dirattr;     /* directory attribute group */
-    attrgroup_t fileattr;    /* file attribute group */
-    attrgroup_t forkattr;    /* fork attribute group */
-    ...;
-};
-
-typedef struct attribute_set {
-    ...;
-} attribute_set_t;
-
-typedef struct attrreference {
-    int attr_dataoffset;
-    int attr_length;
-    ...;
-} attrreference_t;
-
-typedef int ... off_t;
-
-typedef struct val_attrs {
-    uint32_t          length;
-    attribute_set_t   returned;
-    attrreference_t   name_info;
-    uint32_t          obj_type;
-    struct timespec   mtime;
-    uint32_t          accessmask;
-    off_t             datalength;
-    ...;
-} val_attrs_t;
-
-/* the exact layout of the above struct will be figured out during build time */
-
-typedef int ... time_t;
-
-typedef struct timespec {
-    time_t tv_sec;
-    ...;
-};
-
-int getattrlist(const char* path, struct attrlist * attrList, void * attrBuf,
-                size_t attrBufSize, unsigned int options);
-
-int getattrlistbulk(int dirfd, struct attrlist * attrList, void * attrBuf,
-                    size_t attrBufSize, uint64_t options);
-
-#define ATTR_BIT_MAP_COUNT ...
-#define ATTR_CMN_NAME ...
-#define ATTR_CMN_OBJTYPE ...
-#define ATTR_CMN_MODTIME ...
-#define ATTR_CMN_ACCESSMASK ...
-#define ATTR_CMN_ERROR ...
-#define ATTR_CMN_RETURNED_ATTRS ...
-#define ATTR_FILE_DATALENGTH ...
-
-#define VREG ...
-#define VDIR ...
-#define VLNK ...
-#define VBLK ...
-#define VCHR ...
-#define VFIFO ...
-#define VSOCK ...
-
-#define S_IFMT ...
-
-int open(const char *path, int oflag, int perm);
-int close(int);
-
-#define O_RDONLY ...
-''')
-
-if __name__ == '__main__':
-    ffi.compile()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/drawdag.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,311 @@
+# drawdag.py - convert ASCII revision DAG to actual changesets
+#
+# Copyright 2016 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""
+create changesets from an ASCII graph for testing purposes.
+
+For example, given the following input::
+
+    c d
+    |/
+    b
+    |
+    a
+
+4 changesets and 4 local tags will be created.
+`hg log -G -T "{rev} {desc} (tag: {tags})"` will output::
+
+    o  3 d (tag: d tip)
+    |
+    | o  2 c (tag: c)
+    |/
+    o  1 b (tag: b)
+    |
+    o  0 a (tag: a)
+
+Root nodes (nodes without parents) in the graph can be revsets
+pointing to existing nodes.  The ASCII graph could also have disconnected
+components with the same names referring to the same changeset.
+
+Therefore, given a repo with the 4 changesets (and tags) above, and the
+following ASCII graph as input::
+
+    foo    bar       bar  foo
+     |     /          |    |
+    ancestor(c,d)     a   baz
+
+The result (`hg log -G -T "{desc}"`) will look like::
+
+    o    foo
+    |\
+    +---o  bar
+    | | |
+    | o |  baz
+    |  /
+    +---o  d
+    | |
+    +---o  c
+    | |
+    o |  b
+    |/
+    o  a
+
+Note that if you take the above `hg log` output directly as input, it will work
+as expected - the result would be an isomorphic graph::
+
+    o    foo
+    |\
+    | | o  d
+    | |/
+    | | o  c
+    | |/
+    | | o  bar
+    | |/|
+    | o |  b
+    | |/
+    o /  baz
+     /
+    o  a
+
+This is because 'o' is specially handled in the input: instead of using 'o' as
+the node name, the word to the right will be used.
+"""
+from __future__ import absolute_import, print_function
+
+import collections
+import itertools
+
+from mercurial.i18n import _
+from mercurial import (
+    cmdutil,
+    context,
+    error,
+    node,
+    scmutil,
+)
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+
+_pipechars = '\\/+-|'
+_nonpipechars = ''.join(chr(i) for i in xrange(33, 127)
+                        if chr(i) not in _pipechars)
+
+def _isname(ch):
+    """char -> bool. return True if ch looks like part of a name, False
+    otherwise"""
+    return ch in _nonpipechars
+
+def _parseasciigraph(text):
+    """str -> {str : [str]}. convert the ASCII graph to edges"""
+    lines = text.splitlines()
+    edges = collections.defaultdict(list)  # {node: []}
+
+    def get(y, x):
+        """(int, int) -> char. give a coordinate, return the char. return a
+        space for anything out of range"""
+        if x < 0 or y < 0:
+            return ' '
+        try:
+            return lines[y][x]
+        except IndexError:
+            return ' '
+
+    def getname(y, x):
+        """(int, int) -> str. like get(y, x) but concatenate left and right
+        parts. if name is an 'o', try to replace it to the right"""
+        result = ''
+        for i in itertools.count(0):
+            ch = get(y, x - i)
+            if not _isname(ch):
+                break
+            result = ch + result
+        for i in itertools.count(1):
+            ch = get(y, x + i)
+            if not _isname(ch):
+                break
+            result += ch
+        if result == 'o':
+            # special handling, find the name to the right
+            result = ''
+            for i in itertools.count(2):
+                ch = get(y, x + i)
+                if ch == ' ' or ch in _pipechars:
+                    if result or x + i >= len(lines[y]):
+                        break
+                else:
+                    result += ch
+            return result or 'o'
+        return result
+
+    def parents(y, x):
+        """(int, int) -> [str]. follow the ASCII edges at given position,
+        return a list of parents"""
+        visited = set([(y, x)])
+        visit = []
+        result = []
+
+        def follow(y, x, expected):
+            """conditionally append (y, x) to visit array, if it's a char
+            in excepted. 'o' in expected means an '_isname' test.
+            if '-' (or '+') is not in excepted, and get(y, x) is '-' (or '+'),
+            the next line (y + 1, x) will be checked instead."""
+            ch = get(y, x)
+            if any(ch == c and c not in expected for c in '-+'):
+                # check the next line (y + 1, x) instead, per the docstring
+                return follow(y + 1, x, expected)
+            if ch in expected or ('o' in expected and _isname(ch)):
+                visit.append((y, x))
+
+        #  -o-  # starting point:
+        #  /|\ # follow '-' (horizontally), and '/|\' (to the bottom)
+        follow(y + 1, x, '|')
+        follow(y + 1, x - 1, '/')
+        follow(y + 1, x + 1, '\\')
+        follow(y, x - 1, '-')
+        follow(y, x + 1, '-')
+
+        while visit:
+            y, x = visit.pop()
+            if (y, x) in visited:
+                continue
+            visited.add((y, x))
+            ch = get(y, x)
+            if _isname(ch):
+                result.append(getname(y, x))
+                continue
+            elif ch == '|':
+                follow(y + 1, x, '/|o')
+                follow(y + 1, x - 1, '/')
+                follow(y + 1, x + 1, '\\')
+            elif ch == '+':
+                follow(y, x - 1, '-')
+                follow(y, x + 1, '-')
+                follow(y + 1, x - 1, '/')
+                follow(y + 1, x + 1, '\\')
+                follow(y + 1, x, '|')
+            elif ch == '\\':
+                follow(y + 1, x + 1, '\\|o')
+            elif ch == '/':
+                follow(y + 1, x - 1, '/|o')
+            elif ch == '-':
+                follow(y, x - 1, '-+o')
+                follow(y, x + 1, '-+o')
+        return result
+
+    for y, line in enumerate(lines):
+        for x, ch in enumerate(line):
+            if ch == '#':  # comment
+                break
+            if _isname(ch):
+                edges[getname(y, x)] += parents(y, x)
+
+    return dict(edges)
+
+class simplefilectx(object):
+    def __init__(self, path, data):
+        self._data = data
+        self._path = path
+
+    def data(self):
+        return self._data
+
+    def path(self):
+        return self._path
+
+    def renamed(self):
+        return None
+
+    def flags(self):
+        return ''
+
+class simplecommitctx(context.committablectx):
+    def __init__(self, repo, name, parentctxs, added=None):
+        opts = {
+            'changes': scmutil.status([], added or [], [], [], [], [], []),
+            'date': '0 0',
+            'extra': {'branch': 'default'},
+        }
+        super(simplecommitctx, self).__init__(self, name, **opts)
+        self._repo = repo
+        self._name = name
+        self._parents = parentctxs
+        self._parents.sort(key=lambda c: c.node())
+        while len(self._parents) < 2:
+            self._parents.append(repo[node.nullid])
+
+    def filectx(self, key):
+        return simplefilectx(key, self._name)
+
+    def commit(self):
+        return self._repo.commitctx(self)
+
+def _walkgraph(edges):
+    """yield node, parents in topologically order"""
+    visible = set(edges.keys())
+    remaining = {}  # {str: [str]}
+    for k, vs in edges.iteritems():
+        for v in vs:
+            if v not in remaining:
+                remaining[v] = []
+        remaining[k] = vs[:]
+    while remaining:
+        leafs = [k for k, v in remaining.items() if not v]
+        if not leafs:
+            raise error.Abort(_('the graph has cycles'))
+        for leaf in sorted(leafs):
+            if leaf in visible:
+                yield leaf, edges[leaf]
+            del remaining[leaf]
+            for k, v in remaining.iteritems():
+                if leaf in v:
+                    v.remove(leaf)
+
+@command('debugdrawdag', [])
+def debugdrawdag(ui, repo, **opts):
+    """read an ASCII graph from stdin and create changesets
+
+    The ASCII graph is like what :hg:`log -G` outputs, with each `o` replaced
+    by the name of the node. The command will create dummy changesets and
+    local tags with those names to make the dummy changesets easier to refer
+    to.
+
+    If the name of a node is a single character 'o', it will be replaced by the
+    word to the right. This makes it easier to reuse
+    :hg:`log -G -T '{desc}'` outputs.
+
+    For root nodes (those with no parents), a revset can be used to query the
+    existing repo. Note that the revset cannot contain characters that could
+    be read as part of the graph edges, like `|/+-\`.
+    """
+    text = ui.fin.read()
+
+    # parse the graph and make sure len(parents) <= 2 for each node
+    edges = _parseasciigraph(text)
+    for k, v in edges.iteritems():
+        if len(v) > 2:
+            raise error.Abort(_('%s: too many parents: %s')
+                              % (k, ' '.join(v)))
+
+    committed = {None: node.nullid}  # {name: node}
+
+    # for leaf nodes, try to find existing nodes in repo
+    for name, parents in edges.iteritems():
+        if len(parents) == 0:
+            try:
+                committed[name] = scmutil.revsingle(repo, name).node()
+            except error.RepoLookupError:
+                pass
+
+    # commit in topological order
+    for name, parents in _walkgraph(edges):
+        if name in committed:
+            continue
+        pctxs = [repo[committed[n]] for n in parents]
+        ctx = simplecommitctx(repo, name, pctxs, [name])
+        n = ctx.commit()
+        committed[name] = n
+        repo.tag(name, n, message=None, user=None, date=None, local=True)
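
For a concrete sense of what ``_parseasciigraph`` produces, here is a
hedged doctest-style sketch (assuming drawdag is importable, e.g. from
within the tests directory) using the module docstring's first example:

from drawdag import _parseasciigraph

text = '''
 c d
 |/
 b
 |
 a
'''
edges = _parseasciigraph(text)
# each name maps to its list of parents; root nodes map to []
assert edges == {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b']}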
--- a/tests/dumbhttp.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/dumbhttp.py	Wed Jan 18 11:43:36 2017 -0500
@@ -11,7 +11,7 @@
 import sys
 
 from mercurial import (
-    cmdutil,
+    server,
     util,
 )
 
@@ -51,5 +51,5 @@
             'daemon': not options.foreground,
             'daemon_postexec': options.daemon_postexec}
     service = simplehttpservice(options.host, options.port)
-    cmdutil.service(opts, initfn=service.init, runfn=service.run,
-                    runargs=[sys.executable, __file__] + sys.argv[1:])
+    server.runservice(opts, initfn=service.init, runfn=service.run,
+                      runargs=[sys.executable, __file__] + sys.argv[1:])
--- a/tests/dummysmtpd.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/dummysmtpd.py	Wed Jan 18 11:43:36 2017 -0500
@@ -11,7 +11,7 @@
 import sys
 
 from mercurial import (
-    cmdutil,
+    server,
     sslutil,
     ui as uimod,
 )
@@ -37,7 +37,7 @@
         if not pair:
             return
         conn, addr = pair
-        ui = uimod.ui()
+        ui = uimod.ui.load()
         try:
             # wrap_socket() would block, but we don't care
             conn = sslutil.wrapserversocket(conn, ui, certfile=self._certfile)
@@ -75,8 +75,8 @@
             dummysmtpsecureserver(addr, opts.certificate)
         log('listening at %s:%d\n' % addr)
 
-    cmdutil.service(vars(opts), initfn=init, runfn=run,
-                    runargs=[sys.executable, __file__] + sys.argv[1:])
+    server.runservice(vars(opts), initfn=init, runfn=run,
+                      runargs=[sys.executable, __file__] + sys.argv[1:])
 
 if __name__ == '__main__':
     main()
--- a/tests/failfilemerge.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/failfilemerge.py	Wed Jan 18 11:43:36 2017 -0500
@@ -1,4 +1,4 @@
-# extension to emulate interupting filemerge._filemerge
+# extension to emulate interrupting filemerge._filemerge
 
 from __future__ import absolute_import
 
--- a/tests/filterpyflakes.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/filterpyflakes.py	Wed Jan 18 11:43:36 2017 -0500
@@ -7,55 +7,33 @@
 import re
 import sys
 
-def makekey(typeandline):
-    """
-    for sorting lines by: msgtype, path/to/file, lineno, message
-
-    typeandline is a sequence of a message type and the entire message line
-    the message line format is path/to/file:line: message
-
-    >>> makekey((3, 'example.py:36: any message'))
-    (3, 'example.py', 36, ' any message')
-    >>> makekey((7, 'path/to/file.py:68: dummy message'))
-    (7, 'path/to/file.py', 68, ' dummy message')
-    >>> makekey((2, 'fn:88: m')) > makekey((2, 'fn:9: m'))
-    True
-    """
-
-    msgtype, line = typeandline
-    fname, line, message = line.split(":", 2)
-    # line as int for ordering 9 before 88
-    return msgtype, fname, int(line), message
-
-
 lines = []
 for line in sys.stdin:
-    # We whitelist tests (see more messages in pyflakes.messages)
+    # We blacklist tests that are too noisy for us
     pats = [
-            (r"imported but unused", None),
-            (r"local variable '.*' is assigned to but never used", None),
-            (r"unable to detect undefined names", None),
-            (r"undefined name '.*'",
-             r"undefined name '(WindowsError|memoryview)'")
-           ]
+        r"undefined name '(WindowsError|memoryview)'",
+        r"redefinition of unused '[^']+' from line",
+    ]
 
-    for msgtype, (pat, excl) in enumerate(pats):
-        if re.search(pat, line) and (not excl or not re.search(excl, line)):
+    keep = True
+    for pat in pats:
+        if re.search(pat, line):
+            keep = False
             break # pattern matches
-    else:
-        continue # no pattern matched, next line
-    fn = line.split(':', 1)[0]
-    f = open(fn)
-    data = f.read()
-    f.close()
-    if 'no-' 'check-code' in data:
-        continue
-    lines.append((msgtype, line))
+    if keep:
+        fn = line.split(':', 1)[0]
+        f = open(fn)
+        data = f.read()
+        f.close()
+        if 'no-' 'check-code' in data:
+            continue
+        lines.append(line)
 
-for msgtype, line in sorted(lines, key=makekey):
+for line in lines:
     sys.stdout.write(line)
 print()
 
 # self test of "undefined name" detection for other than 'memoryview'
 if False:
+    print(memoryview)
     print(undefinedname)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/flagprocessorext.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,137 @@
+# coding=UTF-8
+
+from __future__ import absolute_import
+
+import base64
+import zlib
+
+from mercurial import (
+    changegroup,
+    extensions,
+    filelog,
+    revlog,
+    util,
+)
+
+# Test only: These flags are defined here only in the context of testing the
+# behavior of the flag processor. The canonical way to add flags is to get in
+# touch with the community and make them known in revlog.
+REVIDX_NOOP = (1 << 3)
+REVIDX_BASE64 = (1 << 2)
+REVIDX_GZIP = (1 << 1)
+REVIDX_FAIL = 1
+
+def validatehash(self, text):
+    return True
+
+def bypass(self, text):
+    return False
+
+def noopdonothing(self, text):
+    return (text, True)
+
+def b64encode(self, text):
+    return (base64.b64encode(text), False)
+
+def b64decode(self, text):
+    return (base64.b64decode(text), True)
+
+def gzipcompress(self, text):
+    return (zlib.compress(text), False)
+
+def gzipdecompress(self, text):
+    return (zlib.decompress(text), True)
+
+def supportedoutgoingversions(orig, repo):
+    versions = orig(repo)
+    versions.discard('01')
+    versions.discard('02')
+    versions.add('03')
+    return versions
+
+def allsupportedversions(orig, ui):
+    versions = orig(ui)
+    versions.add('03')
+    return versions
+
+def noopaddrevision(orig, self, text, transaction, link, p1, p2,
+                    cachedelta=None, node=None,
+                    flags=revlog.REVIDX_DEFAULT_FLAGS):
+    if '[NOOP]' in text:
+        flags |= REVIDX_NOOP
+    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
+                node=node, flags=flags)
+
+def b64addrevision(orig, self, text, transaction, link, p1, p2,
+                   cachedelta=None, node=None,
+                   flags=revlog.REVIDX_DEFAULT_FLAGS):
+    if '[BASE64]' in text:
+        flags |= REVIDX_BASE64
+    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
+                node=node, flags=flags)
+
+def gzipaddrevision(orig, self, text, transaction, link, p1, p2,
+                    cachedelta=None, node=None,
+                    flags=revlog.REVIDX_DEFAULT_FLAGS):
+    if '[GZIP]' in text:
+        flags |= REVIDX_GZIP
+    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
+                node=node, flags=flags)
+
+def failaddrevision(orig, self, text, transaction, link, p1, p2,
+                    cachedelta=None, node=None,
+                    flags=revlog.REVIDX_DEFAULT_FLAGS):
+    # This addrevision wrapper is meant to add a flag we will not have
+    # transforms registered for, ensuring we handle this error case.
+    if '[FAIL]' in text:
+        flags |= REVIDX_FAIL
+    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
+                node=node, flags=flags)
+
+def extsetup(ui):
+    # Enable changegroup3 for flags to be sent over the wire
+    wrapfunction = extensions.wrapfunction
+    wrapfunction(changegroup,
+                 'supportedoutgoingversions',
+                 supportedoutgoingversions)
+    wrapfunction(changegroup,
+                 'allsupportedversions',
+                 allsupportedversions)
+
+    # Teach revlog about our test flags
+    flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
+    revlog.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
+    revlog.REVIDX_FLAGS_ORDER.extend(flags)
+
+    # Add wrappers for addrevision, responsible to set flags depending on the
+    # revision data contents.
+    wrapfunction(filelog.filelog, 'addrevision', noopaddrevision)
+    wrapfunction(filelog.filelog, 'addrevision', b64addrevision)
+    wrapfunction(filelog.filelog, 'addrevision', gzipaddrevision)
+    wrapfunction(filelog.filelog, 'addrevision', failaddrevision)
+
+    # Register flag processors for each extension
+    revlog.addflagprocessor(
+        REVIDX_NOOP,
+        (
+            noopdonothing,
+            noopdonothing,
+            validatehash,
+        )
+    )
+    revlog.addflagprocessor(
+        REVIDX_BASE64,
+        (
+            b64decode,
+            b64encode,
+            bypass,
+        ),
+    )
+    revlog.addflagprocessor(
+        REVIDX_GZIP,
+        (
+            gzipdecompress,
+            gzipcompress,
+            bypass
+        )
+    )
--- a/tests/get-with-headers.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/get-with-headers.py	Wed Jan 18 11:43:36 2017 -0500
@@ -35,6 +35,13 @@
     sys.argv.remove('--json')
     formatjson = True
 
+hgproto = None
+if '--hgproto' in sys.argv:
+    idx = sys.argv.index('--hgproto')
+    hgproto = sys.argv[idx + 1]
+    sys.argv.pop(idx)
+    sys.argv.pop(idx)
+
 tag = None
 def request(host, path, show):
     assert not path.startswith('/'), path
@@ -42,6 +49,8 @@
     headers = {}
     if tag:
         headers['If-None-Match'] = tag
+    if hgproto:
+        headers['X-HgProto-1'] = hgproto
 
     conn = httplib.HTTPConnection(host)
     conn.request("GET", '/' + path, None, headers)
--- a/tests/hghave.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/hghave.py	Wed Jan 18 11:43:36 2017 -0500
@@ -150,7 +150,7 @@
 
 @check("darcs", "darcs client")
 def has_darcs():
-    return matchoutput('darcs --version', br'2\.[2-9]', True)
+    return matchoutput('darcs --version', br'\b2\.([2-9]|\d{2})', True)
 
 @check("mtn", "monotone client (>= 1.0)")
 def has_mtn():
@@ -449,7 +449,7 @@
 @check("defaultcacerts", "can verify SSL certs by system's CA certs store")
 def has_defaultcacerts():
     from mercurial import sslutil, ui as uimod
-    ui = uimod.ui()
+    ui = uimod.ui.load()
     return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts
 
 @check("defaultcacertsloaded", "detected presence of loaded system CA certs")
@@ -462,7 +462,7 @@
     if not has_sslcontext():
         return False
 
-    ui = uimod.ui()
+    ui = uimod.ui.load()
     cafile = sslutil._defaultcacerts(ui)
     ctx = ssl.create_default_context()
     if cafile:
@@ -610,3 +610,12 @@
 @check("unziplinks", "unzip(1) understands and extracts symlinks")
 def unzip_understands_symlinks():
     return matchoutput('unzip --help', br'Info-ZIP')
+
+@check("zstd", "zstd Python module available")
+def has_zstd():
+    try:
+        import mercurial.zstd
+        mercurial.zstd.__version__
+        return True
+    except ImportError:
+        return False
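
has_zstd() touches __version__ so that a module which imports but is only half-built does not count as present. A generalized sketch of the probe (helper name ours; unlike the original it also swallows a missing attribute instead of letting AttributeError escape):

    def probe_module(name, attr='__version__'):
        # the feature exists only if the module imports and exposes attr
        try:
            mod = __import__(name, fromlist=['x'])
            getattr(mod, attr)
            return True
        except (ImportError, AttributeError):
            return False

    print(probe_module('mercurial.zstd'))
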
--- a/tests/run-tests.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/run-tests.py	Wed Jan 18 11:43:36 2017 -0500
@@ -58,6 +58,11 @@
 import socket
 import subprocess
 import sys
+try:
+    import sysconfig
+except ImportError:
+    # sysconfig doesn't exist in Python 2.6
+    sysconfig = None
 import tempfile
 import threading
 import time
@@ -818,6 +823,8 @@
             offset = '' if i == 0 else '%s' % i
             env["HGPORT%s" % offset] = '%s' % (self._startport + i)
         env = os.environ.copy()
+        if sysconfig is not None:
+            env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
         env['TESTTMP'] = self._testtmp
         env['HOME'] = self._testtmp
         # This number should match portneeded in _getport
@@ -842,7 +849,7 @@
         env['TERM'] = 'xterm'
 
         for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
-                  'NO_PROXY').split():
+                  'NO_PROXY CHGDEBUG').split():
             if k in env:
                 del env[k]
 
--- a/tests/sitecustomize.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/sitecustomize.py	Wed Jan 18 11:43:36 2017 -0500
@@ -4,11 +4,10 @@
 if os.environ.get('COVERAGE_PROCESS_START'):
     try:
         import coverage
-        import random
+        import uuid
 
-        # uuid is better, but not available in Python 2.4.
         covpath = os.path.join(os.environ['COVERAGE_DIR'],
-                               'cov.%s' % random.randrange(0, 1000000000000))
+                               'cov.%s' % uuid.uuid1())
         cov = coverage.coverage(data_file=covpath, auto_data=True)
         cov._warn_no_data = False
         cov._warn_unimported_source = False
--- a/tests/test-ancestor.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-ancestor.py	Wed Jan 18 11:43:36 2017 -0500
@@ -11,7 +11,7 @@
 from mercurial.node import nullrev
 from mercurial import (
     ancestor,
-    commands,
+    debugcommands,
     hg,
     ui as uimod,
     util,
@@ -218,7 +218,7 @@
     '+3*3/*2*2/*4*4/*4/2*4/2*2',
 ]
 def test_gca():
-    u = uimod.ui()
+    u = uimod.ui.load()
     for i, dag in enumerate(dagtests):
         repo = hg.repository(u, 'gca%d' % i, create=1)
         cl = repo.changelog
@@ -226,7 +226,7 @@
             # C version not available
             return
 
-        commands.debugbuilddag(u, repo, dag)
+        debugcommands.debugbuilddag(u, repo, dag)
         # Compare the results of the Python and C versions. This does not
         # include choosing a winner when more than one gca exists -- we make
         # sure both return exactly the same set of gcas.
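
The recurring ui.ui() -> ui.ui.load() edits throughout this changeset come from 4.1 splitting configuration loading out of the ui constructor: ui.ui() now returns a blank instance with no hgrc files read, and the load() classmethod is what reads system and user configuration. For scripts embedding Mercurial:

    from mercurial import hg, ui as uimod

    blank = uimod.ui()        # no config files read; mostly internal use
    myui = uimod.ui.load()    # reads config, as plain ui.ui() used to
    repo = hg.repository(myui, '.')
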
--- a/tests/test-annotate.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-annotate.t	Wed Jan 18 11:43:36 2017 -0500
@@ -480,6 +480,142 @@
   [255]
 #endif
 
+  $ hg revert --all --no-backup --quiet
+  $ hg id -n
+  20
+
+Test followlines() revset
+
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=.^)'
+  16: baz:0
+  19: baz:3
+  $ printf "0\n0\n" | cat - baz > baz1
+  $ mv baz1 baz
+  $ hg ci -m 'added two lines with 0'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  $ echo 6 >> baz
+  $ hg ci -m 'added line 8'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  $ sed 's/3/3+/' baz > baz.new
+  $ mv baz.new baz
+  $ hg ci -m 'baz:3->3+'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  23: baz:3->3+
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2)'
+  21: added two lines with 0
+
+file patterns are okay
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
+  21: added two lines with 0
+
+renames are followed
+  $ hg mv baz qux
+  $ sed 's/4/4+/' qux > qux.new
+  $ mv qux.new qux
+  $ hg ci -m 'qux:4->4+'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  23: baz:3->3+
+  24: qux:4->4+
+  $ hg up 23 --quiet
+
+merge
+  $ echo 7 >> baz
+  $ hg ci -m 'one more line, out of line range'
+  created new head
+  $ sed 's/3+/3-/' baz > baz.new
+  $ mv baz.new baz
+  $ hg ci -m 'baz:3+->3-'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  23: baz:3->3+
+  26: baz:3+->3-
+  $ hg merge 24
+  merging baz and qux to qux
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m merge
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  23: baz:3->3+
+  24: qux:4->4+
+  26: baz:3+->3-
+  27: merge
+  $ hg up 24 --quiet
+  $ hg merge 26
+  merging qux and baz to qux
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m 'merge from other side'
+  created new head
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
+  16: baz:0
+  19: baz:3
+  20: baz:4
+  23: baz:3->3+
+  24: qux:4->4+
+  26: baz:3+->3-
+  28: merge from other side
+  $ hg up 23 --quiet
+
+check error cases
+  $ hg log -r 'followlines()'
+  hg: parse error: followlines takes at least 1 positional arguments
+  [255]
+  $ hg log -r 'followlines(baz)'
+  hg: parse error: followlines requires a line range
+  [255]
+  $ hg log -r 'followlines(baz, 1)'
+  hg: parse error: followlines expects a line range
+  [255]
+  $ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
+  hg: parse error: followlines expects exactly one revision
+  [255]
+  $ hg log -r 'followlines("glob:*", 1:2)'
+  hg: parse error: followlines expects exactly one file
+  [255]
+  $ hg log -r 'followlines(baz, 1:)'
+  hg: parse error: line range bounds must be integers
+  [255]
+  $ hg log -r 'followlines(baz, :1)'
+  hg: parse error: line range bounds must be integers
+  [255]
+  $ hg log -r 'followlines(baz, x:4)'
+  hg: parse error: line range bounds must be integers
+  [255]
+  $ hg log -r 'followlines(baz, 5:4)'
+  hg: parse error: line range must be positive
+  [255]
+  $ hg log -r 'followlines(baz, 0:4)'
+  hg: parse error: fromline must be strictly positive
+  [255]
+  $ hg log -r 'followlines(baz, 2:40)'
+  abort: line range exceeds file size
+  [255]
+
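
The same followlines() queries can be issued from Python through repo.revs(), which accepts the identical revset language; a sketch assuming the repository built in this test:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    for rev in repo.revs("followlines(baz, 3:5, startrev=20)"):
        print(rev, repo[rev].description())
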
 Test annotate with whitespace options
 
   $ cd ..
--- a/tests/test-archive.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-archive.t	Wed Jan 18 11:43:36 2017 -0500
@@ -61,6 +61,13 @@
   $ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
   $ cat hg.pid >> $DAEMON_PIDS
 
+check archive links' order
+
+  $ get-with-headers.py localhost:$HGPORT "?revcount=1" | grep '/archive/tip.'
+  <a href="/archive/tip.zip">zip</a>
+  <a href="/archive/tip.tar.gz">gz</a>
+  <a href="/archive/tip.tar.bz2">bz2</a>
+
 invalid arch type should give 404
 
   $ get-with-headers.py localhost:$HGPORT "archive/tip.invalid" | head -n 1
--- a/tests/test-atomictempfile.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-atomictempfile.py	Wed Jan 18 11:43:36 2017 -0500
@@ -68,7 +68,7 @@
             repetition = 3
 
             # repeat atomic write with checkambig=True, to examine
-            # whether st_mtime is advanced multiple times as expecetd
+            # whether st_mtime is advanced multiple times as expected
             for j in xrange(repetition):
                 atomicwrite(True)
             newstat = os.stat(self._filename)
@@ -77,7 +77,7 @@
                 continue
 
             # st_mtime should be advanced "repetition" times, because
-            # all atomicwrite() occured at same time (in sec)
+            # all atomicwrite() occurred at same time (in sec)
             self.assertTrue(newstat.st_mtime ==
                             ((oldstat.st_mtime + repetition) & 0x7fffffff))
             # no more examination is needed, if assumption above is true
--- a/tests/test-backout.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-backout.t	Wed Jan 18 11:43:36 2017 -0500
@@ -272,7 +272,7 @@
 
 (1) update to REV1 (REV2 => REV1)
 (2) revert by REV1^1
-(3) commit backnig out revision (REV3)
+(3) commit backing out revision (REV3)
 (4) update to REV2 (REV3 => REV2)
 (5) merge with REV3 (REV2 => REV2, REV3)
 
@@ -287,7 +287,7 @@
   > preupdate.visibility = sh $TESTTMP/checkvisibility.sh preupdate
   > EOF
 
-("-m" is needed to avoid writing dirstte changes out at other than
+("-m" is needed to avoid writing dirstate changes out at other than
 invocation of the hook to be examined)
 
   $ hg backout --merge -d '3 0' 1 --tool=true -m 'fixed comment'
--- a/tests/test-basic.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-basic.t	Wed Jan 18 11:43:36 2017 -0500
@@ -6,7 +6,6 @@
   defaults.shelve=--date "0 0"
   defaults.tag=-d "0 0"
   devel.all-warnings=true
-  extensions.chgserver= (?)
   largefiles.usercache=$TESTTMP/.cache/largefiles (glob)
   ui.slash=True
   ui.interactive=False
@@ -35,7 +34,7 @@
 
   $ cat <<EOF > update_to_rev0.py
   > from mercurial import ui, hg, commands
-  > myui = ui.ui()
+  > myui = ui.ui.load()
   > repo = hg.repository(myui, path='.')
   > commands.update(myui, repo, rev=0)
   > EOF
--- a/tests/test-bdiff.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-bdiff.py	Wed Jan 18 11:43:36 2017 -0500
@@ -1,73 +1,151 @@
 from __future__ import absolute_import, print_function
+import collections
 import struct
+import unittest
+
+import silenttestrunner
+
 from mercurial import (
     bdiff,
     mpatch,
 )
 
-def test1(a, b):
-    d = bdiff.bdiff(a, b)
-    c = a
-    if d:
-        c = mpatch.patches(a, [d])
-    if c != b:
-        print("***", repr(a), repr(b))
-        print("bad:")
-        print(repr(c)[:200])
-        print(repr(d))
+class diffreplace(
+    collections.namedtuple('diffreplace', 'start end from_ to')):
+    def __repr__(self):
+        return 'diffreplace(%r, %r, %r, %r)' % self
+
+class BdiffTests(unittest.TestCase):
+
+    def assert_bdiff_applies(self, a, b):
+        d = bdiff.bdiff(a, b)
+        c = a
+        if d:
+            c = mpatch.patches(a, [d])
+        self.assertEqual(
+            c, b, ("bad diff+patch result from\n  %r to\n  "
+                   "%r: \nbdiff: %r\npatched: %r" % (a, b, d, c[:200])))
+
+    def assert_bdiff(self, a, b):
+        self.assert_bdiff_applies(a, b)
+        self.assert_bdiff_applies(b, a)
 
-def test(a, b):
-    print("***", repr(a), repr(b))
-    test1(a, b)
-    test1(b, a)
+    def test_bdiff_basic(self):
+        cases = [
+            ("a\nc\n\n\n\n", "a\nb\n\n\n"),
+            ("a\nb\nc\n", "a\nc\n"),
+            ("", ""),
+            ("a\nb\nc", "a\nb\nc"),
+            ("a\nb\nc\nd\n", "a\nd\n"),
+            ("a\nb\nc\nd\n", "a\nc\ne\n"),
+            ("a\nb\nc\n", "a\nc\n"),
+            ("a\n", "c\na\nb\n"),
+            ("a\n", ""),
+            ("a\n", "b\nc\n"),
+            ("a\n", "c\na\n"),
+            ("", "adjfkjdjksdhfksj"),
+            ("", "ab"),
+            ("", "abc"),
+            ("a", "a"),
+            ("ab", "ab"),
+            ("abc", "abc"),
+            ("a\n", "a\n"),
+            ("a\nb", "a\nb"),
+        ]
+        for a, b in cases:
+            self.assert_bdiff(a, b)
+
+    def showdiff(self, a, b):
+        bin = bdiff.bdiff(a, b)
+        pos = 0
+        q = 0
+        actions = []
+        while pos < len(bin):
+            p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
+            pos += 12
+            if p1:
+                actions.append(a[q:p1])
+            actions.append(diffreplace(p1, p2, a[p1:p2], bin[pos:pos + l]))
+            pos += l
+            q = p2
+        if q < len(a):
+            actions.append(a[q:])
+        return actions
 
-test("a\nc\n\n\n\n", "a\nb\n\n\n")
-test("a\nb\nc\n", "a\nc\n")
-test("", "")
-test("a\nb\nc", "a\nb\nc")
-test("a\nb\nc\nd\n", "a\nd\n")
-test("a\nb\nc\nd\n", "a\nc\ne\n")
-test("a\nb\nc\n", "a\nc\n")
-test("a\n", "c\na\nb\n")
-test("a\n", "")
-test("a\n", "b\nc\n")
-test("a\n", "c\na\n")
-test("", "adjfkjdjksdhfksj")
-test("", "ab")
-test("", "abc")
-test("a", "a")
-test("ab", "ab")
-test("abc", "abc")
-test("a\n", "a\n")
-test("a\nb", "a\nb")
+    def test_issue1295(self):
+        cases = [
+            ("x\n\nx\n\nx\n\nx\n\nz\n", "x\n\nx\n\ny\n\nx\n\nx\n\nz\n",
+             ['x\n\nx\n\n', diffreplace(6, 6, '', 'y\n\n'), 'x\n\nx\n\nz\n']),
+            ("x\n\nx\n\nx\n\nx\n\nz\n", "x\n\nx\n\ny\n\nx\n\ny\n\nx\n\nz\n",
+             ['x\n\nx\n\n',
+              diffreplace(6, 6, '', 'y\n\n'),
+              'x\n\n',
+              diffreplace(9, 9, '', 'y\n\n'),
+              'x\n\nz\n']),
+        ]
+        for old, new, want in cases:
+            self.assertEqual(self.showdiff(old, new), want)
+
+    def test_issue1295_varies_on_pure(self):
+        # we should pick up abbbc. rather than bc.de as the longest match
+        got = self.showdiff("a\nb\nb\nb\nc\n.\nd\ne\n.\nf\n",
+                            "a\nb\nb\na\nb\nb\nb\nc\n.\nb\nc\n.\nd\ne\nf\n")
+        want_c = ['a\nb\nb\n',
+                  diffreplace(6, 6, '', 'a\nb\nb\nb\nc\n.\n'),
+                  'b\nc\n.\nd\ne\n',
+                  diffreplace(16, 18, '.\n', ''),
+                  'f\n']
+        want_pure = [diffreplace(0, 0, '', 'a\nb\nb\n'),
+                     'a\nb\nb\nb\nc\n.\n',
+                     diffreplace(12, 12, '', 'b\nc\n.\n'),
+                     'd\ne\n',
+                     diffreplace(16, 18, '.\n', ''), 'f\n']
+        self.assert_(got in (want_c, want_pure),
+                     'got: %r, wanted either %r or %r' % (
+                         got, want_c, want_pure))
 
-#issue1295
-def showdiff(a, b):
-    bin = bdiff.bdiff(a, b)
-    pos = 0
-    while pos < len(bin):
-        p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
-        pos += 12
-        print(p1, p2, repr(bin[pos:pos + l]))
-        pos += l
-showdiff("x\n\nx\n\nx\n\nx\n\nz\n", "x\n\nx\n\ny\n\nx\n\nx\n\nz\n")
-showdiff("x\n\nx\n\nx\n\nx\n\nz\n", "x\n\nx\n\ny\n\nx\n\ny\n\nx\n\nz\n")
-# we should pick up abbbc. rather than bc.de as the longest match
-showdiff("a\nb\nb\nb\nc\n.\nd\ne\n.\nf\n",
-         "a\nb\nb\na\nb\nb\nb\nc\n.\nb\nc\n.\nd\ne\nf\n")
+    def test_fixws(self):
+        cases = [
+            (" \ta\r b\t\n", "ab\n", 1),
+            (" \ta\r b\t\n", " a b\n", 0),
+            ("", "", 1),
+            ("", "", 0),
+        ]
+        for a, b, allws in cases:
+            c = bdiff.fixws(a, allws)
+            self.assertEqual(
+                c, b, 'fixws(%r) want %r got %r (allws=%r)' % (a, b, c, allws))
 
-print("done")
+    def test_nice_diff_for_trivial_change(self):
+        self.assertEqual(self.showdiff(
+            ''.join('<%s\n-\n' % i for i in range(5)),
+            ''.join('>%s\n-\n' % i for i in range(5))),
+                         [diffreplace(0, 3, '<0\n', '>0\n'),
+                          '-\n',
+                          diffreplace(5, 8, '<1\n', '>1\n'),
+                          '-\n',
+                          diffreplace(10, 13, '<2\n', '>2\n'),
+                          '-\n',
+                          diffreplace(15, 18, '<3\n', '>3\n'),
+                          '-\n',
+                          diffreplace(20, 23, '<4\n', '>4\n'),
+                          '-\n'])
 
-def testfixws(a, b, allws):
-    c = bdiff.fixws(a, allws)
-    if c != b:
-        print("*** fixws", repr(a), repr(b), allws)
-        print("got:")
-        print(repr(c))
+    def test_prefer_appending(self):
+        # 1 line to 3 lines
+        self.assertEqual(self.showdiff('a\n', 'a\n' * 3),
+                         ['a\n', diffreplace(2, 2, '', 'a\na\n')])
+        # 1 line to 5 lines
+        self.assertEqual(self.showdiff('a\n', 'a\n' * 5),
+                         ['a\n', diffreplace(2, 2, '', 'a\na\na\na\n')])
 
-testfixws(" \ta\r b\t\n", "ab\n", 1)
-testfixws(" \ta\r b\t\n", " a b\n", 0)
-testfixws("", "", 1)
-testfixws("", "", 0)
+    def test_prefer_removing_trailing(self):
+        # 3 lines to 1 line
+        self.assertEqual(self.showdiff('a\n' * 3, 'a\n'),
+                         ['a\n', diffreplace(2, 6, 'a\na\n', '')])
+        # 5 lines to 1 line
+        self.assertEqual(self.showdiff('a\n' * 5, 'a\n'),
+                         ['a\n', diffreplace(2, 10, 'a\na\na\na\n', '')])
 
-print("done")
+if __name__ == '__main__':
+    silenttestrunner.main(__name__)
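
showdiff() above decodes the raw bdiff stream: hunks made of a big-endian (start, end, length) header followed by length replacement bytes. Applying such a stream by hand (function name ours) makes explicit what mpatch.patches() does in C:

    import struct
    from mercurial import bdiff

    def apply_bdiff(a, delta):
        # each hunk replaces a[p1:p2] with the next l bytes of the delta
        out, last, pos = [], 0, 0
        while pos < len(delta):
            p1, p2, l = struct.unpack(">lll", delta[pos:pos + 12])
            pos += 12
            out.append(a[last:p1])          # copy the unchanged run
            out.append(delta[pos:pos + l])  # splice in the replacement
            pos += l
            last = p2
        out.append(a[last:])
        return ''.join(out)

    # round trip, mirroring assert_bdiff_applies() above
    a, b = "a\nb\nc\n", "a\nc\n"
    assert apply_bdiff(a, bdiff.bdiff(a, b)) == b
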
--- a/tests/test-bdiff.py.out	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-*** 'a\nc\n\n\n\n' 'a\nb\n\n\n'
-*** 'a\nb\nc\n' 'a\nc\n'
-*** '' ''
-*** 'a\nb\nc' 'a\nb\nc'
-*** 'a\nb\nc\nd\n' 'a\nd\n'
-*** 'a\nb\nc\nd\n' 'a\nc\ne\n'
-*** 'a\nb\nc\n' 'a\nc\n'
-*** 'a\n' 'c\na\nb\n'
-*** 'a\n' ''
-*** 'a\n' 'b\nc\n'
-*** 'a\n' 'c\na\n'
-*** '' 'adjfkjdjksdhfksj'
-*** '' 'ab'
-*** '' 'abc'
-*** 'a' 'a'
-*** 'ab' 'ab'
-*** 'abc' 'abc'
-*** 'a\n' 'a\n'
-*** 'a\nb' 'a\nb'
-6 6 'y\n\n'
-6 6 'y\n\n'
-9 9 'y\n\n'
-0 0 'a\nb\nb\n'
-12 12 'b\nc\n.\n'
-16 18 ''
-done
-done
--- a/tests/test-bisect.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-bisect.t	Wed Jan 18 11:43:36 2017 -0500
@@ -456,7 +456,7 @@
   > #!/usr/bin/env python
   > import sys
   > from mercurial import ui, hg
-  > repo = hg.repository(ui.ui(), '.')
+  > repo = hg.repository(ui.ui.load(), '.')
   > if repo['.'].rev() < 6:
   >     sys.exit(1)
   > EOF
--- a/tests/test-bookmarks-pushpull.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-bookmarks-pushpull.t	Wed Jan 18 11:43:36 2017 -0500
@@ -550,7 +550,7 @@
 
   $ cd ..
 
-Test to show result of bookmarks comparision
+Test to show result of bookmarks comparison
 
   $ mkdir bmcomparison
   $ cd bmcomparison
--- a/tests/test-bookmarks.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-bookmarks.t	Wed Jan 18 11:43:36 2017 -0500
@@ -572,8 +572,8 @@
 
   $ hg bookmark -r3 Y
   moving bookmark 'Y' forward from db815d6d32e6
-  $ cp -r  ../cloned-bookmarks-update ../cloned-bookmarks-manual-update
-  $ cp -r  ../cloned-bookmarks-update ../cloned-bookmarks-manual-update-with-divergence
+  $ cp -R ../cloned-bookmarks-update ../cloned-bookmarks-manual-update
+  $ cp -R ../cloned-bookmarks-update ../cloned-bookmarks-manual-update-with-divergence
 
 (manual version)
 
@@ -734,7 +734,7 @@
      summary:     0
   
 
-no-op update doesn't deactive bookmarks
+no-op update doesn't deactivate bookmarks
 
   $ hg bookmarks
    * four                      3:9ba5f110a0b3
--- a/tests/test-bundle-type.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-bundle-type.t	Wed Jan 18 11:43:36 2017 -0500
@@ -35,17 +35,21 @@
 
 test bundle types
 
-  $ for t in "None" "bzip2" "gzip" "none-v2" "v2" "v1" "gzip-v1"; do
-  >   echo % test bundle type $t
-  >   hg init t$t
+  $ testbundle() {
+  >   echo % test bundle type $1
+  >   hg init t$1
   >   cd t1
-  >   hg bundle -t $t ../b$t ../t$t
-  >   f -q -B6 -D ../b$t; echo
-  >   cd ../t$t
-  >   hg debugbundle ../b$t
-  >   hg debugbundle --spec ../b$t
+  >   hg bundle -t $1 ../b$1 ../t$1
+  >   f -q -B6 -D ../b$1; echo
+  >   cd ../t$1
+  >   hg debugbundle ../b$1
+  >   hg debugbundle --spec ../b$1
   >   echo
   >   cd ..
+  > }
+
+  $ for t in "None" "bzip2" "gzip" "none-v2" "v2" "v1" "gzip-v1"; do
+  >   testbundle $t
   > done
   % test bundle type None
   searching for changes
@@ -107,6 +111,69 @@
   gzip-v1
   
 
+Compression level can be adjusted for bundle2 bundles
+
+  $ hg init test-complevel
+  $ cd test-complevel
+
+  $ cat > file0 << EOF
+  > this is a file
+  > with some text
+  > and some more text
+  > and other content
+  > EOF
+  $ cat > file1 << EOF
+  > this is another file
+  > with some other content
+  > and repeated, repeated, repeated, repeated content
+  > EOF
+  $ hg -q commit -A -m initial
+
+  $ hg bundle -a -t gzip-v2 gzip-v2.hg
+  1 changesets found
+  $ f --size gzip-v2.hg
+  gzip-v2.hg: size=427
+
+  $ hg --config experimental.bundlecomplevel=1 bundle -a -t gzip-v2 gzip-v2-level1.hg
+  1 changesets found
+  $ f --size gzip-v2-level1.hg
+  gzip-v2-level1.hg: size=435
+
+  $ cd ..
+
+#if zstd
+
+  $ for t in "zstd" "zstd-v2"; do
+  >   testbundle $t
+  > done
+  % test bundle type zstd
+  searching for changes
+  1 changesets found
+  HG20\x00\x00 (esc)
+  Stream params: sortdict([('Compression', 'ZS')])
+  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
+      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+  zstd-v2
+  
+  % test bundle type zstd-v2
+  searching for changes
+  1 changesets found
+  HG20\x00\x00 (esc)
+  Stream params: sortdict([('Compression', 'ZS')])
+  changegroup -- "sortdict([('version', '02'), ('nbchanges', '1')])"
+      c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
+  zstd-v2
+  
+#else
+
+zstd is a valid engine but isn't available
+
+  $ hg -R t1 bundle -a -t zstd irrelevant.hg
+  abort: compression engine zstd could not be loaded
+  [255]
+
+#endif
+
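
The zstd bundles come from the newly vendored python-zstandard package (built into Mercurial as mercurial.zstd, importable standalone as zstandard); a minimal round trip, where the level argument plays the role that experimental.bundlecomplevel tunes:

    import zstandard as zstd

    data = b'repeated, repeated, repeated content\n' * 100
    compressed = zstd.ZstdCompressor(level=3).compress(data)
    # max_output_size is required when the frame does not embed its size
    plain = zstd.ZstdDecompressor().decompress(compressed,
                                               max_output_size=len(data))
    assert plain == data
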
 test garbage file
 
   $ echo garbage > bgarbage
--- a/tests/test-bundle.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-bundle.t	Wed Jan 18 11:43:36 2017 -0500
@@ -268,13 +268,13 @@
 packed1 is produced properly
 
   $ hg -R test debugcreatestreamclonebundle packed.hg
-  writing 2663 bytes for 6 files
+  writing 2664 bytes for 6 files
   bundle requirements: generaldelta, revlogv1
 
   $ f -B 64 --size --sha1 --hexdump packed.hg
-  packed.hg: size=2826, sha1=e139f97692a142b19cdcff64a69697d5307ce6d4
+  packed.hg: size=2827, sha1=9d14cb90c66a21462d915ab33656f38b9deed686
   0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
-  0010: 00 00 00 00 0a 67 00 16 67 65 6e 65 72 61 6c 64 |.....g..generald|
+  0010: 00 00 00 00 0a 68 00 16 67 65 6e 65 72 61 6c 64 |.....h..generald|
   0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
   0030: 74 61 2f 61 64 69 66 66 65 72 65 6e 74 66 69 6c |ta/adifferentfil|
 
--- a/tests/test-cat.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-cat.t	Wed Jan 18 11:43:36 2017 -0500
@@ -68,3 +68,14 @@
   $ echo b-wdir > b
   $ hg cat -r 'wdir()' b
   b-wdir
+
+Environment variables are not visible by default
+
+  $ PATTERN='t4' hg log -r '.' -T "{ifcontains('PATTERN', envvars, 'yes', 'no')}\n"
+  no
+
+Environment variable visibility can be explicit
+
+  $ PATTERN='t4' hg log -r '.' -T "{envvars % '{key} -> {value}\n'}" \
+  >                 --config "experimental.exportableenviron=PATTERN"
+  PATTERN -> t4
--- a/tests/test-check-code.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-check-code.t	Wed Jan 18 11:43:36 2017 -0500
@@ -7,13 +7,44 @@
 New errors are not allowed. Warnings are strongly discouraged.
 (The writing "no-che?k-code" is for not skipping this file when checking.)
 
-  $ hg locate | sed 's-\\-/-g' |
-  >   xargs "$check_code" --warnings --per-file=0 || false
-  Skipping hgext/fsmonitor/pywatchman/__init__.py it has no-che?k-code (glob)
-  Skipping hgext/fsmonitor/pywatchman/bser.c it has no-che?k-code (glob)
-  Skipping hgext/fsmonitor/pywatchman/capabilities.py it has no-che?k-code (glob)
-  Skipping hgext/fsmonitor/pywatchman/msc_stdint.h it has no-che?k-code (glob)
-  Skipping hgext/fsmonitor/pywatchman/pybser.py it has no-che?k-code (glob)
+  $ hg locate -X contrib/python-zstandard -X hgext/fsmonitor/pywatchman |
+  > sed 's-\\-/-g' | xargs "$check_code" --warnings --per-file=0 || false
   Skipping i18n/polib.py it has no-che?k-code (glob)
+  mercurial/demandimport.py:312:
+   >     if os.environ.get('HGDEMANDIMPORT') != 'disable':
+   use encoding.environ instead (py3)
+  mercurial/encoding.py:54:
+   >     environ = os.environ
+   use encoding.environ instead (py3)
+  mercurial/encoding.py:56:
+   >     environ = os.environb
+   use encoding.environ instead (py3)
+  mercurial/encoding.py:61:
+   >                    for k, v in os.environ.items())
+   use encoding.environ instead (py3)
+  mercurial/encoding.py:203:
+   >                    for k, v in os.environ.items())
+   use encoding.environ instead (py3)
   Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob)
   Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob)
+  mercurial/policy.py:45:
+   > policy = os.environ.get('HGMODULEPOLICY', policy)
+   use encoding.environ instead (py3)
+  Skipping mercurial/statprof.py it has no-che?k-code (glob)
+  [1]
+
+@commands in debugcommands.py should be in alphabetical order.
+
+  >>> import re
+  >>> commands = []
+  >>> with open('mercurial/debugcommands.py', 'rb') as fh:
+  ...     for line in fh:
+  ...         m = re.match("^@command\('([a-z]+)", line)
+  ...         if m:
+  ...             commands.append(m.group(1))
+  >>> scommands = list(sorted(commands))
+  >>> for i, command in enumerate(scommands):
+  ...     if command != commands[i]:
+  ...         print('commands in debugcommands.py not sorted; first differing '
+  ...               'command is %s; expected %s' % (commands[i], command))
+  ...         break
--- a/tests/test-check-commit.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-check-commit.t	Wed Jan 18 11:43:36 2017 -0500
@@ -8,7 +8,7 @@
 
   $ cd $TESTDIR/..
 
-  $ for node in `hg log --rev 'not public() and ::.' --template '{node|short}\n'`; do
+  $ for node in `hg log --rev 'not public() and ::. and not desc("# no-check-commit")' --template '{node|short}\n'`; do
   >    hg export $node | contrib/check-commit > ${TESTTMP}/check-commit.out
   >    if [ $? -ne 0 ]; then
   >        echo "Revision $node does not comply with rules"
--- a/tests/test-check-module-imports.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-check-module-imports.t	Wed Jan 18 11:43:36 2017 -0500
@@ -159,6 +159,7 @@
   $ hg locate 'set:**.py or grep(r"^#!.*?python")' \
   > 'tests/**.t' \
   > -X contrib/debugshell.py \
+  > -X contrib/python-zstandard/ \
   > -X contrib/win32/hgwebdir_wsgi.py \
   > -X doc/gendoc.py \
   > -X doc/hgmanpage.py \
--- a/tests/test-check-py3-commands.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-check-py3-commands.t	Wed Jan 18 11:43:36 2017 -0500
@@ -9,6 +9,6 @@
   >   $PYTHON3 `which hg` $cmd 2>&1 2>&1 | tail -1
   > done
   version
-  TypeError: str expected, not bytes
+  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   debuginstall
-  TypeError: str expected, not bytes
+  TypeError: Can't convert 'bytes' object to str implicitly
--- a/tests/test-check-py3-compat.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-check-py3-compat.t	Wed Jan 18 11:43:36 2017 -0500
@@ -4,24 +4,32 @@
   $ cd "$TESTDIR"/..
 
   $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
-  hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
-  hgext/fsmonitor/pywatchman/__init__.py requires print_function
-  hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
-  hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
+  contrib/python-zstandard/setup.py not using absolute_import
+  contrib/python-zstandard/setup_zstd.py not using absolute_import
+  contrib/python-zstandard/tests/common.py not using absolute_import
+  contrib/python-zstandard/tests/test_cffi.py not using absolute_import
+  contrib/python-zstandard/tests/test_compressor.py not using absolute_import
+  contrib/python-zstandard/tests/test_data_structures.py not using absolute_import
+  contrib/python-zstandard/tests/test_decompressor.py not using absolute_import
+  contrib/python-zstandard/tests/test_estimate_sizes.py not using absolute_import
+  contrib/python-zstandard/tests/test_module_attributes.py not using absolute_import
+  contrib/python-zstandard/tests/test_roundtrip.py not using absolute_import
+  contrib/python-zstandard/tests/test_train_dictionary.py not using absolute_import
   i18n/check-translation.py not using absolute_import
   setup.py not using absolute_import
   tests/test-demandimport.py not using absolute_import
 
 #if py3exe
-  $ hg files 'set:(**.py) - grep(pygments)' | sed 's|\\|/|g' \
-  > | xargs $PYTHON3 contrib/check-py3-compat.py \
+  $ hg files 'set:(**.py) - grep(pygments)' -X hgext/fsmonitor/pywatchman \
+  > | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
   hgext/convert/transport.py: error importing: <ImportError> No module named 'svn.client' (error at transport.py:*)
-  hgext/fsmonitor/pywatchman/capabilities.py: error importing: <ImportError> No module named 'pybser' (error at __init__.py:*)
-  hgext/fsmonitor/pywatchman/pybser.py: error importing: <ImportError> No module named 'pybser' (error at __init__.py:*)
-  hgext/fsmonitor/watchmanclient.py: error importing: <ImportError> No module named 'pybser' (error at __init__.py:*)
-  hgext/mq.py: error importing: <TypeError> __import__() argument 1 must be str, not bytes (error at extensions.py:*)
-  mercurial/scmwindows.py: error importing: <ImportError> No module named 'winreg' (error at scmwindows.py:*)
+  hgext/fsmonitor/state.py: error importing: <SyntaxError> from __future__ imports must occur at the beginning of the file (__init__.py, line 30) (error at watchmanclient.py:*)
+  hgext/fsmonitor/watchmanclient.py: error importing: <SyntaxError> from __future__ imports must occur at the beginning of the file (__init__.py, line 30) (error at watchmanclient.py:*)
+  mercurial/cffi/bdiff.py: error importing: <ImportError> No module named 'mercurial.cffi' (error at check-py3-compat.py:*)
+  mercurial/cffi/mpatch.py: error importing: <ImportError> No module named 'mercurial.cffi' (error at check-py3-compat.py:*)
+  mercurial/cffi/osutil.py: error importing: <ImportError> No module named 'mercurial.cffi' (error at check-py3-compat.py:*)
+  mercurial/scmwindows.py: error importing: <ImportError> No module named 'msvcrt' (error at win32.py:*)
   mercurial/win32.py: error importing: <ImportError> No module named 'msvcrt' (error at win32.py:*)
   mercurial/windows.py: error importing: <ImportError> No module named 'msvcrt' (error at windows.py:*)
 
--- a/tests/test-check-pyflakes.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-check-pyflakes.t	Wed Jan 18 11:43:36 2017 -0500
@@ -6,10 +6,10 @@
 run pyflakes on all tracked files ending in .py or without a file ending
 (skipping binary file random-seed)
 
-  $ hg locate 'set:**.py or grep("^#!.*python")' \
+  $ hg locate 'set:**.py or grep("^#!.*python")' -X hgext/fsmonitor/pywatchman \
   > -X mercurial/pycompat.py \
   > 2>/dev/null \
   > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
-  tests/filterpyflakes.py:61: undefined name 'undefinedname'
+  contrib/python-zstandard/tests/test_data_structures.py:107: local variable 'size' is assigned to but never used
+  tests/filterpyflakes.py:39: undefined name 'undefinedname'
   
-
--- a/tests/test-chg.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-chg.t	Wed Jan 18 11:43:36 2017 -0500
@@ -16,6 +16,20 @@
   [255]
 
   $ cp $HGRCPATH.orig $HGRCPATH
+
+long socket path
+
+  $ sockpath=$TESTTMP/this/path/should/be/longer/than/one-hundred-and-seven/characters/where/107/is/the/typical/size/limit/of/unix-domain-socket
+  $ mkdir -p $sockpath
+  $ bakchgsockname=$CHGSOCKNAME
+  $ CHGSOCKNAME=$sockpath/server
+  $ export CHGSOCKNAME
+  $ chg root
+  $TESTTMP/foo
+  $ rm -rf $sockpath
+  $ CHGSOCKNAME=$bakchgsockname
+  $ export CHGSOCKNAME
+
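
The long-socket-path test exists because sun_path for AF_UNIX sockets is small (typically 107 bytes on Linux), so chg must cope when $CHGSOCKNAME will not fit. A probe of the underlying limit (helper name ours):

    import os
    import socket

    def sockpath_ok(path):
        # binding fails once the path no longer fits in sun_path
        s = socket.socket(socket.AF_UNIX)
        try:
            s.bind(path)
        except (OSError, socket.error):
            return False
        finally:
            s.close()
        os.unlink(path)
        return True

    print(sockpath_ok('/tmp/probe-short'))   # True
    print(sockpath_ok('/tmp/' + 'x' * 200))  # False
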
   $ cd ..
 
 server lifecycle
@@ -46,7 +60,7 @@
 warm up server:
 
   $ CHGDEBUG= chg log 2>&1 | egrep 'instruction|start'
-  chg: debug: start cmdserver at $TESTTMP/extreload/chgsock/server
+  chg: debug: start cmdserver at $TESTTMP/extreload/chgsock/server.* (glob)
 
 new server should be started if extension modified:
 
@@ -55,7 +69,7 @@
   $ CHGDEBUG= chg log 2>&1 | egrep 'instruction|start'
   chg: debug: instruction: unlink $TESTTMP/extreload/chgsock/server-* (glob)
   chg: debug: instruction: reconnect
-  chg: debug: start cmdserver at $TESTTMP/extreload/chgsock/server
+  chg: debug: start cmdserver at $TESTTMP/extreload/chgsock/server.* (glob)
 
 old server will shut down, while new server should still be reachable:
 
@@ -77,7 +91,7 @@
 (this test makes sure that old server shut down automatically)
 
   $ CHGDEBUG= chg log 2>&1 | egrep 'instruction|start'
-  chg: debug: start cmdserver at $TESTTMP/extreload/chgsock/server
+  chg: debug: start cmdserver at $TESTTMP/extreload/chgsock/server.* (glob)
 
 shut down servers and restore environment:
 
--- a/tests/test-clone.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-clone.t	Wed Jan 18 11:43:36 2017 -0500
@@ -31,6 +31,10 @@
   default                       10:a7949464abda
   $ ls .hg/cache
   branch2-served
+  checkisexec
+  checklink
+  checklink-target
+  checknoexec
   rbc-names-v1
   rbc-revs-v1
 
@@ -45,6 +49,9 @@
 
   $ ls .hg/cache
   branch2-served
+  checkisexec
+  checklink
+  checklink-target
 
   $ cat a
   a
@@ -508,7 +515,7 @@
 
   $ cat <<EOF > simpleclone.py
   > from mercurial import ui, hg
-  > myui = ui.ui()
+  > myui = ui.ui.load()
   > repo = hg.repository(myui, 'a')
   > hg.clone(myui, {}, repo, dest="ua")
   > EOF
@@ -521,7 +528,7 @@
 
   $ cat <<EOF > branchclone.py
   > from mercurial import ui, hg, extensions
-  > myui = ui.ui()
+  > myui = ui.ui.load()
   > extensions.loadall(myui)
   > repo = hg.repository(myui, 'a')
   > hg.clone(myui, {}, repo, dest="ua", branch=["stable",])
--- a/tests/test-clonebundles.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-clonebundles.t	Wed Jan 18 11:43:36 2017 -0500
@@ -31,8 +31,8 @@
 
   $ cat server/access.log
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
-  * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phases%2Cbookmarks (glob)
+  * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
 Empty manifest file results in retrieval
 (the extension only checks if the manifest file exists)
--- a/tests/test-command-template.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-command-template.t	Wed Jan 18 11:43:36 2017 -0500
@@ -3707,7 +3707,7 @@
   a
   $ cd ..
   $ hg log -R r -r0 -T '{files % "{file|relpath}\n"}'
-  r/a (glob)
+  r/a
   $ cd r
 
 Test active bookmark templating
--- a/tests/test-commandserver.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-commandserver.t	Wed Jan 18 11:43:36 2017 -0500
@@ -135,6 +135,19 @@
   summary:     1
   
 
+check that "histedit --commands=-" can read rules from the input channel:
+
+  >>> import cStringIO
+  >>> from hgclient import readchannel, runcommand, check
+  >>> @check
+  ... def serverinput(server):
+  ...     readchannel(server)
+  ...     rules = 'pick eff892de26ec\n'
+  ...     runcommand(server, ['histedit', '0', '--commands=-',
+  ...                         '--config', 'extensions.histedit='],
+  ...                input=cStringIO.StringIO(rules))
+  *** runcommand histedit 0 --commands=- --config extensions.histedit=
+
 check that --cwd doesn't persist between requests:
 
   $ mkdir foo
@@ -223,8 +236,6 @@
   ...                         'id'],
   ...                input=stringio('some input'))
   *** runcommand --config hooks.pre-identify=python:hook.hook id
-  hook talking
-  now try to read something: 'some input'
   eff892de26ec tip
 
   $ rm hook.py*
@@ -596,6 +607,12 @@
   ...     runcommand(server, ['debuggetpass', '--config',
   ...                         'ui.interactive=True'],
   ...                input=stringio('1234\n'))
+  ...     runcommand(server, ['debuggetpass', '--config',
+  ...                         'ui.interactive=True'],
+  ...                input=stringio('\n'))
+  ...     runcommand(server, ['debuggetpass', '--config',
+  ...                         'ui.interactive=True'],
+  ...                input=stringio(''))
   ...     runcommand(server, ['debugprompt', '--config',
   ...                         'ui.interactive=True'],
   ...                input=stringio('5678\n'))
@@ -603,6 +620,11 @@
   ...     runcommand(server, ['debugwritestdout'])
   *** runcommand debuggetpass --config ui.interactive=True
   password: 1234
+  *** runcommand debuggetpass --config ui.interactive=True
+  password: 
+  *** runcommand debuggetpass --config ui.interactive=True
+  password: abort: response expected
+   [255]
   *** runcommand debugprompt --config ui.interactive=True
   prompt: 5678
   *** runcommand debugreadstdin
@@ -804,7 +826,7 @@
   $ echo foo > foo
   $ hg add foo
 
-(failuer before finalization)
+(failure before finalization)
 
   >>> from hgclient import readchannel, runcommand, check
   >>> @check
@@ -823,7 +845,7 @@
   *** runcommand log
   *** runcommand verify -q
 
-(failuer after finalization)
+(failure after finalization)
 
   >>> from hgclient import readchannel, runcommand, check
   >>> @check
--- a/tests/test-commit-amend.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-commit-amend.t	Wed Jan 18 11:43:36 2017 -0500
@@ -614,6 +614,7 @@
   parent:      11:3334b7925910
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
+  trouble:     unstable
   summary:     babar
   
 
@@ -638,7 +639,7 @@
   (no more unresolved files)
   $ hg ci -m 'merge bar'
   $ hg log --config diff.git=1 -pr .
-  changeset:   23:69c24fe01e35
+  changeset:   23:163cfd7219f7
   tag:         tip
   parent:      22:30d96aeaf27b
   parent:      21:1aa437659d19
@@ -657,7 +658,7 @@
    dd
   +=======
   +cc
-  +>>>>>>> merge rev:    1aa437659d19  bar - test: aazzcc
+  +>>>>>>> merge rev:    1aa437659d19 bar - test: aazzcc
   diff --git a/z b/zz
   rename from z
   rename to zz
@@ -671,7 +672,7 @@
   $ HGEDITOR="sh .hg/checkeditform.sh" hg ci --amend -m 'merge bar (amend message)' --edit
   HGEDITFORM=commit.amend.merge
   $ hg log --config diff.git=1 -pr .
-  changeset:   24:cfa2fbef3169
+  changeset:   24:bca52d4ed186
   tag:         tip
   parent:      22:30d96aeaf27b
   parent:      21:1aa437659d19
@@ -690,7 +691,7 @@
    dd
   +=======
   +cc
-  +>>>>>>> merge rev:    1aa437659d19  bar - test: aazzcc
+  +>>>>>>> merge rev:    1aa437659d19 bar - test: aazzcc
   diff --git a/z b/zz
   rename from z
   rename to zz
@@ -704,7 +705,7 @@
   $ hg mv zz z
   $ hg ci --amend -m 'merge bar (undo rename)'
   $ hg log --config diff.git=1 -pr .
-  changeset:   26:c34de68b014c
+  changeset:   26:12594a98ca3f
   tag:         tip
   parent:      22:30d96aeaf27b
   parent:      21:1aa437659d19
@@ -723,7 +724,7 @@
    dd
   +=======
   +cc
-  +>>>>>>> merge rev:    1aa437659d19  bar - test: aazzcc
+  +>>>>>>> merge rev:    1aa437659d19 bar - test: aazzcc
   
   $ hg debugrename z
   z not renamed
@@ -740,9 +741,9 @@
   $ echo aa >> aaa
   $ hg ci -m 'merge bar again'
   $ hg log --config diff.git=1 -pr .
-  changeset:   28:37d40dcef03b
+  changeset:   28:dffde028b388
   tag:         tip
-  parent:      26:c34de68b014c
+  parent:      26:12594a98ca3f
   parent:      27:4c94d5bc65f5
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -775,9 +776,9 @@
   $ hg mv aaa aa
   $ hg ci --amend -m 'merge bar again (undo rename)'
   $ hg log --config diff.git=1 -pr .
-  changeset:   30:537c6d1b3633
+  changeset:   30:18e3ba160489
   tag:         tip
-  parent:      26:c34de68b014c
+  parent:      26:12594a98ca3f
   parent:      27:4c94d5bc65f5
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -817,9 +818,9 @@
   use (c)hanged version, (d)elete, or leave (u)nresolved? c
   $ hg ci -m 'merge bar (with conflicts)'
   $ hg log --config diff.git=1 -pr .
-  changeset:   33:7afcba911942
+  changeset:   33:b4c3035e2544
   tag:         tip
-  parent:      32:6075d69d215d
+  parent:      32:4b216ca5ba97
   parent:      31:67db8847a540
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -829,9 +830,9 @@
   $ hg rm aa
   $ hg ci --amend -m 'merge bar (with conflicts, amended)'
   $ hg log --config diff.git=1 -pr .
-  changeset:   35:376965e47ddd
+  changeset:   35:1205ed810051
   tag:         tip
-  parent:      32:6075d69d215d
+  parent:      32:4b216ca5ba97
   parent:      31:67db8847a540
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -927,7 +928,7 @@
   HG: M: 
   HG: A: foo
   HG: R: 
-  HG: diff -r 376965e47ddd foo
+  HG: diff -r 1205ed810051 foo
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/foo	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
@@ -941,12 +942,12 @@
   HG: M: 
   HG: A: foo y
   HG: R: 
-  HG: diff -r 376965e47ddd foo
+  HG: diff -r 1205ed810051 foo
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/foo	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
   HG: +foo
-  HG: diff -r 376965e47ddd y
+  HG: diff -r 1205ed810051 y
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/y	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
@@ -959,18 +960,18 @@
   HG: M: 
   HG: A: foo y
   HG: R: a
-  HG: diff -r 376965e47ddd a
+  HG: diff -r 1205ed810051 a
   HG: --- a/a	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -1,2 +0,0 @@
   HG: -a
   HG: -a
-  HG: diff -r 376965e47ddd foo
+  HG: diff -r 1205ed810051 foo
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/foo	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
   HG: +foo
-  HG: diff -r 376965e47ddd y
+  HG: diff -r 1205ed810051 y
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/y	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
@@ -983,23 +984,23 @@
   HG: M: 
   HG: A: foo y
   HG: R: a x
-  HG: diff -r 376965e47ddd a
+  HG: diff -r 1205ed810051 a
   HG: --- a/a	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -1,2 +0,0 @@
   HG: -a
   HG: -a
-  HG: diff -r 376965e47ddd foo
+  HG: diff -r 1205ed810051 foo
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/foo	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
   HG: +foo
-  HG: diff -r 376965e47ddd x
+  HG: diff -r 1205ed810051 x
   HG: --- a/x	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -1,1 +0,0 @@
   HG: -x
-  HG: diff -r 376965e47ddd y
+  HG: diff -r 1205ed810051 y
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/y	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
@@ -1014,23 +1015,23 @@
   HG: M: 
   HG: A: foo y
   HG: R: a x
-  HG: diff -r 376965e47ddd a
+  HG: diff -r 1205ed810051 a
   HG: --- a/a	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -1,2 +0,0 @@
   HG: -a
   HG: -a
-  HG: diff -r 376965e47ddd foo
+  HG: diff -r 1205ed810051 foo
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/foo	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
   HG: +foo
-  HG: diff -r 376965e47ddd x
+  HG: diff -r 1205ed810051 x
   HG: --- a/x	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -1,1 +0,0 @@
   HG: -x
-  HG: diff -r 376965e47ddd y
+  HG: diff -r 1205ed810051 y
   HG: --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   HG: +++ b/y	Thu Jan 01 00:00:00 1970 +0000
   HG: @@ -0,0 +1,1 @@
@@ -1146,7 +1147,7 @@
   R olddirname/newfile.py
   $ hg debugindex newdirname/newfile.py
      rev    offset  length  delta linkrev nodeid       p1           p2
-       0         0      88     -1       3 34a4d536c0c0 000000000000 000000000000
+       0         0      89     -1       3 34a4d536c0c0 000000000000 000000000000
 
   $ echo a >> newdirname/commonfile.py
   $ hg ci --amend -m bug
@@ -1154,7 +1155,7 @@
   newdirname/newfile.py renamed from olddirname/newfile.py:690b295714aed510803d3020da9c70fca8336def (glob)
   $ hg debugindex newdirname/newfile.py
      rev    offset  length  delta linkrev nodeid       p1           p2
-       0         0      88     -1       3 34a4d536c0c0 000000000000 000000000000
+       0         0      89     -1       3 34a4d536c0c0 000000000000 000000000000
 
 #if execbit
 
--- a/tests/test-commit-interactive-curses.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-commit-interactive-curses.t	Wed Jan 18 11:43:36 2017 -0500
@@ -318,6 +318,25 @@
   foo
   hello world
   lower
+
+Check spacemovesdown
+
+  $ cat <<EOF >> $HGRCPATH
+  > [experimental]
+  > spacemovesdown = true
+  > EOF
+  $ cat <<EOF >testModeCommands
+  > TOGGLE
+  > TOGGLE
+  > X
+  > EOF
+  $ hg status -q
+  M b
+  M x
+  $ hg commit -i -m "nothing to commit?" -d "0 0"
+  no changes to record
+  [1]
+
 Check ui.interface logic for the chunkselector
 
 The default interface is text
@@ -325,7 +344,7 @@
   $ chunkselectorinterface() {
   > python <<EOF
   > from mercurial import hg, ui, parsers;\
-  > repo = hg.repository(ui.ui(), ".");\
+  > repo = hg.repository(ui.ui.load(), ".");\
   > print repo.ui.interface("chunkselector")
   > EOF
   > }
--- a/tests/test-commit-multiple.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-commit-multiple.t	Wed Jan 18 11:43:36 2017 -0500
@@ -92,7 +92,7 @@
   > def printfiles(repo, rev):
   >     print "revision %s files: %s" % (rev, repo[rev].files())
   > 
-  > repo = hg.repository(ui.ui(), '.')
+  > repo = hg.repository(ui.ui.load(), '.')
   > assert len(repo) == 6, \
   >        "initial: len(repo): %d, expected: 6" % len(repo)
   > 
--- a/tests/test-commit.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-commit.t	Wed Jan 18 11:43:36 2017 -0500
@@ -609,7 +609,7 @@
   $ cat > evil-commit.py <<EOF
   > from mercurial import ui, hg, context, node
   > notrc = u".h\u200cg".encode('utf-8') + '/hgrc'
-  > u = ui.ui()
+  > u = ui.ui.load()
   > r = hg.repository(u, '.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
@@ -633,7 +633,7 @@
   $ cat > evil-commit.py <<EOF
   > from mercurial import ui, hg, context, node
   > notrc = "HG~1/hgrc"
-  > u = ui.ui()
+  > u = ui.ui.load()
   > r = hg.repository(u, '.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
@@ -651,7 +651,7 @@
   $ cat > evil-commit.py <<EOF
   > from mercurial import ui, hg, context, node
   > notrc = "HG8B6C~2/hgrc"
-  > u = ui.ui()
+  > u = ui.ui.load()
   > r = hg.repository(u, '.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, path, '[hooks]\nupdate = echo owned')
@@ -689,4 +689,105 @@
   $ HGEDITOR="sh $TESTTMP/notouching.sh" hg commit
   abort: commit message unchanged
   [255]
+
+test that text below the --- >8 --- special string is ignored
+
+  $ cat <<'EOF' > $TESTTMP/lowercaseline.sh
+  > cat $1 | sed s/LINE/line/ | tee $1.new
+  > mv $1.new $1
+  > EOF
+
+  $ hg init ignore_below_special_string
+  $ cd ignore_below_special_string
+  $ echo foo > foo
+  $ hg add foo
+  $ hg commit -m "foo"
+  $ cat >> .hg/hgrc <<EOF
+  > [committemplate]
+  > changeset.commit = first LINE
+  >     HG: this is customized commit template
+  >     HG: {extramsg}
+  >     HG: ------------------------ >8 ------------------------
+  >     {diff()}
+  > EOF
+  $ echo foo2 > foo2
+  $ hg add foo2
+  $ HGEDITOR="sh $TESTTMP/notouching.sh" hg ci
+  abort: commit message unchanged
+  [255]
+  $ HGEDITOR="sh $TESTTMP/lowercaseline.sh" hg ci
+  first line
+  HG: this is customized commit template
+  HG: Leave message empty to abort commit.
+  HG: ------------------------ >8 ------------------------
+  diff -r e63c23eaa88a foo2
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/foo2	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +foo2
+  $ hg log -T '{desc}\n' -r .
+  first line
+
+test that the special string --- >8 --- isn't recognized when it is not at the
+beginning of a line
+
+  $ cat >> .hg/hgrc <<EOF
+  > [committemplate]
+  > changeset.commit = first LINE2
+  >     another line HG: ------------------------ >8 ------------------------
+  >     HG: this is customized commit template
+  >     HG: {extramsg}
+  >     HG: ------------------------ >8 ------------------------
+  >     {diff()}
+  > EOF
+  $ echo foo >> foo
+  $ HGEDITOR="sh $TESTTMP/lowercaseline.sh" hg ci
+  first line2
+  another line HG: ------------------------ >8 ------------------------
+  HG: this is customized commit template
+  HG: Leave message empty to abort commit.
+  HG: ------------------------ >8 ------------------------
+  diff -r 3661b22b0702 foo
+  --- a/foo	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/foo	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +1,2 @@
+   foo
+  +foo
+  $ hg log -T '{desc}\n' -r .
+  first line2
+  another line HG: ------------------------ >8 ------------------------
+
+also test that this special string isn't accepted when there is some extra text
+at the end
+
+  $ cat >> .hg/hgrc <<EOF
+  > [committemplate]
+  > changeset.commit = first LINE3
+  >     HG: ------------------------ >8 ------------------------foobar
+  >     second line
+  >     HG: this is customized commit template
+  >     HG: {extramsg}
+  >     HG: ------------------------ >8 ------------------------
+  >     {diff()}
+  > EOF
+  $ echo foo >> foo
+  $ HGEDITOR="sh $TESTTMP/lowercaseline.sh" hg ci
+  first line3
+  HG: ------------------------ >8 ------------------------foobar
+  second line
+  HG: this is customized commit template
+  HG: Leave message empty to abort commit.
+  HG: ------------------------ >8 ------------------------
+  diff -r ce648f5f066f foo
+  --- a/foo	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/foo	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +1,3 @@
+   foo
+   foo
+  +foo
+  $ hg log -T '{desc}\n' -r .
+  first line3
+  second line
+
   $ cd ..
+
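
The messages these tests save are produced by two independent strip passes: HG:-prefixed help lines are always removed, and everything from an exact scissors line down is removed too. A sketch of the scissors half, with the exact-match rule that the last two tests pin down (function name ours):

    SCISSORS = 'HG: ------------------------ >8 ------------------------'

    def strip_below_scissors(text):
        kept = []
        for line in text.splitlines():
            # only a line that is exactly the marker cuts the message;
            # trailing text ("...>8 ---foobar") or a mid-line marker stays
            if line == SCISSORS:
                break
            kept.append(line)
        return '\n'.join(kept)
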
--- a/tests/test-completion.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-completion.t	Wed Jan 18 11:43:36 2017 -0500
@@ -109,6 +109,7 @@
   debugsub
   debugsuccessorssets
   debugtemplate
+  debugupgraderepo
   debugwalk
   debugwireargs
 
@@ -274,6 +275,7 @@
   debugsub: rev
   debugsuccessorssets: 
   debugtemplate: rev, define
+  debugupgraderepo: optimize, run
   debugwalk: include, exclude
   debugwireargs: three, four, five, ssh, remotecmd, insecure
   files: rev, print0, include, exclude, template, subrepos
--- a/tests/test-config.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-config.t	Wed Jan 18 11:43:36 2017 -0500
@@ -84,6 +84,32 @@
    }
   ]
 
+Test empty config source:
+
+  $ cat <<EOF > emptysource.py
+  > def reposetup(ui, repo):
+  >     ui.setconfig('empty', 'source', 'value')
+  > EOF
+  $ cp .hg/hgrc .hg/hgrc.orig
+  $ cat <<EOF >> .hg/hgrc
+  > [extensions]
+  > emptysource = `pwd`/emptysource.py
+  > EOF
+
+  $ hg config --debug empty.source
+  read config from: * (glob)
+  none: value
+  $ hg config empty.source -Tjson
+  [
+   {
+    "name": "empty.source",
+    "source": "",
+    "value": "value"
+   }
+  ]
+
+  $ cp .hg/hgrc.orig .hg/hgrc
+
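
The empty-source case arises whenever an extension calls setconfig() without its optional source argument, as the reposetup above does. A sketch of the API involved (the output comments are what the test shows):

    from mercurial import ui as uimod

    u = uimod.ui.load()
    u.setconfig('empty', 'source', 'value')         # source defaults to ''
    print(u.config('empty', 'source'))              # -> value
    print(repr(u.configsource('empty', 'source')))  # -> '' ("none:" in --debug)
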
 Test "%unset"
 
   $ cat >> $HGRCPATH <<EOF
--- a/tests/test-conflict.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-conflict.t	Wed Jan 18 11:43:36 2017 -0500
@@ -55,7 +55,7 @@
   =======
   4
   5
-  >>>>>>> merge rev:    c0c68e4fe667  - test: branch1
+  >>>>>>> merge rev:    c0c68e4fe667 - test: branch1
   Hop we are done.
 
   $ hg status
--- a/tests/test-context.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-context.py	Wed Jan 18 11:43:36 2017 -0500
@@ -7,7 +7,7 @@
     ui as uimod,
 )
 
-u = uimod.ui()
+u = uimod.ui.load()
 
 repo = hg.repository(u, 'test1', create=1)
 os.chdir('test1')
--- a/tests/test-contrib-perf.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-contrib-perf.t	Wed Jan 18 11:43:36 2017 -0500
@@ -50,6 +50,7 @@
    perfancestorset
                  (no help text available)
    perfannotate  (no help text available)
+   perfbdiff     benchmark a bdiff between revisions
    perfbranchmap
                  benchmark the update of a branchmap
    perfcca       (no help text available)
@@ -94,6 +95,8 @@
                  (no help text available)
    perfrawfiles  (no help text available)
    perfrevlog    Benchmark reading a series of revisions from a revlog.
+   perfrevlogchunks
+                 Benchmark operations on revlog chunks.
    perfrevlogrevision
                  Benchmark obtaining a revlog revision.
    perfrevrange  (no help text available)
@@ -112,6 +115,8 @@
   $ hg perfancestors
   $ hg perfancestorset 2
   $ hg perfannotate a
+  $ hg perfbdiff -c 1
+  $ hg perfbdiff --alldata 1
   $ hg perfbranchmap
   $ hg perfcca
   $ hg perfchangegroupchangelog
@@ -141,6 +146,7 @@
   $ hg perfrawfiles 2
   $ hg perfrevlog .hg/store/data/a.i
   $ hg perfrevlogrevision -m 0
+  $ hg perfrevlogchunks -c
   $ hg perfrevrange
   $ hg perfrevset 'all()'
   $ hg perfstartup
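
perfbdiff and perfrevlogchunks join the perf suite above; outside the extension, a rough equivalent of perfbdiff is a timeit loop over the same primitive (inputs here are illustrative):

    import timeit
    from mercurial import bdiff

    a = 'x\n' * 1000
    b = 'x\n' * 500 + 'y\n' + 'x\n' * 499
    print(timeit.timeit(lambda: bdiff.bdiff(a, b), number=1000))
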
--- a/tests/test-convert-darcs.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-convert-darcs.t	Wed Jan 18 11:43:36 2017 -0500
@@ -8,7 +8,7 @@
 
   $ mkdir darcs-repo
   $ cd darcs-repo
-  $ darcs init
+  $ darcs init -q
   $ echo a > a
   $ darcs record -a -l -m p0
   Finished recording patch 'p0'
@@ -43,6 +43,7 @@
   Backing up ./a(*) (glob)
   We have conflicts in the following files:
   ./a
+   (?)
   $ sleep 1
   $ echo e > a
   $ echo f > f
@@ -54,13 +55,13 @@
 
 test file and directory move
 
-  $ darcs mv f ff
+  $ darcs mv -q f ff
 
 Test remove + move
 
-  $ darcs remove dir/d2
+  $ darcs remove -q dir/d2
   $ rm dir/d2
-  $ darcs mv dir dir2
+  $ darcs mv -q dir dir2
   $ darcs record -a -l -m p3
   Finished recording patch 'p3'
 
--- a/tests/test-convert-git.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-convert-git.t	Wed Jan 18 11:43:36 2017 -0500
@@ -374,6 +374,42 @@
   A bar-copied2
     bar
 
+renamelimit config option works
+
+  $ cd git-repo2
+  $ cat >> copy-source << EOF
+  > sc0
+  > sc1
+  > sc2
+  > sc3
+  > sc4
+  > sc5
+  > sc6
+  > EOF
+  $ git add copy-source
+  $ commit -m 'add copy-source'
+  $ cp copy-source source-copy0
+  $ echo 0 >> source-copy0
+  $ cp copy-source source-copy1
+  $ echo 1 >> source-copy1
+  $ git add source-copy0 source-copy1
+  $ commit -a -m 'copy copy-source 2 times'
+  $ cd ..
+
+  $ hg -q convert --config convert.git.renamelimit=1 \
+  > --config convert.git.findcopiesharder=true --datesort git-repo2 fullrepo2
+  $ hg -R fullrepo2 status -C --change master
+  A source-copy0
+  A source-copy1
+
+  $ hg -q convert --config convert.git.renamelimit=100 \
+  > --config convert.git.findcopiesharder=true --datesort git-repo2 fullrepo3
+  $ hg -R fullrepo3 status -C --change master
+  A source-copy0
+    copy-source
+  A source-copy1
+    copy-source
+
 test binary conversion (issue1359)
 
   $ count=19
@@ -446,6 +482,188 @@
   
   
 
+Various combinations of committeractions fail
+
+  $ hg --config convert.git.committeractions=messagedifferent,messagealways convert git-repo4 bad-committer
+  initializing destination bad-committer repository
+  abort: committeractions cannot define both messagedifferent and messagealways
+  [255]
+
+  $ hg --config convert.git.committeractions=dropcommitter,replaceauthor convert git-repo4 bad-committer
+  initializing destination bad-committer repository
+  abort: committeractions cannot define both dropcommitter and replaceauthor
+  [255]
+
+  $ hg --config convert.git.committeractions=dropcommitter,messagealways convert git-repo4 bad-committer
+  initializing destination bad-committer repository
+  abort: committeractions cannot define both dropcommitter and messagealways
+  [255]
+
+custom prefix on messagedifferent works
+
+  $ hg --config convert.git.committeractions=messagedifferent=different: convert git-repo4 git-repo4-hg-messagedifferentprefix
+  initializing destination git-repo4-hg-messagedifferentprefix repository
+  scanning source...
+  sorting...
+  converting...
+  1 addfoo
+  0 addfoo2
+  updating bookmarks
+
+  $ hg -R git-repo4-hg-messagedifferentprefix log -v
+  changeset:   1:2fe0c98a109d
+  bookmark:    master
+  tag:         tip
+  user:        nottest <test@example.org>
+  date:        Mon Jan 01 00:00:21 2007 +0000
+  files:       foo
+  description:
+  addfoo2
+  
+  different: test <test@example.org>
+  
+  
+  changeset:   0:0735477b0224
+  user:        test <test@example.org>
+  date:        Mon Jan 01 00:00:20 2007 +0000
+  files:       foo
+  description:
+  addfoo
+  
+  
+
+messagealways will always add the "committer: " line even if the committer is identical
+
+  $ hg --config convert.git.committeractions=messagealways convert git-repo4 git-repo4-hg-messagealways
+  initializing destination git-repo4-hg-messagealways repository
+  scanning source...
+  sorting...
+  converting...
+  1 addfoo
+  0 addfoo2
+  updating bookmarks
+
+  $ hg -R git-repo4-hg-messagealways log -v
+  changeset:   1:8db057d8cd37
+  bookmark:    master
+  tag:         tip
+  user:        nottest <test@example.org>
+  date:        Mon Jan 01 00:00:21 2007 +0000
+  files:       foo
+  description:
+  addfoo2
+  
+  committer: test <test@example.org>
+  
+  
+  changeset:   0:8f71fe9c98be
+  user:        test <test@example.org>
+  date:        Mon Jan 01 00:00:20 2007 +0000
+  files:       foo
+  description:
+  addfoo
+  
+  committer: test <test@example.org>
+  
+  
+
+custom prefix on messagealways works
+
+  $ hg --config convert.git.committeractions=messagealways=always: convert git-repo4 git-repo4-hg-messagealwaysprefix
+  initializing destination git-repo4-hg-messagealwaysprefix repository
+  scanning source...
+  sorting...
+  converting...
+  1 addfoo
+  0 addfoo2
+  updating bookmarks
+
+  $ hg -R git-repo4-hg-messagealwaysprefix log -v
+  changeset:   1:83c17174de79
+  bookmark:    master
+  tag:         tip
+  user:        nottest <test@example.org>
+  date:        Mon Jan 01 00:00:21 2007 +0000
+  files:       foo
+  description:
+  addfoo2
+  
+  always: test <test@example.org>
+  
+  
+  changeset:   0:2ac9bcb3534a
+  user:        test <test@example.org>
+  date:        Mon Jan 01 00:00:20 2007 +0000
+  files:       foo
+  description:
+  addfoo
+  
+  always: test <test@example.org>
+  
+  
+
+replaceauthor replaces author with committer
+
+  $ hg --config convert.git.committeractions=replaceauthor convert git-repo4 git-repo4-hg-replaceauthor
+  initializing destination git-repo4-hg-replaceauthor repository
+  scanning source...
+  sorting...
+  converting...
+  1 addfoo
+  0 addfoo2
+  updating bookmarks
+
+  $ hg -R git-repo4-hg-replaceauthor log -v
+  changeset:   1:122c1d8999ea
+  bookmark:    master
+  tag:         tip
+  user:        test <test@example.org>
+  date:        Mon Jan 01 00:00:21 2007 +0000
+  files:       foo
+  description:
+  addfoo2
+  
+  
+  changeset:   0:0735477b0224
+  user:        test <test@example.org>
+  date:        Mon Jan 01 00:00:20 2007 +0000
+  files:       foo
+  description:
+  addfoo
+  
+  
+
+dropcommitter removes the committer
+
+  $ hg --config convert.git.committeractions=dropcommitter convert git-repo4 git-repo4-hg-dropcommitter
+  initializing destination git-repo4-hg-dropcommitter repository
+  scanning source...
+  sorting...
+  converting...
+  1 addfoo
+  0 addfoo2
+  updating bookmarks
+
+  $ hg -R git-repo4-hg-dropcommitter log -v
+  changeset:   1:190b2da396cc
+  bookmark:    master
+  tag:         tip
+  user:        nottest <test@example.org>
+  date:        Mon Jan 01 00:00:21 2007 +0000
+  files:       foo
+  description:
+  addfoo2
+  
+  
+  changeset:   0:0735477b0224
+  user:        test <test@example.org>
+  date:        Mon Jan 01 00:00:20 2007 +0000
+  files:       foo
+  description:
+  addfoo
+  
+  
+
 --sourceorder should fail
 
   $ hg convert --sourcesort git-repo4 git-repo4-sourcesort-hg
@@ -714,7 +932,7 @@
   $ COMMIT_OBJ=1c/0ce3c5886f83a1d78a7b517cdff5cf9ca17bdd
   $ mv git-repo4/.git/objects/$COMMIT_OBJ git-repo4/.git/objects/$COMMIT_OBJ.tmp
   $ hg convert git-repo4 git-repo4-broken-hg 2>&1 | grep 'abort:'
-  abort: cannot retrieve number of commits in $TESTTMP/git-repo4/.git
+  abort: cannot retrieve number of commits in $TESTTMP/git-repo4/.git (glob)
   $ mv git-repo4/.git/objects/$COMMIT_OBJ.tmp git-repo4/.git/objects/$COMMIT_OBJ
 damage git repository by renaming a blob object
 
@@ -768,3 +986,182 @@
 
 #endif
 
+Conversion of extra commit metadata to extras works
+
+  $ git init gitextras >/dev/null 2>/dev/null
+  $ cd gitextras
+  $ touch foo
+  $ git add foo
+  $ commit -m initial
+  $ echo 1 > foo
+  $ tree=`git write-tree`
+
+Git doesn't provide a user-facing API to write extra metadata into the
+commit, so create the commit object by hand
+
+  $ git hash-object -t commit -w --stdin << EOF
+  > tree ${tree}
+  > parent ba6b1344e977ece9e00958dbbf17f1f09384b2c1
+  > author test <test@example.com> 1000000000 +0000
+  > committer test <test@example.com> 1000000000 +0000
+  > extra-1 extra-1
+  > extra-2 extra-2 with space
+  > convert_revision 0000aaaabbbbccccddddeeee
+  > 
+  > message with extras
+  > EOF
+  8123727c8361a4117d1a2d80e0c4e7d70c757f18
+
+  $ git reset --hard 8123727c8361a4117d1a2d80e0c4e7d70c757f18 > /dev/null
+
+  $ cd ..
+
+convert will not retain custom metadata keys by default
+
+  $ hg convert gitextras hgextras1
+  initializing destination hgextras1 repository
+  scanning source...
+  sorting...
+  converting...
+  1 initial
+  0 message with extras
+  updating bookmarks
+
+  $ hg -R hgextras1 log --debug -r 1
+  changeset:   1:e13a39880f68479127b2a80fa0b448cc8524aa09
+  bookmark:    master
+  tag:         tip
+  phase:       draft
+  parent:      0:dcb68977c55cd02cbd13b901df65c4b6e7b9c4b9
+  parent:      -1:0000000000000000000000000000000000000000
+  manifest:    0:6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50
+  user:        test <test@example.com>
+  date:        Sun Sep 09 01:46:40 2001 +0000
+  extra:       branch=default
+  extra:       convert_revision=8123727c8361a4117d1a2d80e0c4e7d70c757f18
+  description:
+  message with extras
+  
+  
+
+Attempting to convert a banned extra is disallowed
+
+  $ hg convert --config convert.git.extrakeys=tree,parent gitextras hgextras-banned
+  initializing destination hgextras-banned repository
+  abort: copying of extra key is forbidden: parent, tree
+  [255]
+
+Converting a specific extra works
+
+  $ hg convert --config convert.git.extrakeys=extra-1 gitextras hgextras2
+  initializing destination hgextras2 repository
+  scanning source...
+  sorting...
+  converting...
+  1 initial
+  0 message with extras
+  updating bookmarks
+
+  $ hg -R hgextras2 log --debug -r 1
+  changeset:   1:d40fb205d58597e6ecfd55b16f198be5bf436391
+  bookmark:    master
+  tag:         tip
+  phase:       draft
+  parent:      0:dcb68977c55cd02cbd13b901df65c4b6e7b9c4b9
+  parent:      -1:0000000000000000000000000000000000000000
+  manifest:    0:6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50
+  user:        test <test@example.com>
+  date:        Sun Sep 09 01:46:40 2001 +0000
+  extra:       branch=default
+  extra:       convert_revision=8123727c8361a4117d1a2d80e0c4e7d70c757f18
+  extra:       extra-1=extra-1
+  description:
+  message with extras
+  
+  
+
+Converting multiple extras works
+
+  $ hg convert --config convert.git.extrakeys=extra-1,extra-2 gitextras hgextras3
+  initializing destination hgextras3 repository
+  scanning source...
+  sorting...
+  converting...
+  1 initial
+  0 message with extras
+  updating bookmarks
+
+  $ hg -R hgextras3 log --debug -r 1
+  changeset:   1:0105af33379e7b6491501fd34141b7af700fe125
+  bookmark:    master
+  tag:         tip
+  phase:       draft
+  parent:      0:dcb68977c55cd02cbd13b901df65c4b6e7b9c4b9
+  parent:      -1:0000000000000000000000000000000000000000
+  manifest:    0:6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50
+  user:        test <test@example.com>
+  date:        Sun Sep 09 01:46:40 2001 +0000
+  extra:       branch=default
+  extra:       convert_revision=8123727c8361a4117d1a2d80e0c4e7d70c757f18
+  extra:       extra-1=extra-1
+  extra:       extra-2=extra-2 with space
+  description:
+  message with extras
+  
+  
+
+convert.git.saverev can be disabled to prevent convert_revision from being written
+
+  $ hg convert --config convert.git.saverev=false gitextras hgextras4
+  initializing destination hgextras4 repository
+  scanning source...
+  sorting...
+  converting...
+  1 initial
+  0 message with extras
+  updating bookmarks
+
+  $ hg -R hgextras4 log --debug -r 1
+  changeset:   1:1dcaf4ffe5bee43fa86db2800821f6f0af212c5c
+  bookmark:    master
+  tag:         tip
+  phase:       draft
+  parent:      0:a13935fec4daf06a5a87a7307ccb0fc94f98d06d
+  parent:      -1:0000000000000000000000000000000000000000
+  manifest:    0:6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50
+  user:        test <test@example.com>
+  date:        Sun Sep 09 01:46:40 2001 +0000
+  extra:       branch=default
+  description:
+  message with extras
+  
+  
+
+convert.git.saverev and convert.git.extrakeys can be combined to preserve
+convert_revision from source
+
+  $ hg convert --config convert.git.saverev=false --config convert.git.extrakeys=convert_revision gitextras hgextras5
+  initializing destination hgextras5 repository
+  scanning source...
+  sorting...
+  converting...
+  1 initial
+  0 message with extras
+  updating bookmarks
+
+  $ hg -R hgextras5 log --debug -r 1
+  changeset:   1:574d85931544d4542007664fee3747360e85ee28
+  bookmark:    master
+  tag:         tip
+  phase:       draft
+  parent:      0:a13935fec4daf06a5a87a7307ccb0fc94f98d06d
+  parent:      -1:0000000000000000000000000000000000000000
+  manifest:    0:6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50
+  user:        test <test@example.com>
+  date:        Sun Sep 09 01:46:40 2001 +0000
+  extra:       branch=default
+  extra:       convert_revision=0000aaaabbbbccccddddeeee
+  description:
+  message with extras
+  
+  
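
All of the --config forms exercised in this file can be made persistent in
an hgrc. A minimal sketch combining the options tested above (values are
illustrative):

  [convert]
  git.findcopiesharder = true
  git.renamelimit = 400
  git.extrakeys = extra-1,extra-2
  git.saverev = false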
--- a/tests/test-convert-p4-filetypes.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-convert-p4-filetypes.t	Wed Jan 18 11:43:36 2017 -0500
@@ -307,11 +307,11 @@
 convert
   $ hg convert -s p4 $DEPOTPATH dst
   initializing destination dst repository
+  scanning source...
   reading p4 views
   collecting p4 changelists
   1 initial
   2 keywords
-  scanning source...
   sorting...
   converting...
   1 initial
--- a/tests/test-convert-p4.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-convert-p4.t	Wed Jan 18 11:43:36 2017 -0500
@@ -67,12 +67,12 @@
 convert
   $ hg convert -s p4 $DEPOTPATH dst
   initializing destination dst repository
+  scanning source...
   reading p4 views
   collecting p4 changelists
   1 initial
   2 change a
   3 change b/c
-  scanning source...
   sorting...
   converting...
   2 initial
@@ -98,13 +98,10 @@
 
 convert again
   $ hg convert -s p4 $DEPOTPATH dst
+  scanning source...
   reading p4 views
   collecting p4 changelists
-  1 initial
-  2 change a
-  3 change b/c
   4 change a b/c
-  scanning source...
   sorting...
   converting...
   0 change a b/c
@@ -130,14 +127,10 @@
 
 convert again
   $ hg convert -s p4 $DEPOTPATH dst
+  scanning source...
   reading p4 views
   collecting p4 changelists
-  1 initial
-  2 change a
-  3 change b/c
-  4 change a b/c
   5 add d e f
-  scanning source...
   sorting...
   converting...
   0 add d e f
--- a/tests/test-convert.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-convert.t	Wed Jan 18 11:43:36 2017 -0500
@@ -261,10 +261,58 @@
                     for large projects, and is only effective when
                     "convert.git.similarity" is greater than 0. The default is
                     False.
+      convert.git.renamelimit
+                    perform rename and copy detection up to this many changed
+                    files in a commit. Increasing this will make rename and copy
+                    detection more accurate but will significantly slow down
+                    computation on large projects. The option is only relevant
+                    if "convert.git.similarity" is greater than 0. The default
+                    is "400".
+      convert.git.committeractions
+                    list of actions to take when processing author and committer
+                    values.
+  
+          Git commits have separate author (who wrote the commit) and committer
+          (who applied the commit) fields. Not all destinations support separate
+          author and committer fields (including Mercurial). This config option
+          controls what to do with these author and committer fields during
+          conversion.
+  
+          A value of "messagedifferent" will append a "committer: ..." line to
+          the commit message if the Git committer is different from the author.
+          The prefix of that line can be specified using the syntax
+          "messagedifferent=<prefix>". e.g. "messagedifferent=git-committer:".
+          When a prefix is specified, a space will always be inserted between
+          the prefix and the value.
+  
+          "messagealways" behaves like "messagedifferent" except it will always
+          result in a "committer: ..." line being appended to the commit
+          message. This value is mutually exclusive with "messagedifferent".
+  
+          "dropcommitter" will remove references to the committer. Only
+          references to the author will remain. Actions that add references to
+          the committer will have no effect when this is set.
+  
+          "replaceauthor" will replace the value of the author field with the
+          committer. Other actions that add references to the committer will
+          still take effect when this is set.
+  
+          The default is "messagedifferent".
+  
+      convert.git.extrakeys
+                    list of extra keys from commit metadata to copy to the
+                    destination. Some Git repositories store extra metadata in
+                    commits. By default, this non-default metadata will be lost
+                    during conversion. Setting this config option can retain
+                    that metadata. Some built-in keys such as "parent" and
+                    "branch" are not allowed to be copied.
       convert.git.remoteprefix
                     remote refs are converted as bookmarks with
                     "convert.git.remoteprefix" as a prefix followed by a /. The
                     default is 'remote'.
+      convert.git.saverev
+                    whether to store the original Git commit ID in the metadata
+                    of the destination commit. The default is True.
       convert.git.skipsubmodules
                     does not convert root level .gitmodules files or files with
                     160000 mode indicating a submodule. Default is False.
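
To apply the committeractions documentation above outside of tests, the
same values go under the [convert] section of an hgrc; a sketch mirroring
the prefixed messagealways case:

  [convert]
  git.committeractions = messagealways=always: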
--- a/tests/test-debugcommands.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-debugcommands.t	Wed Jan 18 11:43:36 2017 -0500
@@ -22,6 +22,11 @@
       full      : 44 (100.00%)
       deltas    :  0 ( 0.00%)
   
+  chunks        :  1
+      0x75 (u)  :  1 (100.00%)
+  chunks size   : 44
+      0x75 (u)  : 44 (100.00%)
+  
   avg chain length  : 0
   max chain length  : 0
   compression ratio : 0
--- a/tests/test-devel-warnings.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-devel-warnings.t	Wed Jan 18 11:43:36 2017 -0500
@@ -92,6 +92,7 @@
    */mercurial/dispatch.py:* in dispatch (glob)
    */mercurial/dispatch.py:* in _runcatch (glob)
    */mercurial/dispatch.py:* in callcatch (glob)
+   */mercurial/scmutil.py* in callcatch (glob)
    */mercurial/dispatch.py:* in _runcatchfunc (glob)
    */mercurial/dispatch.py:* in _dispatch (glob)
    */mercurial/dispatch.py:* in runcommand (glob)
@@ -127,6 +128,7 @@
    */mercurial/dispatch.py:* in dispatch (glob)
    */mercurial/dispatch.py:* in _runcatch (glob)
    */mercurial/dispatch.py:* in callcatch (glob)
+   */mercurial/scmutil.py* in callcatch (glob)
    */mercurial/dispatch.py:* in _runcatchfunc (glob)
    */mercurial/dispatch.py:* in _dispatch (glob)
    */mercurial/dispatch.py:* in runcommand (glob)
@@ -150,6 +152,7 @@
    */mercurial/dispatch.py:* in dispatch (glob)
    */mercurial/dispatch.py:* in _runcatch (glob)
    */mercurial/dispatch.py:* in callcatch (glob)
+   */mercurial/scmutil.py* in callcatch (glob)
    */mercurial/dispatch.py:* in _runcatchfunc (glob)
    */mercurial/dispatch.py:* in _dispatch (glob)
    */mercurial/dispatch.py:* in runcommand (glob)
@@ -171,6 +174,6 @@
   ** Mercurial Distributed SCM (*) (glob)
   ** Extensions loaded: * (glob)
   Traceback (most recent call last):
-  RuntimeError: programming error: transaction requires locking
+  mercurial.error.ProgrammingError: transaction requires locking
 
   $ cd ..
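
The new traceback line reflects mercurial.error.ProgrammingError, which
this release introduces for internal invariant violations. A sketch of the
raising idiom (message taken from the test above):

  from mercurial import error
  raise error.ProgrammingError('transaction requires locking')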
--- a/tests/test-diff-unified.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-diff-unified.t	Wed Jan 18 11:43:36 2017 -0500
@@ -229,6 +229,42 @@
   -a
   +b
 
+Git diff, adding extended headers
+
+  $ hg diff --git --config experimental.extendedheader.index=7 --config experimental.extendedheader.similarity=True
+  diff --git a/f1 b/f 1
+  similarity index 0%
+  rename from f1
+  rename to f 1
+  index 7898192..6178079 100644
+  --- a/f1
+  +++ b/f 1	
+  @@ -1,1 +1,1 @@
+  -a
+  +b
+
+  $ hg diff --git --config experimental.extendedheader.index=-1
+  invalid length for extendedheader.index: '-1'
+  diff --git a/f1 b/f 1
+  rename from f1
+  rename to f 1
+  --- a/f1
+  +++ b/f 1	
+  @@ -1,1 +1,1 @@
+  -a
+  +b
+
+  $ hg diff --git --config experimental.extendedheader.index=whatever
+  invalid value for extendedheader.index: 'whatever'
+  diff --git a/f1 b/f 1
+  rename from f1
+  rename to f 1
+  --- a/f1
+  +++ b/f 1	
+  @@ -1,1 +1,1 @@
+  -a
+  +b
+
 Git diff with noprefix
 
   $ hg --config diff.noprefix=True diff --git --nodates
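
The experimental extended-header options exercised in this file can also be
set persistently; a sketch equivalent to the --config flags above:

  [experimental]
  extendedheader.index = 7
  extendedheader.similarity = True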
--- a/tests/test-doctest.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-doctest.py	Wed Jan 18 11:43:36 2017 -0500
@@ -20,6 +20,7 @@
 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
 testmod('mercurial.dispatch')
 testmod('mercurial.encoding')
+testmod('mercurial.formatter')
 testmod('mercurial.hg')
 testmod('mercurial.hgweb.hgwebdir_mod')
 testmod('mercurial.match')
--- a/tests/test-duplicateoptions.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-duplicateoptions.py	Wed Jan 18 11:43:36 2017 -0500
@@ -21,7 +21,7 @@
 
 hgrc.close()
 
-u = uimod.ui()
+u = uimod.ui.load()
 extensions.loadall(u)
 
 globalshort = set()
--- a/tests/test-extension.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-extension.t	Wed Jan 18 11:43:36 2017 -0500
@@ -251,10 +251,10 @@
 
 #if demandimport absimport
 
-Examine whether module loading is delayed until actual refering, even
+Examine whether module loading is delayed until actually referenced, even
 though module is imported with "absolute_import" feature.
 
-Files below in each packages are used for descirbed purpose:
+Files below in each package are used for the described purposes:
 
 - "called": examine whether "from MODULE import ATTR" works correctly
 - "unused": examine whether loading is delayed correctly
@@ -730,23 +730,15 @@
   > EOF
   $ echo "multirevs = multirevs.py" >> $HGRCPATH
 
-  $ hg help multirevs
-  Specifying Multiple Revisions
-  """""""""""""""""""""""""""""
+  $ hg help multirevs | tail
+        bookmark (this works because the last revision of the revset is used):
   
-      When Mercurial accepts more than one revision, they may be specified
-      individually, or provided as a topologically continuous range, separated
-      by the ":" character.
+          hg update :@
   
-      The syntax of range notation is [BEGIN]:[END], where BEGIN and END are
-      revision identifiers. Both BEGIN and END are optional. If BEGIN is not
-      specified, it defaults to revision number 0. If END is not specified, it
-      defaults to the tip. The range ":" thus means "all revisions".
+      - Show diff between tags 1.3 and 1.5 (this works because the first and the
+        last revisions of the revset are used):
   
-      If BEGIN is greater than END, revisions are treated in reverse order.
-  
-      A range acts as a closed interval. This means that a range of 3:5 gives 3,
-      4 and 5. Similarly, a range of 9:6 gives 9, 8, 7, and 6.
+          hg diff -r 1.3::1.5
   
   use 'hg help -c multirevs' to see help for the multirevs command
 
@@ -1045,6 +1037,61 @@
   $ hg help patchbomb
   patchbomb extension - command to send changesets as (a series of) patch emails
   
+  The series is started off with a "[PATCH 0 of N]" introduction, which
+  describes the series as a whole.
+  
+  Each patch email has a Subject line of "[PATCH M of N] ...", using the first
+  line of the changeset description as the subject text. The message contains
+  two or three body parts:
+  
+  - The changeset description.
+  - [Optional] The result of running diffstat on the patch.
+  - The patch itself, as generated by 'hg export'.
+  
+  Each message refers to the first in the series using the In-Reply-To and
+  References headers, so they will show up as a sequence in threaded mail and
+  news readers, and in mail archives.
+  
+  To configure other defaults, add a section like this to your configuration
+  file:
+  
+    [email]
+    from = My Name <my@email>
+    to = recipient1, recipient2, ...
+    cc = cc1, cc2, ...
+    bcc = bcc1, bcc2, ...
+    reply-to = address1, address2, ...
+  
+  Use "[patchbomb]" as configuration section name if you need to override global
+  "[email]" address settings.
+  
+  Then you can use the 'hg email' command to mail a series of changesets as a
+  patchbomb.
+  
+  You can also either configure the method option in the email section to be a
+  sendmail compatible mailer or fill out the [smtp] section so that the
+  patchbomb extension can automatically send patchbombs directly from the
+  commandline. See the [email] and [smtp] sections in hgrc(5) for details.
+  
+  By default, 'hg email' will prompt for a "To" or "CC" header if you do not
+  supply one via configuration or the command line.  You can override this to
+  never prompt by configuring an empty value:
+  
+    [email]
+    cc =
+  
+  You can control the default inclusion of an introduction message with the
+  "patchbomb.intro" configuration option. The configuration is always
+  overwritten by command line flags like --intro and --desc:
+  
+    [patchbomb]
+    intro=auto   # include introduction message if more than 1 patch (default)
+    intro=never  # never include an introduction message
+    intro=always # always include an introduction message
+  
+  You can set patchbomb to always ask for confirmation by setting
+  "patchbomb.confirm" to true.
+  
   (use 'hg help extensions' for information on enabling extensions)
 
 
@@ -1455,48 +1502,6 @@
 
   $ cd ..
 
-Test compatibility with extension commands that don't use @command (issue5137)
-
-  $ hg init deprecated
-  $ cd deprecated
-
-  $ cat <<EOF > deprecatedcmd.py
-  > def deprecatedcmd(repo, ui):
-  >     pass
-  > cmdtable = {
-  >     'deprecatedcmd': (deprecatedcmd, [], ''),
-  > }
-  > EOF
-  $ cat <<EOF > .hg/hgrc
-  > [extensions]
-  > deprecatedcmd = `pwd`/deprecatedcmd.py
-  > mq = !
-  > hgext.mq = !
-  > hgext/mq = !
-  > [alias]
-  > deprecatedalias = deprecatedcmd
-  > EOF
-
-  $ hg deprecatedcmd
-  devel-warn: missing attribute 'norepo', use @command decorator to register 'deprecatedcmd'
-  (compatibility will be dropped after Mercurial-3.8, update your code.) at: * (glob)
-
-  $ hg deprecatedalias
-  devel-warn: missing attribute 'norepo', use @command decorator to register 'deprecatedalias'
-  (compatibility will be dropped after Mercurial-3.8, update your code.) at: * (glob)
-
- no warning unless command is executed:
-
-  $ hg paths
-
- but mq iterates over command table:
-
-  $ hg --config extensions.mq= paths
-  devel-warn: missing attribute 'norepo', use @command decorator to register 'deprecatedcmd'
-  (compatibility will be dropped after Mercurial-3.8, update your code.) at: * (glob)
-
-  $ cd ..
-
 Test synopsis and docstring extending
 
   $ hg init exthelp
--- a/tests/test-filecache.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-filecache.py	Wed Jan 18 11:43:36 2017 -0500
@@ -141,7 +141,7 @@
 def test_filecache_synced():
     # test old behavior that caused filecached properties to go out of sync
     os.system('hg init && echo a >> a && hg ci -qAm.')
-    repo = hg.repository(uimod.ui())
+    repo = hg.repository(uimod.ui.load())
     # first rollback clears the filecache, but changelog to stays in __dict__
     repo.rollback()
     repo.commit('.')
@@ -197,7 +197,7 @@
         repetition = 3
 
         # repeat changing via checkambigatclosing, to examine whether
-        # st_mtime is advanced multiple times as expecetd
+        # st_mtime is advanced multiple times as expected
         for i in xrange(repetition):
             # explicit closing
             fp = scmutil.checkambigatclosing(open(filename, 'a'))
@@ -214,7 +214,7 @@
             continue
 
         # st_mtime should be advanced "repetition * 2" times, because
-        # all changes occured at same time (in sec)
+        # all changes occurred at the same time (in sec)
         expected = (oldstat.st_mtime + repetition * 2) & 0x7fffffff
         if newstat.st_mtime != expected:
             print("'newstat.st_mtime %s is not %s (as %s + %s * 2)" %
--- a/tests/test-filelog.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-filelog.py	Wed Jan 18 11:43:36 2017 -0500
@@ -13,7 +13,7 @@
     ui as uimod,
 )
 
-myui = uimod.ui()
+myui = uimod.ui.load()
 repo = hg.repository(myui, path='.', create=True)
 
 fl = repo.file('foobar')
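
The uimod.ui() -> uimod.ui.load() updates in these Python tests track a ui
construction split: ui.load() builds a ui and reads the usual configuration
files, while a bare ui() now returns an unconfigured instance. The new
idiom, in brief:

  from mercurial import ui as uimod
  u = uimod.ui.load()  # reads system/user config; plain uimod.ui() stays blank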
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-flagprocessor.t	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,165 @@
+# Create server
+  $ hg init server
+  $ cd server
+  $ cat >> .hg/hgrc << EOF
+  > [extensions]
+  > extension=$TESTDIR/flagprocessorext.py
+  > EOF
+  $ cd ../
+
+# Clone server and enable extensions
+  $ hg clone -q server client
+  $ cd client
+  $ cat >> .hg/hgrc << EOF
+  > [extensions]
+  > extension=$TESTDIR/flagprocessorext.py
+  > EOF
+
+# Commit file that will trigger the noop extension
+  $ echo '[NOOP]' > noop
+  $ hg commit -Aqm "noop"
+
+# Commit file that will trigger the base64 extension
+  $ echo '[BASE64]' > base64
+  $ hg commit -Aqm 'base64'
+
+# Commit file that will trigger the gzip extension
+  $ echo '[GZIP]' > gzip
+  $ hg commit -Aqm 'gzip'
+
+# Commit file that will trigger noop and base64
+  $ echo '[NOOP][BASE64]' > noop-base64
+  $ hg commit -Aqm 'noop+base64'
+
+# Commit file that will trigger noop and gzip
+  $ echo '[NOOP][GZIP]' > noop-gzip
+  $ hg commit -Aqm 'noop+gzip'
+
+# Commit file that will trigger base64 and gzip
+  $ echo '[BASE64][GZIP]' > base64-gzip
+  $ hg commit -Aqm 'base64+gzip'
+
+# Commit file that will trigger base64, gzip and noop
+  $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
+  $ hg commit -Aqm 'base64+gzip+noop'
+
+# TEST: ensure the revision data is consistent
+  $ hg cat noop
+  [NOOP]
+  $ hg debugdata noop 0
+  [NOOP]
+
+  $ hg cat -r . base64
+  [BASE64]
+  $ hg debugdata base64 0
+  W0JBU0U2NF0K (no-eol)
+
+  $ hg cat -r . gzip
+  [GZIP]
+  $ hg debugdata gzip 0
+  x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
+
+  $ hg cat -r . noop-base64
+  [NOOP][BASE64]
+  $ hg debugdata noop-base64 0
+  W05PT1BdW0JBU0U2NF0K (no-eol)
+
+  $ hg cat -r . noop-gzip
+  [NOOP][GZIP]
+  $ hg debugdata noop-gzip 0
+  x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
+
+  $ hg cat -r . base64-gzip
+  [BASE64][GZIP]
+  $ hg debugdata base64-gzip 0
+  eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
+
+  $ hg cat -r . base64-gzip-noop
+  [BASE64][GZIP][NOOP]
+  $ hg debugdata base64-gzip-noop 0
+  eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
+
+# Push to the server
+  $ hg push
+  pushing to $TESTTMP/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 7 changesets with 7 changes to 7 files
+
+# Initialize new client (not cloning) and setup extension
+  $ cd ..
+  $ hg init client2
+  $ cd client2
+  $ cat >> .hg/hgrc << EOF
+  > [paths]
+  > default = $TESTTMP/server
+  > [extensions]
+  > extension=$TESTDIR/flagprocessorext.py
+  > EOF
+
+# Pull from server and update to latest revision
+  $ hg pull default
+  pulling from $TESTTMP/server (glob)
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 7 changesets with 7 changes to 7 files
+  (run 'hg update' to get a working copy)
+  $ hg update
+  7 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+# TEST: ensure the revision data is consistent
+  $ hg cat noop
+  [NOOP]
+  $ hg debugdata noop 0
+  [NOOP]
+
+  $ hg cat -r . base64
+  [BASE64]
+  $ hg debugdata base64 0
+  W0JBU0U2NF0K (no-eol)
+
+  $ hg cat -r . gzip
+  [GZIP]
+  $ hg debugdata gzip 0
+  x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
+
+  $ hg cat -r . noop-base64
+  [NOOP][BASE64]
+  $ hg debugdata noop-base64 0
+  W05PT1BdW0JBU0U2NF0K (no-eol)
+
+  $ hg cat -r . noop-gzip
+  [NOOP][GZIP]
+  $ hg debugdata noop-gzip 0
+  x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
+
+  $ hg cat -r . base64-gzip
+  [BASE64][GZIP]
+  $ hg debugdata base64-gzip 0
+  eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
+
+  $ hg cat -r . base64-gzip-noop
+  [BASE64][GZIP][NOOP]
+  $ hg debugdata base64-gzip-noop 0
+  eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
+
+# TEST: ensure a missing processor is handled
+  $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
+  $ hg commit -Aqm 'fail+base64+gzip+noop'
+  abort: missing processor for flag '0x1'!
+  [255]
+
+# TEST: ensure we cannot register several flag processors on the same flag
+  $ cat >> .hg/hgrc << EOF
+  > [extensions]
+  > extension=$TESTDIR/flagprocessorext.py
+  > duplicate=$TESTDIR/flagprocessorext.py
+  > EOF
+  $ echo 'this should fail' > file
+  $ hg commit -Aqm 'add file'
+  abort: cannot register multiple processors on flag '0x8'.
+  [255]
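
flagprocessorext.py, referenced throughout this test, registers per-flag
transforms on revlogs via revlog.addflagprocessor. A sketch of the
registration shape inferred from the behavior tested here (the flag bit,
helper names, and exact hook signatures are assumptions):

  from mercurial import revlog

  FLAG = 1 << 3  # illustrative revlog flag bit

  def readfn(rlog, text):   # transform applied when revision data is read
      return text, True     # (new text, validate hash afterwards)

  def writefn(rlog, text):  # transform applied before data is stored
      return text, True

  def rawfn(rlog, text):    # validate raw (stored) data
      return True

  revlog.addflagprocessor(FLAG, (readfn, writefn, rawfn))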
--- a/tests/test-generaldelta.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-generaldelta.t	Wed Jan 18 11:43:36 2017 -0500
@@ -74,7 +74,7 @@
   $ cd ..
 
 Test "usegeneraldelta" config
-(repo are general delta, but incoming bundle are not re-deltified)
+(repo is general delta, but incoming bundles are not re-deltafied)
 
 delta coming from the server base delta server are not recompressed.
 (also include the aggressive version for comparison)
--- a/tests/test-getbundle.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-getbundle.t	Wed Jan 18 11:43:36 2017 -0500
@@ -264,9 +264,9 @@
 
   $ cat access.log
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - (glob)
+  * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=700b7e19db54103633c4bf4a6a6b6d55f4d50c03+d5f6e1ea452285324836a49d7d3c2a63cfed1d31&heads=13c0170174366b441dc68e8e33757232fa744458+bac16991d12ff45f9dc43c52da1946dfadb83e80 (glob)
+  * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=700b7e19db54103633c4bf4a6a6b6d55f4d50c03+d5f6e1ea452285324836a49d7d3c2a63cfed1d31&heads=13c0170174366b441dc68e8e33757232fa744458+bac16991d12ff45f9dc43c52da1946dfadb83e80 x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
   $ cat error.log
 
--- a/tests/test-globalopts.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-globalopts.t	Wed Jan 18 11:43:36 2017 -0500
@@ -351,11 +351,9 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
-   multirevs     Specifying Multiple Revisions
    patterns      File Name Patterns
    phases        Working with Phases
-   revisions     Specifying Single Revisions
-   revsets       Specifying Revision Sets
+   revisions     Specifying Revisions
    scripting     Using Mercurial from scripts and automation
    subrepos      Subrepositories
    templating    Template Usage
@@ -434,11 +432,9 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
-   multirevs     Specifying Multiple Revisions
    patterns      File Name Patterns
    phases        Working with Phases
-   revisions     Specifying Single Revisions
-   revsets       Specifying Revision Sets
+   revisions     Specifying Revisions
    scripting     Using Mercurial from scripts and automation
    subrepos      Subrepositories
    templating    Template Usage
--- a/tests/test-graft.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-graft.t	Wed Jan 18 11:43:36 2017 -0500
@@ -456,7 +456,7 @@
   c
   =======
   b
-  >>>>>>> graft: 5d205f8b35b6  - bar: 1
+  >>>>>>> graft: 5d205f8b35b6 - bar: 1
   $ echo b > a
   $ hg resolve -m a
   (no more unresolved files)
@@ -1286,3 +1286,28 @@
   $ hg ci -qAmc
   $ hg up -q .~2
   $ hg graft tip -qt:fail
+
+  $ cd ..
+
+Graft a change into a new file previously grafted into a renamed directory
+
+  $ hg init dirmovenewfile
+  $ cd dirmovenewfile
+  $ mkdir a
+  $ echo a > a/a
+  $ hg ci -qAma
+  $ echo x > a/x
+  $ hg ci -qAmx
+  $ hg up -q 0
+  $ hg mv -q a b
+  $ hg ci -qAmb
+  $ hg graft -q 1 # a/x grafted as b/x, but no copy information recorded
+  $ hg up -q 1
+  $ echo y > a/x
+  $ hg ci -qAmy
+  $ hg up -q 3
+  $ hg graft -q 4
+  $ hg status --change .
+  M b/x
+
+  $ cd ..
--- a/tests/test-hardlinks.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hardlinks.t	Wed Jan 18 11:43:36 2017 -0500
@@ -211,6 +211,10 @@
   2 r4/.hg/00changelog.i
   2 r4/.hg/branch
   2 r4/.hg/cache/branch2-served
+  2 r4/.hg/cache/checkisexec
+  3 r4/.hg/cache/checklink (?)
+  ? r4/.hg/cache/checklink-target (glob)
+  2 r4/.hg/cache/checknoexec
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/dirstate
@@ -247,6 +251,9 @@
   2 r4/.hg/00changelog.i
   1 r4/.hg/branch
   2 r4/.hg/cache/branch2-served
+  2 r4/.hg/cache/checkisexec
+  2 r4/.hg/cache/checklink-target
+  2 r4/.hg/cache/checknoexec
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   1 r4/.hg/dirstate
--- a/tests/test-help.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-help.t	Wed Jan 18 11:43:36 2017 -0500
@@ -113,11 +113,9 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
-   multirevs     Specifying Multiple Revisions
    patterns      File Name Patterns
    phases        Working with Phases
-   revisions     Specifying Single Revisions
-   revsets       Specifying Revision Sets
+   revisions     Specifying Revisions
    scripting     Using Mercurial from scripts and automation
    subrepos      Subrepositories
    templating    Template Usage
@@ -190,11 +188,9 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
-   multirevs     Specifying Multiple Revisions
    patterns      File Name Patterns
    phases        Working with Phases
-   revisions     Specifying Single Revisions
-   revsets       Specifying Revision Sets
+   revisions     Specifying Revisions
    scripting     Using Mercurial from scripts and automation
    subrepos      Subrepositories
    templating    Template Usage
@@ -241,7 +237,6 @@
   
       enabled extensions:
   
-       chgserver     command server extension for cHg (EXPERIMENTAL) (?)
        children      command to display child changesets (DEPRECATED)
        rebase        command to move sets of revisions to a different ancestor
   
@@ -832,11 +827,9 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
-   multirevs     Specifying Multiple Revisions
    patterns      File Name Patterns
    phases        Working with Phases
-   revisions     Specifying Single Revisions
-   revsets       Specifying Revision Sets
+   revisions     Specifying Revisions
    scripting     Using Mercurial from scripts and automation
    subrepos      Subrepositories
    templating    Template Usage
@@ -917,6 +910,8 @@
                  show set of successors for revision
    debugtemplate
                  parse and apply a template
+   debugupgraderepo
+                 upgrade a repository to use different features
    debugwalk     show how files match on given patterns
    debugwireargs
                  (no help text available)
@@ -1158,38 +1153,49 @@
 
 Test a help topic
 
-  $ hg help revs
-  Specifying Single Revisions
-  """""""""""""""""""""""""""
-  
-      Mercurial supports several ways to specify individual revisions.
-  
-      A plain integer is treated as a revision number. Negative integers are
-      treated as sequential offsets from the tip, with -1 denoting the tip, -2
-      denoting the revision prior to the tip, and so forth.
-  
-      A 40-digit hexadecimal string is treated as a unique revision identifier.
-  
-      A hexadecimal string less than 40 characters long is treated as a unique
-      revision identifier and is referred to as a short-form identifier. A
-      short-form identifier is only valid if it is the prefix of exactly one
-      full-length identifier.
-  
-      Any other string is treated as a bookmark, tag, or branch name. A bookmark
-      is a movable pointer to a revision. A tag is a permanent name associated
-      with a revision. A branch name denotes the tipmost open branch head of
-      that branch - or if they are all closed, the tipmost closed head of the
-      branch. Bookmark, tag, and branch names must not contain the ":"
-      character.
-  
-      The reserved name "tip" always identifies the most recent revision.
-  
-      The reserved name "null" indicates the null revision. This is the revision
-      of an empty repository, and the parent of revision 0.
-  
-      The reserved name "." indicates the working directory parent. If no
-      working directory is checked out, it is equivalent to null. If an
-      uncommitted merge is in progress, "." is the revision of the first parent.
+  $ hg help dates
+  Date Formats
+  """"""""""""
+  
+      Some commands allow the user to specify a date, e.g.:
+  
+      - backout, commit, import, tag: Specify the commit date.
+      - log, revert, update: Select revision(s) by date.
+  
+      Many date formats are valid. Here are some examples:
+  
+      - "Wed Dec 6 13:18:29 2006" (local timezone assumed)
+      - "Dec 6 13:18 -0600" (year assumed, time offset provided)
+      - "Dec 6 13:18 UTC" (UTC and GMT are aliases for +0000)
+      - "Dec 6" (midnight)
+      - "13:18" (today assumed)
+      - "3:39" (3:39AM assumed)
+      - "3:39pm" (15:39)
+      - "2006-12-06 13:18:29" (ISO 8601 format)
+      - "2006-12-6 13:18"
+      - "2006-12-6"
+      - "12-6"
+      - "12/6"
+      - "12/6/6" (Dec 6 2006)
+      - "today" (midnight)
+      - "yesterday" (midnight)
+      - "now" - right now
+  
+      Lastly, there is Mercurial's internal format:
+  
+      - "1165411109 0" (Wed Dec 6 13:18:29 2006 UTC)
+  
+      This is the internal representation format for dates. The first number is
+      the number of seconds since the epoch (1970-01-01 00:00 UTC). The second
+      is the offset of the local timezone, in seconds west of UTC (negative if
+      the timezone is east of UTC).
+  
+      The log command also accepts date ranges:
+  
+      - "<DATE" - at or before a given date/time
+      - ">DATE" - on or after a given date/time
+      - "DATE to DATE" - a date range, inclusive
+      - "-DAYS" - within a given number of days of today
 
 Test repeated config section name
 
@@ -1280,7 +1286,7 @@
   >     return doc + '\nhelphook1\n'
   > 
   > def extsetup(ui):
-  >     help.addtopichook('revsets', rewrite)
+  >     help.addtopichook('revisions', rewrite)
   > EOF
   $ cat > helphook2.py <<EOF
   > from mercurial import help
@@ -1289,7 +1295,7 @@
   >     return doc + '\nhelphook2\n'
   > 
   > def extsetup(ui):
-  >     help.addtopichook('revsets', rewrite)
+  >     help.addtopichook('revisions', rewrite)
   > EOF
   $ echo '[extensions]' >> $HGRCPATH
   $ echo "helphook1 = `pwd`/helphook1.py" >> $HGRCPATH
@@ -1908,13 +1914,6 @@
   Merge Tools
   </td></tr>
   <tr><td>
-  <a href="/help/multirevs">
-  multirevs
-  </a>
-  </td><td>
-  Specifying Multiple Revisions
-  </td></tr>
-  <tr><td>
   <a href="/help/patterns">
   patterns
   </a>
@@ -1933,14 +1932,7 @@
   revisions
   </a>
   </td><td>
-  Specifying Single Revisions
-  </td></tr>
-  <tr><td>
-  <a href="/help/revsets">
-  revsets
-  </a>
-  </td><td>
-  Specifying Revision Sets
+  Specifying Revisions
   </td></tr>
   <tr><td>
   <a href="/help/scripting">
@@ -2363,7 +2355,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -2538,7 +2529,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -2734,14 +2724,13 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
   </html>
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/revisions"
+  $ get-with-headers.py 127.0.0.1:$HGPORT "help/dates"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2752,7 +2741,7 @@
   <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
   <script type="text/javascript" src="/static/mercurial.js"></script>
   
-  <title>Help: revisions</title>
+  <title>Help: dates</title>
   </head>
   <body>
   
@@ -2776,7 +2765,7 @@
   
   <div class="main">
   <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
-  <h3>Help: revisions</h3>
+  <h3>Help: dates</h3>
   
   <form class="search" action="/log">
   
@@ -2785,51 +2774,61 @@
   number or hash, or <a href="/help/revsets">revset expression</a>.</div>
   </form>
   <div id="doc">
-  <h1>Specifying Single Revisions</h1>
+  <h1>Date Formats</h1>
   <p>
-  Mercurial supports several ways to specify individual revisions.
+  Some commands allow the user to specify a date, e.g.:
   </p>
+  <ul>
+   <li> backout, commit, import, tag: Specify the commit date.
+   <li> log, revert, update: Select revision(s) by date.
+  </ul>
   <p>
-  A plain integer is treated as a revision number. Negative integers are
-  treated as sequential offsets from the tip, with -1 denoting the tip,
-  -2 denoting the revision prior to the tip, and so forth.
+  Many date formats are valid. Here are some examples:
   </p>
-  <p>
-  A 40-digit hexadecimal string is treated as a unique revision
-  identifier.
-  </p>
+  <ul>
+   <li> &quot;Wed Dec 6 13:18:29 2006&quot; (local timezone assumed)
+   <li> &quot;Dec 6 13:18 -0600&quot; (year assumed, time offset provided)
+   <li> &quot;Dec 6 13:18 UTC&quot; (UTC and GMT are aliases for +0000)
+   <li> &quot;Dec 6&quot; (midnight)
+   <li> &quot;13:18&quot; (today assumed)
+   <li> &quot;3:39&quot; (3:39AM assumed)
+   <li> &quot;3:39pm&quot; (15:39)
+   <li> &quot;2006-12-06 13:18:29&quot; (ISO 8601 format)
+   <li> &quot;2006-12-6 13:18&quot;
+   <li> &quot;2006-12-6&quot;
+   <li> &quot;12-6&quot;
+   <li> &quot;12/6&quot;
+   <li> &quot;12/6/6&quot; (Dec 6 2006)
+   <li> &quot;today&quot; (midnight)
+   <li> &quot;yesterday&quot; (midnight)
+   <li> &quot;now&quot; - right now
+  </ul>
   <p>
-  A hexadecimal string less than 40 characters long is treated as a
-  unique revision identifier and is referred to as a short-form
-  identifier. A short-form identifier is only valid if it is the prefix
-  of exactly one full-length identifier.
+  Lastly, there is Mercurial's internal format:
+  </p>
+  <ul>
+   <li> &quot;1165411109 0&quot; (Wed Dec 6 13:18:29 2006 UTC)
+  </ul>
+  <p>
+  This is the internal representation format for dates. The first number
+  is the number of seconds since the epoch (1970-01-01 00:00 UTC). The
+  second is the offset of the local timezone, in seconds west of UTC
+  (negative if the timezone is east of UTC).
   </p>
   <p>
-  Any other string is treated as a bookmark, tag, or branch name. A
-  bookmark is a movable pointer to a revision. A tag is a permanent name
-  associated with a revision. A branch name denotes the tipmost open branch head
-  of that branch - or if they are all closed, the tipmost closed head of the
-  branch. Bookmark, tag, and branch names must not contain the &quot;:&quot; character.
-  </p>
-  <p>
-  The reserved name &quot;tip&quot; always identifies the most recent revision.
+  The log command also accepts date ranges:
   </p>
-  <p>
-  The reserved name &quot;null&quot; indicates the null revision. This is the
-  revision of an empty repository, and the parent of revision 0.
-  </p>
-  <p>
-  The reserved name &quot;.&quot; indicates the working directory parent. If no
-  working directory is checked out, it is equivalent to null. If an
-  uncommitted merge is in progress, &quot;.&quot; is the revision of the first
-  parent.
-  </p>
+  <ul>
+   <li> &quot;&lt;DATE&quot; - at or before a given date/time
+   <li> &quot;&gt;DATE&quot; - on or after a given date/time
+   <li> &quot;DATE to DATE&quot; - a date range, inclusive
+   <li> &quot;-DAYS&quot; - within a given number of days of today
+  </ul>
   
   </div>
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -2926,7 +2925,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -3165,7 +3163,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
--- a/tests/test-hgrc.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgrc.t	Wed Jan 18 11:43:36 2017 -0500
@@ -46,7 +46,6 @@
   default = $TESTTMP/foo%bar (glob)
   $ hg showconfig
   bundle.mainreporoot=$TESTTMP/foobar (glob)
-  extensions.chgserver= (?)
   paths.default=$TESTTMP/foo%bar (glob)
   $ cd ..
 
@@ -81,7 +80,6 @@
 
   $ hg showconfig --config ui.verbose=True --quiet
   bundle.mainreporoot=$TESTTMP
-  extensions.chgserver= (?)
   ui.verbose=False
   ui.debug=False
   ui.quiet=True
@@ -113,7 +111,6 @@
 
   $ hg showconfig
   bundle.mainreporoot=$TESTTMP
-  extensions.chgserver= (?)
   ui.username=$FAKEUSER
 
   $ unset FAKEUSER
@@ -159,7 +156,6 @@
   $TESTTMP/hgrc:13: alias.log=log -g
   repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:11: defaults.identify=-n
-  --config: extensions.chgserver= (?)
   $TESTTMP/hgrc:2: ui.debug=true
   $TESTTMP/hgrc:3: ui.fallbackencoding=ASCII
   $TESTTMP/hgrc:4: ui.quiet=true
@@ -175,7 +171,6 @@
   $ hg showconfig --config ui.traceback=True --debug
   read config from: $TESTTMP/hgrc
   repo: bundle.mainreporoot=$TESTTMP
-  --config: extensions.chgserver= (?)
   --config: ui.traceback=True
   --verbose: ui.verbose=False
   --debug: ui.debug=True
@@ -199,7 +194,6 @@
   read config from: $TESTTMP/hgrc
   repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:15: extensions.plain=./plain.py
-  --config: extensions.chgserver= (?)
   --config: ui.traceback=True
   --verbose: ui.verbose=False
   --debug: ui.debug=True
@@ -210,7 +204,6 @@
   read config from: $TESTTMP/hgrc
   repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:15: extensions.plain=./plain.py
-  --config: extensions.chgserver= (?)
   --config: ui.traceback=True
   --verbose: ui.verbose=False
   --debug: ui.debug=True
@@ -221,7 +214,6 @@
   read config from: $TESTTMP/hgrc
   repo: bundle.mainreporoot=$TESTTMP
   $TESTTMP/hgrc:15: extensions.plain=./plain.py
-  --config: extensions.chgserver= (?)
   --config: ui.traceback=True
   --verbose: ui.verbose=False
   --debug: ui.debug=True
--- a/tests/test-hgweb-auth.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-auth.py	Wed Jan 18 11:43:36 2017 -0500
@@ -15,7 +15,7 @@
     def interactive(self):
         return False
 
-origui = myui()
+origui = myui.load()
 
 def writeauth(items):
     ui = origui.copy()
--- a/tests/test-hgweb-commands.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-commands.t	Wed Jan 18 11:43:36 2017 -0500
@@ -829,7 +829,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -961,7 +960,6 @@
   
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -1068,7 +1066,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -1352,7 +1349,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -1478,7 +1474,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -1690,7 +1685,6 @@
     <td colspan="3"><a class="list" href="/branches?style=gitweb">...</a></td>
   </tr>
   </table>
-  <script type="text/javascript">process_dates()</script>
   <div class="page_footer">
   <div class="page_footer_text">test</div>
   <div class="rss_logo">
@@ -1835,7 +1829,6 @@
       );
   </script>
   
-  <script type="text/javascript">process_dates()</script>
   <div class="page_footer">
   <div class="page_footer_text">test</div>
   <div class="rss_logo">
@@ -1903,7 +1896,7 @@
   $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities'; echo
   200 Script output follows
   
-  lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024
+  lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=*zlib (glob)
 
 heads
 
@@ -2154,6 +2147,8 @@
   bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps
   unbundle=HG10GZ,HG10BZ,HG10UN
   httpheader=1024
+  httpmediatype=0.1rx,0.1tx,0.2tx
+  compression=*zlib (glob)
 
 heads
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hgweb-csp.t	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,129 @@
+#require serve
+
+  $ cat > web.conf << EOF
+  > [paths]
+  > / = $TESTTMP/*
+  > EOF
+
+  $ hg init repo1
+  $ cd repo1
+  $ touch foo
+  $ hg -q commit -A -m initial
+  $ cd ..
+
+  $ hg serve -p $HGPORT -d --pid-file=hg.pid --web-conf web.conf
+  $ cat hg.pid >> $DAEMON_PIDS
+
+repo index should not send Content-Security-Policy header by default
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT '' content-security-policy etag
+  200 Script output follows
+
+static page should not send CSP by default
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT static/mercurial.js content-security-policy etag
+  200 Script output follows
+
+repo page should not send CSP by default, should send ETag
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT repo1 content-security-policy etag
+  200 Script output follows
+  etag: W/"*" (glob)
+
+  $ killdaemons.py
+
+Configure CSP without nonce
+
+  $ cat >> web.conf << EOF
+  > [web]
+  > csp = script-src https://example.com/ 'unsafe-inline'
+  > EOF
+
+  $ hg serve -p $HGPORT -d --pid-file=hg.pid --web-conf web.conf
+  $ cat hg.pid > $DAEMON_PIDS
+
+repo index should send Content-Security-Policy header when enabled
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT '' content-security-policy etag
+  200 Script output follows
+  content-security-policy: script-src https://example.com/ 'unsafe-inline'
+
+static page should send CSP when enabled
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT static/mercurial.js content-security-policy etag
+  200 Script output follows
+  content-security-policy: script-src https://example.com/ 'unsafe-inline'
+
+repo page should send CSP when enabled, and still include an ETag when no nonce is used
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT repo1 content-security-policy etag
+  200 Script output follows
+  content-security-policy: script-src https://example.com/ 'unsafe-inline'
+  etag: W/"*" (glob)
+
+nonce should not be added to html if CSP doesn't use it
+
+  $ get-with-headers.py localhost:$HGPORT repo1/graph/tip | egrep 'content-security-policy|<script'
+  <script type="text/javascript" src="/repo1/static/mercurial.js"></script>
+  <!--[if IE]><script type="text/javascript" src="/repo1/static/excanvas.js"></script><![endif]-->
+  <script type="text/javascript">
+  <script type="text/javascript">
+
+Configure CSP with nonce
+
+  $ killdaemons.py
+  $ cat >> web.conf << EOF
+  > csp = image-src 'self'; script-src https://example.com/ 'nonce-%nonce%'
+  > EOF
+
+  $ hg serve -p $HGPORT -d --pid-file=hg.pid --web-conf web.conf
+  $ cat hg.pid > $DAEMON_PIDS
+
+nonce should be substituted in CSP header
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT '' content-security-policy etag
+  200 Script output follows
+  content-security-policy: image-src 'self'; script-src https://example.com/ 'nonce-*' (glob)
+
+nonce should be included in CSP for static pages
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT static/mercurial.js content-security-policy etag
+  200 Script output follows
+  content-security-policy: image-src 'self'; script-src https://example.com/ 'nonce-*' (glob)
+
+repo page should have nonce, no ETag
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT repo1 content-security-policy etag
+  200 Script output follows
+  content-security-policy: image-src 'self'; script-src https://example.com/ 'nonce-*' (glob)
+
+nonce should be added to html when used
+
+  $ get-with-headers.py localhost:$HGPORT repo1/graph/tip content-security-policy | egrep 'content-security-policy|<script'
+  content-security-policy: image-src 'self'; script-src https://example.com/ 'nonce-*' (glob)
+  <script type="text/javascript" src="/repo1/static/mercurial.js"></script>
+  <!--[if IE]><script type="text/javascript" src="/repo1/static/excanvas.js"></script><![endif]-->
+  <script type="text/javascript" nonce="*"> (glob)
+  <script type="text/javascript" nonce="*"> (glob)
+
+hgweb_mod w/o hgwebdir works as expected
+
+  $ killdaemons.py
+
+  $ hg -R repo1 serve -p $HGPORT -d --pid-file=hg.pid --config "web.csp=image-src 'self'; script-src https://example.com/ 'nonce-%nonce%'"
+  $ cat hg.pid > $DAEMON_PIDS
+
+static page sends CSP
+
+  $ get-with-headers.py --headeronly localhost:$HGPORT static/mercurial.js content-security-policy etag
+  200 Script output follows
+  content-security-policy: image-src 'self'; script-src https://example.com/ 'nonce-*' (glob)
+
+nonce included in <script> and headers
+
+  $ get-with-headers.py localhost:$HGPORT graph/tip content-security-policy  | egrep 'content-security-policy|<script'
+  content-security-policy: image-src 'self'; script-src https://example.com/ 'nonce-*' (glob)
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  <!--[if IE]><script type="text/javascript" src="/static/excanvas.js"></script><![endif]-->
+  <script type="text/javascript" nonce="*"> (glob)
+  <script type="text/javascript" nonce="*"> (glob)
--- a/tests/test-hgweb-descend-empties.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-descend-empties.t	Wed Jan 18 11:43:36 2017 -0500
@@ -139,7 +139,6 @@
   </table>
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -259,7 +258,6 @@
   </table>
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -360,7 +358,6 @@
           
       </table>
   
-      <script type="text/javascript">process_dates()</script>
       <div class="page-footer">
           <p>Mercurial Repository: test</p>
           <ul class="rss-logo">
@@ -470,7 +467,6 @@
   
   </table>
   
-  <script type="text/javascript">process_dates()</script>
   <div class="page_footer">
   <div class="page_footer_text">test</div>
   <div class="rss_logo">
@@ -551,7 +547,6 @@
   </a>
   
   </table>
-  <script type="text/javascript">process_dates()</script>
   
   <div class="logo">
   <a href="https://mercurial-scm.org/">
--- a/tests/test-hgweb-diffs.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-diffs.t	Wed Jan 18 11:43:36 2017 -0500
@@ -162,7 +162,6 @@
   
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -299,7 +298,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -442,7 +440,6 @@
   
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -579,7 +576,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -703,7 +699,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -837,7 +832,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -969,7 +963,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -1096,7 +1089,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
--- a/tests/test-hgweb-empty.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-empty.t	Wed Jan 18 11:43:36 2017 -0500
@@ -109,7 +109,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -220,7 +219,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -374,7 +372,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -454,7 +451,6 @@
   </table>
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
--- a/tests/test-hgweb-filelog.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-filelog.t	Wed Jan 18 11:43:36 2017 -0500
@@ -242,7 +242,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -362,7 +361,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -474,7 +472,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -586,7 +583,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -651,7 +647,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -757,7 +752,6 @@
   
   
   
-  <script type="text/javascript">process_dates()</script>
   
   <div class="logo">
   <a href="https://mercurial-scm.org/">
--- a/tests/test-hgweb-json.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-json.t	Wed Jan 18 11:43:36 2017 -0500
@@ -1593,10 +1593,6 @@
         "topic": "merge-tools"
       },
       {
-        "summary": "Specifying Multiple Revisions",
-        "topic": "multirevs"
-      },
-      {
         "summary": "File Name Patterns",
         "topic": "patterns"
       },
@@ -1605,14 +1601,10 @@
         "topic": "phases"
       },
       {
-        "summary": "Specifying Single Revisions",
+        "summary": "Specifying Revisions",
         "topic": "revisions"
       },
       {
-        "summary": "Specifying Revision Sets",
-        "topic": "revsets"
-      },
-      {
         "summary": "Using Mercurial from scripts and automation",
         "topic": "scripting"
       },
--- a/tests/test-hgweb-removed.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-removed.t	Wed Jan 18 11:43:36 2017 -0500
@@ -131,7 +131,6 @@
   
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -235,7 +234,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
--- a/tests/test-hgweb-symrev.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb-symrev.t	Wed Jan 18 11:43:36 2017 -0500
@@ -186,7 +186,7 @@
   <li><a href="/diff/xyzzy/foo?style=paper">diff</a></li>
   <li><a href="/comparison/xyzzy/foo?style=paper">comparison</a></li>
   <li><a href="/log/xyzzy/foo?style=paper">file log</a></li>
-  <li><a href="/raw-annotate/xyzzy/foo">raw</a></li>
+  <li><a href="/raw-file/xyzzy/foo">raw</a></li>
    annotate foo @ 1:<a href="/rev/a7c1559b7bba?style=paper">a7c1559b7bba</a>
    <td class="author"><a href="/file/43c799df6e75/foo?style=paper">43c799df6e75</a> </td>
    <td class="author"><a href="/file/9d8c40cba617/foo?style=paper">9d8c40cba617</a> </td>
@@ -381,7 +381,7 @@
   <li><a href="/diff/xyzzy/foo?style=coal">diff</a></li>
   <li><a href="/comparison/xyzzy/foo?style=coal">comparison</a></li>
   <li><a href="/log/xyzzy/foo?style=coal">file log</a></li>
-  <li><a href="/raw-annotate/xyzzy/foo">raw</a></li>
+  <li><a href="/raw-file/xyzzy/foo">raw</a></li>
    annotate foo @ 1:<a href="/rev/a7c1559b7bba?style=coal">a7c1559b7bba</a>
    <td class="author"><a href="/file/43c799df6e75/foo?style=coal">43c799df6e75</a> </td>
    <td class="author"><a href="/file/9d8c40cba617/foo?style=coal">9d8c40cba617</a> </td>
@@ -626,7 +626,7 @@
   <a href="/log/xyzzy/foo?style=gitweb">revisions</a> |
   <a href="/diff/xyzzy/foo?style=gitweb">diff</a> |
   <a href="/comparison/xyzzy/foo?style=gitweb">comparison</a> |
-  <a href="/raw-annotate/xyzzy/foo">raw</a> |
+  <a href="/raw-file/xyzzy/foo">raw</a> |
    <td style="font-family:monospace"><a class="list" href="/rev/a7c1559b7bba?style=gitweb">a7c1559b7bba</a></td>
   <a class="list" href="/annotate/43c799df6e75/foo?style=gitweb">
   <a class="list" href="/annotate/9d8c40cba617/foo?style=gitweb">9d8c40cba617</a></td>
@@ -849,7 +849,7 @@
           <li><a href="/log/xyzzy/foo?style=monoblue">revisions</a></li>
           <li><a href="/diff/xyzzy/foo?style=monoblue">diff</a></li>
           <li><a href="/comparison/xyzzy/foo?style=monoblue">comparison</a></li>
-          <li><a href="/raw-annotate/xyzzy/foo">raw</a></li>
+          <li><a href="/raw-file/xyzzy/foo">raw</a></li>
           <dd><a href="/rev/a7c1559b7bba?style=monoblue">a7c1559b7bba</a></dd>
   <a href="/annotate/43c799df6e75/foo?style=monoblue">
   <a href="/annotate/9d8c40cba617/foo?style=monoblue">9d8c40cba617</a>
@@ -1053,7 +1053,7 @@
   <a href="/file/xyzzy/?style=spartan">files</a>
   <a href="/file/xyzzy/foo?style=spartan">file</a>
   <a href="/log/xyzzy/foo?style=spartan">revisions</a>
-  <a href="/raw-annotate/xyzzy/foo">raw</a>
+  <a href="/raw-file/xyzzy/foo">raw</a>
    <td><a href="/rev/a7c1559b7bba?style=spartan">a7c1559b7bba</a></td>
   <a href="/annotate/43c799df6e75/foo?style=spartan">
   <td><a href="/annotate/9d8c40cba617/foo?style=spartan">9d8c40cba617</a></td>
--- a/tests/test-hgweb.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgweb.t	Wed Jan 18 11:43:36 2017 -0500
@@ -99,7 +99,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -207,7 +206,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -318,7 +316,6 @@
   </table>
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
--- a/tests/test-hgwebdir-paths.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgwebdir-paths.py	Wed Jan 18 11:43:36 2017 -0500
@@ -15,7 +15,7 @@
 
 webdir = os.path.realpath('.')
 
-u = uimod.ui()
+u = uimod.ui.load()
 hg.repository(u, 'a', create=1)
 hg.repository(u, 'b', create=1)
 os.chdir('b')
--- a/tests/test-hgwebdir.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-hgwebdir.t	Wed Jan 18 11:43:36 2017 -0500
@@ -898,7 +898,6 @@
   </table>
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -969,7 +968,6 @@
   </table>
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -1407,7 +1405,6 @@
   </table>
   </div>
   </div>
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
--- a/tests/test-highlight.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-highlight.t	Wed Jan 18 11:43:36 2017 -0500
@@ -186,7 +186,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
@@ -236,7 +235,7 @@
   <li><a href="/comparison/tip/primes.py">comparison</a></li>
   <li class="active">annotate</li>
   <li><a href="/log/tip/primes.py">file log</a></li>
-  <li><a href="/raw-annotate/tip/primes.py">raw</a></li>
+  <li><a href="/raw-file/tip/primes.py">raw</a></li>
   </ul>
   <ul>
   <li><a href="/help">help</a></li>
@@ -857,7 +856,6 @@
   </div>
   </div>
   
-  <script type="text/javascript">process_dates()</script>
   
   
   </body>
--- a/tests/test-histedit-obsolete.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-histedit-obsolete.t	Wed Jan 18 11:43:36 2017 -0500
@@ -339,7 +339,7 @@
 
 New-commit as draft (default)
 
-  $ cp -r base simple-draft
+  $ cp -R base simple-draft
   $ cd simple-draft
   $ hg histedit -r 'b449568bf7fc' --commands - << EOF
   > edit b449568bf7fc 11 f
@@ -378,7 +378,7 @@
 
 New-commit as secret (config)
 
-  $ cp -r base simple-secret
+  $ cp -R base simple-secret
   $ cd simple-secret
   $ cat >> .hg/hgrc << EOF
   > [phases]
@@ -425,7 +425,7 @@
 If a secret changeset is put before a draft one, all descendants should be secret.
 It seems more important to present the secret phase.
 
-  $ cp -r base reorder
+  $ cp -R base reorder
   $ cd reorder
   $ hg histedit -r 'b449568bf7fc' --commands - << EOF
   > pick b449568bf7fc 11 f
@@ -462,7 +462,7 @@
 
 Note that there are a few reorderings in this series for more extensive testing
 
-  $ cp -r base folding
+  $ cp -R base folding
   $ cd folding
   $ cat >> .hg/hgrc << EOF
   > [phases]
--- a/tests/test-http-branchmap.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-http-branchmap.t	Wed Jan 18 11:43:36 2017 -0500
@@ -81,7 +81,7 @@
   > sys.stdout = StdoutWrapper(sys.stdout)
   > sys.stderr = StdoutWrapper(sys.stderr)
   > 
-  > myui = ui.ui()
+  > myui = ui.ui.load()
   > repo = hg.repository(myui, 'a')
   > commands.serve(myui, repo, stdio=True, cmdserver=False)
   > EOF
--- a/tests/test-http-bundle1.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-http-bundle1.t	Wed Jan 18 11:43:36 2017 -0500
@@ -238,66 +238,66 @@
 
   $ sed 's/.*] "/"/' < ../access.log
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=stream_out HTTP/1.1" 401 -
-  "GET /?cmd=stream_out HTTP/1.1" 200 -
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=stream_out HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D
-  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
   $ cd ..
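
The x-hgproto-1 entries now present throughout this log are the client
advertising which wire protocol media types and compression engines it
accepts. A hedged sketch of assembling such a header value (the helper
name is illustrative, not a real Mercurial API):

    def buildhgproto1(mediaversions, compengines):
        # e.g. buildhgproto1(['0.1', '0.2'], ['zstd', 'zlib', 'none', 'bzip2'])
        # -> '0.1 0.2 comp=zstd,zlib,none,bzip2'
        parts = list(mediaversions)
        if compengines:
            parts.append('comp=%s' % ','.join(compengines))
        return ' '.join(parts)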
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-http-protocol.t	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,167 @@
+  $ cat >> $HGRCPATH << EOF
+  > [web]
+  > push_ssl = false
+  > allow_push = *
+  > EOF
+
+  $ hg init server
+  $ cd server
+  $ touch a
+  $ hg -q commit -A -m initial
+  $ cd ..
+
+  $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
+  $ cat hg.pid >> $DAEMON_PIDS
+
+compression formats are advertised in compression capability
+
+#if zstd
+  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep compression
+  compression=zstd,zlib
+#else
+  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep compression
+  compression=zlib
+#endif
+
+  $ killdaemons.py
+
+server.compressionengines can replace the engines list wholesale
+
+  $ hg --config server.compressionengines=none -R server serve -p $HGPORT -d --pid-file hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep compression
+  compression=none
+
+  $ killdaemons.py
+
+The order of engines can also be changed
+
+  $ hg --config server.compressionengines=none,zlib -R server serve -p $HGPORT -d --pid-file hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep compression
+  compression=none,zlib
+
+  $ killdaemons.py
+
+Start a default server again
+
+  $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+
+Server should send application/mercurial-0.1 to clients if no Accept is used
+
+  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  200 Script output follows
+  content-type: application/mercurial-0.1
+  date: * (glob)
+  server: * (glob)
+  transfer-encoding: chunked
+
+Server should send application/mercurial-0.1 when client says it wants it
+
+  $ get-with-headers.py --hgproto '0.1' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  200 Script output follows
+  content-type: application/mercurial-0.1
+  date: * (glob)
+  server: * (glob)
+  transfer-encoding: chunked
+
+Server should send application/mercurial-0.2 when client says it wants it
+
+  $ get-with-headers.py --hgproto '0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  200 Script output follows
+  content-type: application/mercurial-0.2
+  date: * (glob)
+  server: * (glob)
+  transfer-encoding: chunked
+
+  $ get-with-headers.py --hgproto '0.1 0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  200 Script output follows
+  content-type: application/mercurial-0.2
+  date: * (glob)
+  server: * (glob)
+  transfer-encoding: chunked
+
+Requesting a compression format that the server doesn't support falls back to 0.1
+
+  $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  200 Script output follows
+  content-type: application/mercurial-0.1
+  date: * (glob)
+  server: * (glob)
+  transfer-encoding: chunked
+
+#if zstd
+zstd is used if available
+
+  $ get-with-headers.py --hgproto '0.2 comp=zstd' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ f --size --hexdump --bytes 36 --sha1 resp
+  resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
+  0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
+  0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 73 74 64 |t follows...zstd|
+  0020: 28 b5 2f fd                                     |(./.|
+
+#endif
+
+application/mercurial-0.2 is not yet used on non-streaming responses
+
+  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=heads' -
+  200 Script output follows
+  content-length: 41
+  content-type: application/mercurial-0.1
+  date: * (glob)
+  server: * (glob)
+  
+  e93700bd72895c5addab234c56d4024b487a362f
+
+Now test protocol preference usage
+
+  $ killdaemons.py
+  $ hg --config server.compressionengines=none,zlib -R server serve -p $HGPORT -d --pid-file hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+
+No Accept header will send 0.1+zlib, even though "none" is preferred, because "none" isn't supported on 0.1
+
+  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
+  200 Script output follows
+  content-type: application/mercurial-0.1
+
+  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ f --size --hexdump --bytes 28 --sha1 resp
+  resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
+  0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
+  0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78             |t follows..x|
+
+Explicit 0.1 will send zlib because "none" isn't supported on 0.1
+
+  $ get-with-headers.py --hgproto '0.1' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ f --size --hexdump --bytes 28 --sha1 resp
+  resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
+  0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
+  0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78             |t follows..x|
+
+0.2 with no compression will get "none" because that is the server's preference
+(spec says ZL and UN are implicitly supported)
+
+  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ f --size --hexdump --bytes 32 --sha1 resp
+  resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
+  0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
+  0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
+
+Client receives the server's preference even if its local order doesn't match
+
+  $ get-with-headers.py --hgproto '0.2 comp=zlib,none' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ f --size --hexdump --bytes 32 --sha1 resp
+  resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
+  0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
+  0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
+
+Client receives its only supported format even if it is not the server's preferred format
+
+  $ get-with-headers.py --hgproto '0.2 comp=zlib' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ f --size --hexdump --bytes 33 --sha1 resp
+  resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
+  0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
+  0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 6c 69 62 |t follows...zlib|
+  0020: 78                                              |x|
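
Taken together, these cases follow one negotiation rule: the server answers
with application/mercurial-0.2 only when the client advertised 0.2 and a
mutually supported compression engine exists, picking the first engine in
the server's preference order that the client accepts; otherwise it falls
back to zlib-framed application/mercurial-0.1. A sketch of that selection,
with illustrative names rather than the real wireproto internals:

    def negotiate(clientprotos, clientcomps, servercomps):
        # clientprotos: media versions from X-HgProto-1, e.g. {'0.1', '0.2'}
        # clientcomps: engines listed after comp= (empty if absent)
        # servercomps: server engines in preference order
        if '0.2' in clientprotos:
            # an explicit comp= list replaces the implicit set; without
            # comp=, zlib (ZL) and uncompressed (UN) are assumed per spec
            supported = set(clientcomps) or {'zlib', 'none'}
            for engine in servercomps:  # server preference wins
                if engine in supported:
                    return 'application/mercurial-0.2', engine
        # no 0.2 offer or no common engine: legacy 0.1 is always zlib
        return 'application/mercurial-0.1', 'zlib'

With the none,zlib server started above, this reproduces every case: no
comp= picks "none", comp=zlib,none picks "none", and comp=zlib alone picks
zlib.
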
--- a/tests/test-http-proxy.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-http-proxy.t	Wed Jan 18 11:43:36 2017 -0500
@@ -101,19 +101,19 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cat proxy.log
   * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=branchmap HTTP/1.1" - - (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=83180e7845de420a1bb46896fd5fe05294f8d629&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=branchmap HTTP/1.1" - - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D83180e7845de420a1bb46896fd5fe05294f8d629 x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=83180e7845de420a1bb46896fd5fe05294f8d629&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
-  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
--- a/tests/test-http.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-http.t	Wed Jan 18 11:43:36 2017 -0500
@@ -229,63 +229,63 @@
 
   $ sed 's/.*] "/"/' < ../access.log
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=stream_out HTTP/1.1" 401 -
-  "GET /?cmd=stream_out HTTP/1.1" 200 -
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d
-  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=phases%2Cbookmarks
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=stream_out HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D
-  "GET /?cmd=getbundle HTTP/1.1" 401 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=phases%2Cbookmarks
-  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=phases%2Cbookmarks
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=getbundle HTTP/1.1" 401 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=phases%2Cbookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
-  "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces
+  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872
-  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob)
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
   $ cd ..
 
--- a/tests/test-https.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-https.t	Wed Jan 18 11:43:36 2017 -0500
@@ -379,7 +379,7 @@
 
 Fingerprints
 
-- works without cacerts (hostkeyfingerprints)
+- works without cacerts (hostfingerprints)
   $ hg -R copy-pull id https://localhost:$HGPORT/ --insecure --config hostfingerprints.localhost=ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
   5fed3813f7f5
--- a/tests/test-import.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-import.t	Wed Jan 18 11:43:36 2017 -0500
@@ -431,10 +431,10 @@
   parent: 0
 
 Test that "hg rollback" doesn't restore dirstate to one at the
-beginning of the rollbacked transaction in not-"parent-gone" case.
+beginning of the rolled back transaction in not-"parent-gone" case.
 
 invoking pretxncommit hook will cause marking '.hg/dirstate' as a file
-to be restored at rollbacking, after DirstateTransactionPlan (see wiki
+to be restored when rolling back, after DirstateTransactionPlan (see wiki
 page for detail).
 
   $ hg --cwd b branch -q foobar
@@ -451,7 +451,7 @@
   $ hg --cwd b update -q -C 0
   $ hg --cwd b --config extensions.strip= strip -q 1
 
-Test visibility of in-memory distate changes inside transaction to
+Test visibility of in-memory dirstate changes inside transaction to
 external process
 
   $ echo foo > a/foo
@@ -1517,6 +1517,13 @@
   |
   o  initial [Babar] 2: +8/-0
   
+Adding these config options should not change the output of diffstat. Bugfix #4755.
+
+  $ hg log -r . --template '{diffstat}\n'
+  1: +1/-0
+  $ hg log -r . --template '{diffstat}\n' --config diff.git=1 \
+  >   --config diff.noprefix=1
+  1: +1/-0
 
 Importing with some success and some errors:
 
--- a/tests/test-install.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-install.t	Wed Jan 18 11:43:36 2017 -0500
@@ -11,6 +11,9 @@
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
   checking installed modules (*mercurial)... (glob)
+  checking registered compression engines (*zlib*) (glob)
+  checking available compression engines (*zlib*) (glob)
+  checking available compression engines for wire protocol (*zlib*) (glob)
   checking templates (*mercurial?templates)... (glob)
   checking default template (*mercurial?templates?map-cmdline.default) (glob)
   checking commit editor... (* -c "import sys; sys.exit(0)") (glob)
@@ -21,6 +24,9 @@
   $ hg debuginstall -Tjson | sed 's|\\\\|\\|g'
   [
    {
+    "compengines": ["bz2", "bz2truncated", "none", "zlib"*], (glob)
+    "compenginesavail": ["bz2", "bz2truncated", "none", "zlib"*], (glob)
+    "compenginesserver": [*"zlib"*], (glob)
     "defaulttemplate": "*mercurial?templates?map-cmdline.default", (glob)
     "defaulttemplateerror": null,
     "defaulttemplatenotfound": "default",
@@ -58,6 +64,9 @@
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
   checking installed modules (*mercurial)... (glob)
+  checking registered compression engines (*zlib*) (glob)
+  checking available compression engines (*zlib*) (glob)
+  checking available compression engines for wire protocol (*zlib*) (glob)
   checking templates (*mercurial?templates)... (glob)
   checking default template (*mercurial?templates?map-cmdline.default) (glob)
   checking commit editor... (* -c "import sys; sys.exit(0)") (glob)
@@ -85,6 +94,9 @@
   checking Mercurial custom build (*) (glob)
   checking module policy (*) (glob)
   checking installed modules (*mercurial)... (glob)
+  checking registered compression engines (*zlib*) (glob)
+  checking available compression engines (*zlib*) (glob)
+  checking available compression engines for wire protocol (*zlib*) (glob)
   checking templates (*mercurial?templates)... (glob)
   checking default template (*mercurial?templates?map-cmdline.default) (glob)
   checking commit editor... (* -c "import sys; sys.exit(0)") (glob)
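
The three new check lines distinguish engines that are merely registered,
engines whose backing module actually loads in this build, and engines
usable on the wire protocol. A sketch using a hypothetical registry shape
(not the actual mercurial.util API) to show the three views:

    class compengine(object):
        def __init__(self, name, loadable, wiresupport):
            self.name = name
            self.loadable = loadable        # backing module importable?
            self.wiresupport = wiresupport  # has a wire protocol id?

    ENGINES = [
        compengine('zlib', True, True),
        compengine('none', True, True),
        compengine('bz2', True, True),    # 'bzip2' on the wire
        compengine('zstd', False, True),  # loadable only when built
    ]

    registered = [e.name for e in ENGINES]
    available = [e.name for e in ENGINES if e.loadable]
    wireserver = [e.name for e in ENGINES if e.loadable and e.wiresupport]
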
--- a/tests/test-keyword.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-keyword.t	Wed Jan 18 11:43:36 2017 -0500
@@ -1117,7 +1117,7 @@
   bar
   =======
   foo
-  >>>>>>> merge rev:    85d2d2d732a5  - test: simplemerge
+  >>>>>>> merge rev:    85d2d2d732a5 - test: simplemerge
 
 resolve to local, m must contain hash of last change (local parent)
 
--- a/tests/test-largefiles-wireproto.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-largefiles-wireproto.t	Wed Jan 18 11:43:36 2017 -0500
@@ -347,7 +347,7 @@
   searching 2 changesets for largefiles
   verified existence of 2 revisions of 2 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a (glob)
+  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   $ hg -R batchverifyclone update
   getting changed largefiles
   2 largefiles updated, 0 removed
@@ -384,11 +384,11 @@
   searching 3 changesets for largefiles
   verified existence of 3 revisions of 3 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c (glob)
+  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
   $ killdaemons.py
 
-largefiles should not ask for password again after succesfull authorization
+largefiles should not ask for password again after successful authorization
 
   $ hg init credentialmain
   $ cd credentialmain
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-linerange.py	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,232 @@
+from __future__ import absolute_import
+
+import unittest
+from mercurial import error, mdiff
+
+# for readability, line numbers are 0-origin
+text1 = '''
+           00 at OLD
+           01 at OLD
+           02 at OLD
+02 at NEW, 03 at OLD
+03 at NEW, 04 at OLD
+04 at NEW, 05 at OLD
+05 at NEW, 06 at OLD
+           07 at OLD
+           08 at OLD
+           09 at OLD
+           10 at OLD
+           11 at OLD
+'''[1:] # strip initial LF
+
+text2 = '''
+00 at NEW
+01 at NEW
+02 at NEW, 03 at OLD
+03 at NEW, 04 at OLD
+04 at NEW, 05 at OLD
+05 at NEW, 06 at OLD
+06 at NEW
+07 at NEW
+08 at NEW
+09 at NEW
+10 at NEW
+11 at NEW
+'''[1:] # strip initial LF
+
+def filteredblocks(blocks, rangeb):
+    """return `rangea` extracted from `blocks` coming from
+    `mdiff.blocksinrange` along with the mask of blocks within rangeb.
+    """
+    filtered, rangea = mdiff.blocksinrange(blocks, rangeb)
+    skipped = [b not in filtered for b in blocks]
+    return rangea, skipped
+
+class blocksinrangetests(unittest.TestCase):
+
+    def setUp(self):
+        self.blocks = list(mdiff.allblocks(text1, text2))
+        assert self.blocks == [
+            ([0, 3, 0, 2], '!'),
+            ((3, 7, 2, 6), '='),
+            ([7, 12, 6, 12], '!'),
+            ((12, 12, 12, 12), '='),
+        ], self.blocks
+
+    def testWithinEqual(self):
+        """linerange within an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #        ^^
+        linerange2 = (3, 5)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (4, 6))
+        self.assertEqual(skipped, [True, False, True, True])
+
+    def testWithinEqualStrictly(self):
+        """linerange matching exactly an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #       ^^^^
+        linerange2 = (2, 6)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (3, 7))
+        self.assertEqual(skipped, [True, False, True, True])
+
+    def testWithinEqualLowerbound(self):
+        """linerange at beginning of an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #       ^^
+        linerange2 = (2, 4)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (3, 5))
+        self.assertEqual(skipped, [True, False, True, True])
+
+    def testWithinEqualLowerboundOneline(self):
+        """oneline-linerange at beginning of an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #       ^
+        linerange2 = (2, 3)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (3, 4))
+        self.assertEqual(skipped, [True, False, True, True])
+
+    def testWithinEqualUpperbound(self):
+        """linerange at end of an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #        ^^^
+        linerange2 = (3, 6)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (4, 7))
+        self.assertEqual(skipped, [True, False, True, True])
+
+    def testWithinEqualUpperboundOneLine(self):
+        """oneline-linerange at end of an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #          ^
+        linerange2 = (5, 6)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (6, 7))
+        self.assertEqual(skipped, [True, False, True, True])
+
+    def testWithinFirstBlockNeq(self):
+        """linerange within the first "!" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #     ^
+        #      |           (empty)
+        #      ^
+        #     ^^
+        for linerange2 in [
+            (0, 1),
+            (1, 1),
+            (1, 2),
+            (0, 2),
+        ]:
+            linerange1, skipped = filteredblocks(self.blocks, linerange2)
+            self.assertEqual(linerange1, (0, 3))
+            self.assertEqual(skipped, [False, True, True, True])
+
+    def testWithinLastBlockNeq(self):
+        """linerange within the last "!" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #           ^
+        #            ^
+        #           |      (empty)
+        #           ^^^^^^
+        #                ^
+        for linerange2 in [
+            (6, 7),
+            (7, 8),
+            (7, 7),
+            (6, 12),
+            (11, 12),
+        ]:
+            linerange1, skipped = filteredblocks(self.blocks, linerange2)
+            self.assertEqual(linerange1, (7, 12))
+            self.assertEqual(skipped, [True, True, False, True])
+
+    def testAcrossTwoBlocks(self):
+        """linerange across two blocks"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #      ^^^^
+        linerange2 = (1, 5)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (0, 6))
+        self.assertEqual(skipped, [False, False, True, True])
+
+    def testCrossingSeveralBlocks(self):
+        """linerange accross three blocks"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #      ^^^^^^^
+        linerange2 = (1, 8)
+        linerange1, skipped = filteredblocks(self.blocks, linerange2)
+        self.assertEqual(linerange1, (0, 12))
+        self.assertEqual(skipped, [False, False, False, True])
+
+    def testStartInEqBlock(self):
+        """linerange starting in an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #          ^^^^
+        #         ^^^^^^^
+        for linerange2, expectedlinerange1 in [
+            ((5, 9), (6, 12)),
+            ((4, 11), (5, 12)),
+        ]:
+            linerange1, skipped = filteredblocks(self.blocks, linerange2)
+            self.assertEqual(linerange1, expectedlinerange1)
+            self.assertEqual(skipped, [True, False, False, True])
+
+    def testEndInEqBlock(self):
+        """linerange ending in an "=" block"""
+        # IDX 0         1
+        #     012345678901
+        # SRC NNOOOONNNNNN (New/Old)
+        #      ^^
+        #     ^^^^^
+        for linerange2, expectedlinerange1 in [
+            ((1, 3), (0, 4)),
+            ((0, 4), (0, 5)),
+        ]:
+            linerange1, skipped = filteredblocks(self.blocks, linerange2)
+            self.assertEqual(linerange1, expectedlinerange1)
+            self.assertEqual(skipped, [False, False, True, True])
+
+    def testOutOfRange(self):
+        """linerange exceeding file size"""
+        exctype = error.Abort
+        for linerange2 in [
+            (0, 34),
+            (15, 12),
+        ]:
+            # Could be `with self.assertRaises(error.Abort)` but python2.6
+            # does not have assertRaises context manager.
+            try:
+                mdiff.blocksinrange(self.blocks, linerange2)
+            except exctype as exc:
+                self.assertTrue('line range exceeds file size' in str(exc))
+            else:
+                self.fail('%s not raised' % exctype.__name__)
+
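+# A note on the contract exercised above (inferred from the assertions
+# rather than from any API documentation): mdiff.blocksinrange(blocks,
+# linerange2) takes the "=" / "!" blocks of a diff and a line range in the
+# new file, and selects the blocks overlapping that range together with
+# the enclosing line range in the old file. The filteredblocks() helper
+# defined earlier in this file is assumed to reduce that result to
+# (linerange1, skipped), where skipped[i] is False exactly for the blocks
+# that overlap the requested range.
+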
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/test-merge-changedelete.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-merge-changedelete.t	Wed Jan 18 11:43:36 2017 -0500
@@ -107,7 +107,7 @@
   changed2
   =======
   changed1
-  >>>>>>> merge rev:    10f9a0a634e8  - test: removed file1, changed file2, cha...
+  >>>>>>> merge rev:    10f9a0a634e8 - test: removed file1, changed file2, chan...
 
 
 Interactive merge:
@@ -171,7 +171,7 @@
   changed2
   =======
   changed1
-  >>>>>>> merge rev:    10f9a0a634e8  - test: removed file1, changed file2, cha...
+  >>>>>>> merge rev:    10f9a0a634e8 - test: removed file1, changed file2, chan...
 
 
 Interactive merge with bad input:
@@ -247,7 +247,7 @@
   changed2
   =======
   changed1
-  >>>>>>> merge rev:    10f9a0a634e8  - test: removed file1, changed file2, cha...
+  >>>>>>> merge rev:    10f9a0a634e8 - test: removed file1, changed file2, chan...
 
 
 Interactive merge with not enough input:
@@ -310,7 +310,7 @@
   changed2
   =======
   changed1
-  >>>>>>> merge rev:    10f9a0a634e8  - test: removed file1, changed file2, cha...
+  >>>>>>> merge rev:    10f9a0a634e8 - test: removed file1, changed file2, chan...
 
 Choose local versions of files
 
@@ -635,7 +635,7 @@
   ||||||| base
   =======
   changed1
-  >>>>>>> merge rev:    10f9a0a634e8  - test: removed file1, changed file2, cha...
+  >>>>>>> merge rev:    10f9a0a634e8 - test: removed file1, changed file2, chan...
 
 Exercise transitions between local, other, fail and prompt, and make sure the
 dirstate stays consistent. (Compare with each other and to the above
--- a/tests/test-merge-force.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-merge-force.t	Wed Jan 18 11:43:36 2017 -0500
@@ -379,7 +379,7 @@
   content1
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M content1_content2_content1_content4-untracked
   content2
@@ -409,7 +409,7 @@
   content1
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M content1_content2_content2_content4-untracked
   content2
@@ -439,7 +439,7 @@
   content1
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M content1_content2_content3_content3-untracked
   content2
@@ -451,7 +451,7 @@
   content1
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M content1_content2_content3_content4-untracked
   content2
@@ -481,7 +481,7 @@
   content1
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M content1_content2_missing_content4-untracked
   content2
@@ -564,7 +564,7 @@
   ||||||| base
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M missing_content2_content2_content4-untracked
   content2
@@ -587,7 +587,7 @@
   ||||||| base
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M missing_content2_content3_content3-untracked
   content2
@@ -598,7 +598,7 @@
   ||||||| base
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M missing_content2_content3_content4-untracked
   content2
@@ -621,7 +621,7 @@
   ||||||| base
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M missing_content2_missing_content4-untracked
   <<<<<<< working copy: 0447570f1af6 - test: local
@@ -629,7 +629,7 @@
   ||||||| base
   =======
   content2
-  >>>>>>> merge rev:    85100b8c675b  - test: remote
+  >>>>>>> merge rev:    85100b8c675b - test: remote
   
   M missing_content2_missing_missing-tracked
   content2
--- a/tests/test-merge-revert2.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-merge-revert2.t	Wed Jan 18 11:43:36 2017 -0500
@@ -56,7 +56,7 @@
   @@ -1,3 +1,7 @@
    added file1
    another line of text
-  +<<<<<<< working copy: c3fa057dd86f  - test: added file1 and file2
+  +<<<<<<< working copy: c3fa057dd86f - test: added file1 and file2
   +changed file1 different
   +=======
    changed file1
--- a/tests/test-merge-tools.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-merge-tools.t	Wed Jan 18 11:43:36 2017 -0500
@@ -75,11 +75,11 @@
   [1]
   $ aftermerge
   # cat f
-  <<<<<<< working copy: ef83787e2614  - test: revision 1
+  <<<<<<< working copy: ef83787e2614 - test: revision 1
   revision 1
   =======
   revision 2
-  >>>>>>> merge rev:    0185f4e0cf02  - test: revision 2
+  >>>>>>> merge rev:    0185f4e0cf02 - test: revision 2
   space
   # hg stat
   M f
@@ -935,7 +935,7 @@
   # hg update -C 1
   $ hg merge -r 4 --config merge-tools.true.premerge=keep
   merging f
-  <<<<<<< working copy: ef83787e2614  - test: revision 1
+  <<<<<<< working copy: ef83787e2614 - test: revision 1
   revision 1
   space
   =======
@@ -948,7 +948,7 @@
   (branch merge, don't forget to commit)
   $ aftermerge
   # cat f
-  <<<<<<< working copy: ef83787e2614  - test: revision 1
+  <<<<<<< working copy: ef83787e2614 - test: revision 1
   revision 1
   space
   =======
@@ -969,7 +969,7 @@
   # hg update -C 1
   $ hg merge -r 4 --config merge-tools.true.premerge=keep-merge3
   merging f
-  <<<<<<< working copy: ef83787e2614  - test: revision 1
+  <<<<<<< working copy: ef83787e2614 - test: revision 1
   revision 1
   space
   ||||||| base
@@ -985,7 +985,7 @@
   (branch merge, don't forget to commit)
   $ aftermerge
   # cat f
-  <<<<<<< working copy: ef83787e2614  - test: revision 1
+  <<<<<<< working copy: ef83787e2614 - test: revision 1
   revision 1
   space
   ||||||| base
@@ -1209,3 +1209,15 @@
   [1]
 
 #endif
+
+Verify naming of temporary files and that the extension is preserved:
+
+  $ hg update -q -C 1
+  $ hg mv f f.txt
+  $ hg ci -qm "f.txt"
+  $ hg update -q -C 2
+  $ hg merge -y -r tip --tool echo --config merge-tools.echo.args='$base $local $other $output'
+  merging f and f.txt to f.txt
+  */f~base.?????? $TESTTMP/f.txt.orig */f~other.??????.txt $TESTTMP/f.txt (glob)
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
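+
+(With --tool echo and args='$base $local $other $output', the "merge tool"
+simply prints the four paths it is handed; the temporary files carry a
+random six-character suffix, matched above with "??????", and only the
+"other" temporary preserves the renamed file's .txt extension.)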
--- a/tests/test-merge-types.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-merge-types.t	Wed Jan 18 11:43:36 2017 -0500
@@ -367,14 +367,14 @@
   2
   =======
   1
-  >>>>>>> merge rev:    2e60aa20b912  - test: 1
+  >>>>>>> merge rev:    2e60aa20b912 - test: 1
   $ tellmeabout b
   b is a plain file with content:
   <<<<<<< working copy: 0c617753b41b - test: 2
   2
   =======
   1
-  >>>>>>> merge rev:    2e60aa20b912  - test: 1
+  >>>>>>> merge rev:    2e60aa20b912 - test: 1
   $ tellmeabout c
   c is a plain file with content:
   x
@@ -418,14 +418,14 @@
   [1]
   $ tellmeabout a
   a is a plain file with content:
-  <<<<<<< working copy: 2e60aa20b912  - test: 1
+  <<<<<<< working copy: 2e60aa20b912 - test: 1
   1
   =======
   2
   >>>>>>> merge rev:    0c617753b41b - test: 2
   $ tellmeabout b
   b is an executable file with content:
-  <<<<<<< working copy: 2e60aa20b912  - test: 1
+  <<<<<<< working copy: 2e60aa20b912 - test: 1
   1
   =======
   2
--- a/tests/test-merge7.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-merge7.t	Wed Jan 18 11:43:36 2017 -0500
@@ -99,7 +99,7 @@
 
   $ cat test.txt
   one
-  <<<<<<< working copy: 50c3a7e29886  - test: Merge 1
+  <<<<<<< working copy: 50c3a7e29886 - test: Merge 1
   two-point-five
   =======
   two-point-one
--- a/tests/test-mq-qrefresh-replace-log-message.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-mq-qrefresh-replace-log-message.t	Wed Jan 18 11:43:36 2017 -0500
@@ -199,7 +199,7 @@
   
   test saving last-message.txt
 
-Test visibility of in-memory distate changes outside transaction to
+Test visibility of in-memory dirstate changes outside transaction to
 external process
 
   $ cat > $TESTTMP/checkvisibility.sh <<EOF
--- a/tests/test-mq.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-mq.t	Wed Jan 18 11:43:36 2017 -0500
@@ -888,9 +888,10 @@
   $ hg log -r 'mq()' --template '{rev}\n'
   1
   2
-  $ hg help revsets | grep -i mq
+  $ hg help revisions.mq
       "mq()"
         Changesets managed by MQ.
+  
 
 bad node in status
 
--- a/tests/test-obsolete-checkheads.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-obsolete-checkheads.t	Wed Jan 18 11:43:36 2017 -0500
@@ -23,7 +23,7 @@
   $ mkcommit base
   $ hg phase --public .
   $ cd ..
-  $ cp -r remote base
+  $ cp -R remote base
   $ hg clone remote local
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -54,7 +54,7 @@
   |/
   o  b4952fcf48cf (public) add base
   
-  $ cp -r ../remote ../backup1
+  $ cp -R ../remote ../backup1
 
 old exists remotely as draft. It is obsoleted by new that we now push.
 Push should not warn about creating new head
@@ -73,7 +73,7 @@
 setup
 
   $ rm -fr ../remote
-  $ cp -r ../backup1 ../remote
+  $ cp -R ../backup1 ../remote
   $ hg -R ../remote phase --public c70b08862e08
   $ hg pull -v
   pulling from $TESTTMP/remote (glob)
@@ -104,7 +104,7 @@
 # setup
 #
 #   $ rm -fr ../remote
-#   $ cp -r ../backup1 ../remote
+#   $ cp -R ../backup1 ../remote
 #   $ hg -R ../remote phase --public c70b08862e08
 #   $ hg phase --draft --force c70b08862e08
 #   $ hg log -G --hidden
@@ -131,7 +131,7 @@
 setup
 
   $ rm -fr ../remote
-  $ cp -r ../backup1 ../remote
+  $ cp -R ../backup1 ../remote
   $ hg phase --draft --force '(0::) - 0'
   $ hg up -q '.^'
   $ mkcommit other
@@ -206,7 +206,7 @@
   |/
   @  b4952fcf48cf (public) add base
   
-  $ cp -r ../remote ../backup2
+  $ cp -R ../remote ../backup2
 
 Push should not warn about adding new heads. We create one, but we'll delete
 one anyway.
@@ -226,7 +226,7 @@
 setup
 
   $ rm -fr ../remote
-  $ cp -r ../backup1 ../remote
+  $ cp -R ../backup1 ../remote
   $ cd ..
   $ rm -rf local
   $ hg clone remote local -r 0
--- a/tests/test-obsolete.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-obsolete.t	Wed Jan 18 11:43:36 2017 -0500
@@ -3,7 +3,7 @@
   > # public changeset are not obsolete
   > publish=false
   > [ui]
-  > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
+  > logtemplate="{rev}:{node|short} ({phase}{if(troubles, ' {troubles}')}) [{tags} {bookmarks}] {desc|firstline}\n"
   > EOF
   $ mkcommit() {
   >    echo "$1" > "$1"
@@ -203,7 +203,7 @@
 
   $ hg --hidden phase --public 2
   $ hg log -G
-  @  5:5601fb93a350 (draft) [tip ] add new_3_c
+  @  5:5601fb93a350 (draft bumped) [tip ] add new_3_c
   |
   | o  2:245bde4270cd (public) [ ] add original_c
   |/
@@ -220,7 +220,7 @@
 the public changeset
 
   $ hg log --hidden -r 'bumped()'
-  5:5601fb93a350 (draft) [tip ] add new_3_c
+  5:5601fb93a350 (draft bumped) [tip ] add new_3_c
 
 And that we can't push bumped changeset
 
@@ -242,7 +242,7 @@
 We need to create a clone of 5 and add a special marker with a flag
 
   $ hg summary
-  parent: 5:5601fb93a350 tip
+  parent: 5:5601fb93a350 tip (bumped)
    add new_3_c
   branch: default
   commit: (clean)
@@ -477,7 +477,7 @@
   $ hg log -r 'obsolete()'
   4:94b33453f93b (draft) [ ] add original_d
   $ hg summary
-  parent: 5:cda648ca50f5 tip
+  parent: 5:cda648ca50f5 tip (unstable)
    add original_e
   branch: default
   commit: (clean)
@@ -485,7 +485,7 @@
   phases: 3 draft
   unstable: 1 changesets
   $ hg log -G -r '::unstable()'
-  @  5:cda648ca50f5 (draft) [tip ] add original_e
+  @  5:cda648ca50f5 (draft unstable) [tip ] add original_e
   |
   x  4:94b33453f93b (draft) [ ] add original_d
   |
@@ -527,7 +527,7 @@
   2:245bde4270cd (public) [ ] add original_c
   3:6f9641995072 (draft) [ ] add n3w_3_c
   4:94b33453f93b (draft) [ ] add original_d
-  5:cda648ca50f5 (draft) [tip ] add original_e
+  5:cda648ca50f5 (draft unstable) [tip ] add original_e
   $ hg push ../tmpf -f # -f because we push unstable too
   pushing to ../tmpf
   searching for changes
@@ -548,7 +548,7 @@
 Do not warn about new head when the new head is a successor of a remote one
 
   $ hg log -G
-  @  5:cda648ca50f5 (draft) [tip ] add original_e
+  @  5:cda648ca50f5 (draft unstable) [tip ] add original_e
   |
   x  4:94b33453f93b (draft) [ ] add original_d
   |
@@ -719,8 +719,6 @@
   $ hg debugobsolete -r6 -T '{flag} {get(metadata, "user")}\n'
   0 test
 
-#if serve
-
 Test the debug output for exchange
 ----------------------------------
 
@@ -746,6 +744,8 @@
   $ hg up tip
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
+#if serve
+
   $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
   $ cat hg.pid >> $DAEMON_PIDS
 
@@ -796,8 +796,50 @@
   $ echo '[experimental]' >> $HGRCPATH
   $ echo "evolution=createmarkers,exchange" >> $HGRCPATH
 
+  $ rm hg.pid access.log errors.log
 #endif
 
+Several troubles on the same changeset (create an unstable and bumped changeset)
+
+  $ hg debugobsolete `getid obsolete_e`
+  $ hg debugobsolete `getid original_c` `getid babar`
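+
+(Presumably: pruning obsolete_e leaves its descendant babar with an
+obsolete ancestor, making it "unstable", while rewriting the public
+original_c as babar makes babar "bumped"; hence a single changeset
+carrying both troubles.)
+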
+  $ hg log --config ui.logtemplate= -r 'bumped() and unstable()'
+  changeset:   7:50c51b361e60
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  trouble:     unstable, bumped
+  summary:     add babar
+  
+
+test the "troubles" templatekw
+
+  $ hg log -r 'bumped() and unstable()'
+  7:50c51b361e60 (draft unstable bumped) [ ] add babar
+
+test the default cmdline template
+
+  $ hg log -T default -r 'bumped()'
+  changeset:   7:50c51b361e60
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  trouble:     unstable, bumped
+  summary:     add babar
+  
+
+test summary output
+
+  $ hg up -r 'bumped() and unstable()'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg summary
+  parent: 7:50c51b361e60  (unstable, bumped)
+   add babar
+  branch: default
+  commit: (clean)
+  update: 2 new changesets (update)
+  phases: 4 draft
+  unstable: 2 changesets
+  bumped: 1 changesets
+
 Test incoming/outgoing with changesets obsoleted remotely, known locally
 ===============================================================================
 
@@ -1211,7 +1253,7 @@
   2 1ab51af8f9b41ef8c7f6f3312d4706d870b1fb74 29346082e4a9e27042b62d2da0e2de211c027621 0 \(.*\) {'user': 'test'} (re)
   3 4715cf767440ed891755448016c2b8cf70760c30 7ae79c5d60f049c7b0dd02f5f25b9d60aaf7b36d 0 \(.*\) {'user': 'test'} (re)
   $ hg debugobsolete --delete 1 --delete 3
-  deleted 2 obsolescense markers
+  deleted 2 obsolescence markers
   $ hg debugobsolete
   cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b f9bd49731b0b175e42992a3c8fa6c678b2bc11f1 0 \(.*\) {'user': 'test'} (re)
   1ab51af8f9b41ef8c7f6f3312d4706d870b1fb74 29346082e4a9e27042b62d2da0e2de211c027621 0 \(.*\) {'user': 'test'} (re)
--- a/tests/test-profile.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-profile.t	Wed Jan 18 11:43:36 2017 -0500
@@ -8,25 +8,27 @@
 
 test --profile
 
-  $ hg --profile st 2>../out
+  $ prof='hg --config profiling.type=ls --profile'
+
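+(The alias pins profiling.type=ls so the pre-existing assertions keep
+exercising the ls profiler; the statistical profiler, presumably the new
+default, gets its own checks at the end of this file.)
+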
+  $ $prof st 2>../out
   $ grep CallCount ../out > /dev/null || cat ../out
 
-  $ hg --profile --config profiling.output=../out st
+  $ $prof --config profiling.output=../out st
   $ grep CallCount ../out > /dev/null || cat ../out
 
-  $ hg --profile --config profiling.output=blackbox --config extensions.blackbox= st
+  $ $prof --config profiling.output=blackbox --config extensions.blackbox= st
   $ grep CallCount .hg/blackbox.log > /dev/null || cat .hg/blackbox.log
 
-  $ hg --profile --config profiling.format=text st 2>../out
+  $ $prof --config profiling.format=text st 2>../out
   $ grep CallCount ../out > /dev/null || cat ../out
 
   $ echo "[profiling]" >> $HGRCPATH
   $ echo "format=kcachegrind" >> $HGRCPATH
 
-  $ hg --profile st 2>../out
+  $ $prof st 2>../out
   $ grep 'events: Ticks' ../out > /dev/null || cat ../out
 
-  $ hg --profile --config profiling.output=../out st
+  $ $prof --config profiling.output=../out st
   $ grep 'events: Ticks' ../out > /dev/null || cat ../out
 
 #endif
@@ -35,7 +37,7 @@
 
 Profiling of HTTP requests works
 
-  $ hg --profile --config profiling.format=text --config profiling.output=../profile.log serve -d -p $HGPORT --pid-file ../hg.pid -A ../access.log
+  $ $prof --config profiling.format=text --config profiling.output=../profile.log serve -d -p $HGPORT --pid-file ../hg.pid -A ../access.log
   $ cat ../hg.pid >> $DAEMON_PIDS
   $ hg -q clone -U http://localhost:$HGPORT ../clone
 
@@ -45,4 +47,49 @@
 
 #endif
 
+Install an extension that can sleep and guarantee a profiler has time to run
+
+  $ cat >> sleepext.py << EOF
+  > import time
+  > from mercurial import cmdutil, commands
+  > cmdtable = {}
+  > command = cmdutil.command(cmdtable)
+  > @command('sleep', [], 'hg sleep')
+  > def sleep(ui, *args, **kwargs):
+  >     time.sleep(0.1)
+  > EOF
+
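+(The stub extension registers an "hg sleep" command via cmdutil.command;
+sleeping for 0.1s gives the sampling profiler enough wall time to record
+at least one sample, which the assertions below rely on.)
+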
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > sleep = `pwd`/sleepext.py
+  > EOF
+
+statistical profiler works
+
+  $ hg --profile sleep 2>../out
+  $ grep Sample ../out
+  Sample count: \d+ (re)
+
+Various statprof formatters work
+
+  $ hg --profile --config profiling.statformat=byline sleep 2>../out
+  $ head -n 1 ../out
+    %   cumulative      self          
+  $ grep Sample ../out
+  Sample count: \d+ (re)
+
+  $ hg --profile --config profiling.statformat=bymethod sleep 2>../out
+  $ head -n 1 ../out
+    %   cumulative      self          
+  $ grep Sample ../out
+  Sample count: \d+ (re)
+
+  $ hg --profile --config profiling.statformat=hotpath sleep 2>../out
+  $ grep Sample ../out
+  Sample count: \d+ (re)
+
+  $ hg --profile --config profiling.statformat=json sleep 2>../out
+  $ cat ../out
+  \[\[\d+.* (re)
+
   $ cd ..
--- a/tests/test-propertycache.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-propertycache.py	Wed Jan 18 11:43:36 2017 -0500
@@ -46,7 +46,7 @@
 # these tests on the real object to detect regression.
 repopath = os.path.join(os.environ['TESTTMP'], 'repo')
 assert subprocess.call(['hg', 'init', repopath]) == 0
-ui = uimod.ui()
+ui = uimod.ui.load()
 repo = hg.repository(ui, path=repopath).unfiltered()
 
 
--- a/tests/test-pull-update.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-pull-update.t	Wed Jan 18 11:43:36 2017 -0500
@@ -130,7 +130,7 @@
 
 Test that updating deactivates current active bookmark, if the
 destination of the update is explicitly specified, and it doesn't
-match with the name of any exsiting bookmarks.
+match with the name of any existing bookmarks.
 
   $ cd ../t
   $ hg bookmark -d active-after-pull
--- a/tests/test-push-hook-lock.t	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-  $ hg init 1
-
-  $ echo '[ui]' >> 1/.hg/hgrc
-  $ echo 'timeout = 10' >> 1/.hg/hgrc
-
-  $ echo foo > 1/foo
-  $ hg --cwd 1 ci -A -m foo
-  adding foo
-
-  $ hg clone 1 2
-  updating to branch default
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-  $ hg clone 2 3
-  updating to branch default
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-  $ cat <<EOF > $TESTTMP/debuglocks-pretxn-hook.sh
-  > hg debuglocks
-  > true
-  > EOF
-  $ echo '[hooks]' >> 2/.hg/hgrc
-  $ echo "pretxnchangegroup.a = sh $TESTTMP/debuglocks-pretxn-hook.sh" >> 2/.hg/hgrc
-  $ echo 'changegroup.push = hg push -qf ../1' >> 2/.hg/hgrc
-
-  $ echo bar >> 3/foo
-  $ hg --cwd 3 ci -m bar
-
-  $ hg --cwd 3 push ../2 --config devel.legacy.exchange=bundle1
-  pushing to ../2
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  lock:  user *, process * (*s) (glob)
-  wlock: free
-
-  $ hg --cwd 1 --config extensions.strip= strip tip -q
-  $ hg --cwd 2 --config extensions.strip= strip tip -q
-  $ hg --cwd 3 push ../2 # bundle2+
-  pushing to ../2
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  lock:  user *, process * (*s) (glob)
-  wlock: user *, process * (*s) (glob)
-
--- a/tests/test-push-r.t	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,149 +0,0 @@
-  $ hg init test
-  $ cd test
-  $ hg unbundle "$TESTDIR/bundles/remote.hg"
-  adding changesets
-  adding manifests
-  adding file changes
-  added 9 changesets with 7 changes to 4 files (+1 heads)
-  (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg up tip
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cd ..
-
-  $ for i in 0 1 2 3 4 5 6 7 8; do
-  >    echo
-  >    mkdir test-"$i"
-  >    hg --cwd test-"$i" init
-  >    hg -R test push -r "$i" test-"$i"
-  >    cd test-"$i"
-  >    hg verify
-  >    cd ..
-  > done
-  
-  pushing to test-0
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  1 files, 1 changesets, 1 total revisions
-  
-  pushing to test-1
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 2 changesets with 2 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  1 files, 2 changesets, 2 total revisions
-  
-  pushing to test-2
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 3 changesets with 3 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  1 files, 3 changesets, 3 total revisions
-  
-  pushing to test-3
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 4 changesets with 4 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  1 files, 4 changesets, 4 total revisions
-  
-  pushing to test-4
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 2 changesets with 2 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  1 files, 2 changesets, 2 total revisions
-  
-  pushing to test-5
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 3 changesets with 3 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  1 files, 3 changesets, 3 total revisions
-  
-  pushing to test-6
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 4 changesets with 5 changes to 2 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  2 files, 4 changesets, 5 total revisions
-  
-  pushing to test-7
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 5 changesets with 6 changes to 3 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  3 files, 5 changesets, 6 total revisions
-  
-  pushing to test-8
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 5 changesets with 5 changes to 2 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  2 files, 5 changesets, 5 total revisions
-
-  $ cd test-8
-
-  $ hg pull ../test-7
-  pulling from ../test-7
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 4 changesets with 2 changes to 3 files (+1 heads)
-  (run 'hg heads' to see heads, 'hg merge' to merge)
-
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  4 files, 9 changesets, 7 total revisions
-
-  $ cd ..
--- a/tests/test-push-validation.t	Wed Jan 04 10:51:37 2017 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,92 +0,0 @@
-  $ hg init test
-  $ cd test
-
-  $ cat > .hg/hgrc <<EOF
-  > [server]
-  > validate=1
-  > EOF
-
-  $ echo alpha > alpha
-  $ echo beta > beta
-  $ hg addr
-  adding alpha
-  adding beta
-  $ hg ci -m 1
-
-  $ cd ..
-  $ hg clone test test-clone
-  updating to branch default
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Test spurious filelog entries:
-
-  $ cd test-clone
-  $ echo blah >> beta
-  $ cp .hg/store/data/beta.i tmp1
-  $ hg ci -m 2
-  $ cp .hg/store/data/beta.i tmp2
-  $ hg -q rollback
-  $ mv tmp2 .hg/store/data/beta.i
-  $ echo blah >> beta
-  $ hg ci -m '2 (corrupt)'
-
-Expected to fail:
-
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-   beta@1: dddc47b3ba30 not in manifests
-  2 files, 2 changesets, 4 total revisions
-  1 integrity errors encountered!
-  (first damaged changeset appears to be 1)
-  [1]
-
-  $ hg push
-  pushing to $TESTTMP/test (glob)
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  transaction abort!
-  rollback completed
-  abort: received spurious file revlog entry
-  [255]
-
-  $ hg -q rollback
-  $ mv tmp1 .hg/store/data/beta.i
-  $ echo beta > beta
-
-Test missing filelog entries:
-
-  $ cp .hg/store/data/beta.i tmp
-  $ echo blah >> beta
-  $ hg ci -m '2 (corrupt)'
-  $ mv tmp .hg/store/data/beta.i
-
-Expected to fail:
-
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-   beta@1: manifest refers to unknown revision dddc47b3ba30
-  2 files, 2 changesets, 2 total revisions
-  1 integrity errors encountered!
-  (first damaged changeset appears to be 1)
-  [1]
-
-  $ hg push
-  pushing to $TESTTMP/test (glob)
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  transaction abort!
-  rollback completed
-  abort: missing file data for beta:dddc47b3ba30e54484720ce0f4f768a0f4b6efb9 - run hg verify
-  [255]
-
-  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push.t	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,299 @@
+==================================
+Basic testing for the push command
+==================================
+
+Testing of the '--rev' flag
+===========================
+
+  $ hg init test-revflag
+  $ hg -R test-revflag unbundle "$TESTDIR/bundles/remote.hg"
+  adding changesets
+  adding manifests
+  adding file changes
+  added 9 changesets with 7 changes to 4 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+
+  $ for i in 0 1 2 3 4 5 6 7 8; do
+  >    echo
+  >    hg init test-revflag-"$i"
+  >    hg -R test-revflag push -r "$i" test-revflag-"$i"
+  >    hg -R test-revflag-"$i" verify
+  > done
+  
+  pushing to test-revflag-0
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  1 files, 1 changesets, 1 total revisions
+  
+  pushing to test-revflag-1
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 1 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  1 files, 2 changesets, 2 total revisions
+  
+  pushing to test-revflag-2
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 1 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  1 files, 3 changesets, 3 total revisions
+  
+  pushing to test-revflag-3
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 1 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  1 files, 4 changesets, 4 total revisions
+  
+  pushing to test-revflag-4
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 1 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  1 files, 2 changesets, 2 total revisions
+  
+  pushing to test-revflag-5
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 1 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  1 files, 3 changesets, 3 total revisions
+  
+  pushing to test-revflag-6
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 5 changes to 2 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  2 files, 4 changesets, 5 total revisions
+  
+  pushing to test-revflag-7
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 6 changes to 3 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  3 files, 5 changesets, 6 total revisions
+  
+  pushing to test-revflag-8
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 5 changes to 2 files
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  2 files, 5 changesets, 5 total revisions
+
+  $ cd test-revflag-8
+
+  $ hg pull ../test-revflag-7
+  pulling from ../test-revflag-7
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 2 changes to 3 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  4 files, 9 changesets, 7 total revisions
+
+  $ cd ..
+
+Test server side validation during push
+=======================================
+
+  $ hg init test-validation
+  $ cd test-validation
+
+  $ cat > .hg/hgrc <<EOF
+  > [server]
+  > validate=1
+  > EOF
+
+  $ echo alpha > alpha
+  $ echo beta > beta
+  $ hg addr
+  adding alpha
+  adding beta
+  $ hg ci -m 1
+
+  $ cd ..
+  $ hg clone test-validation test-validation-clone
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Test spurious filelog entries:
+
+  $ cd test-validation-clone
+  $ echo blah >> beta
+  $ cp .hg/store/data/beta.i tmp1
+  $ hg ci -m 2
+  $ cp .hg/store/data/beta.i tmp2
+  $ hg -q rollback
+  $ mv tmp2 .hg/store/data/beta.i
+  $ echo blah >> beta
+  $ hg ci -m '2 (corrupt)'
+
+Expected to fail:
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+   beta@1: dddc47b3ba30 not in manifests
+  2 files, 2 changesets, 4 total revisions
+  1 integrity errors encountered!
+  (first damaged changeset appears to be 1)
+  [1]
+
+  $ hg push
+  pushing to $TESTTMP/test-validation (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  transaction abort!
+  rollback completed
+  abort: received spurious file revlog entry
+  [255]
+
+  $ hg -q rollback
+  $ mv tmp1 .hg/store/data/beta.i
+  $ echo beta > beta
+
+Test missing filelog entries:
+
+  $ cp .hg/store/data/beta.i tmp
+  $ echo blah >> beta
+  $ hg ci -m '2 (corrupt)'
+  $ mv tmp .hg/store/data/beta.i
+
+Expected to fail:
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+   beta@1: manifest refers to unknown revision dddc47b3ba30
+  2 files, 2 changesets, 2 total revisions
+  1 integrity errors encountered!
+  (first damaged changeset appears to be 1)
+  [1]
+
+  $ hg push
+  pushing to $TESTTMP/test-validation (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  transaction abort!
+  rollback completed
+  abort: missing file data for beta:dddc47b3ba30e54484720ce0f4f768a0f4b6efb9 - run hg verify
+  [255]
+
+  $ cd ..
+
+Test push hook locking
+======================
+
+  $ hg init 1
+
+  $ echo '[ui]' >> 1/.hg/hgrc
+  $ echo 'timeout = 10' >> 1/.hg/hgrc
+
+  $ echo foo > 1/foo
+  $ hg --cwd 1 ci -A -m foo
+  adding foo
+
+  $ hg clone 1 2
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ hg clone 2 3
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ cat <<EOF > $TESTTMP/debuglocks-pretxn-hook.sh
+  > hg debuglocks
+  > true
+  > EOF
+  $ echo '[hooks]' >> 2/.hg/hgrc
+  $ echo "pretxnchangegroup.a = sh $TESTTMP/debuglocks-pretxn-hook.sh" >> 2/.hg/hgrc
+  $ echo 'changegroup.push = hg push -qf ../1' >> 2/.hg/hgrc
+
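+(The pretxnchangegroup hook runs "hg debuglocks" while the incoming
+transaction is still open, showing which locks the push holds at that
+point, and the changegroup hook then triggers a nested push back to
+repository 1.)
+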
+  $ echo bar >> 3/foo
+  $ hg --cwd 3 ci -m bar
+
+  $ hg --cwd 3 push ../2 --config devel.legacy.exchange=bundle1
+  pushing to ../2
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  lock:  user *, process * (*s) (glob)
+  wlock: free
+
+  $ hg --cwd 1 --config extensions.strip= strip tip -q
+  $ hg --cwd 2 --config extensions.strip= strip tip -q
+  $ hg --cwd 3 push ../2 # bundle2+
+  pushing to ../2
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  lock:  user *, process * (*s) (glob)
+  wlock: user *, process * (*s) (glob)
+
--- a/tests/test-qrecord.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-qrecord.t	Wed Jan 18 11:43:36 2017 -0500
@@ -9,6 +9,9 @@
   record extension - commands to interactively select changes for
   commit/qrefresh (DEPRECATED)
   
+  The feature provided by this extension has been moved into core Mercurial as
+  'hg commit --interactive'.
+  
   (use 'hg help extensions' for information on enabling extensions)
 
 help qrecord (no record)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-base.t	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,393 @@
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > rebase=
+  > drawdag=$TESTDIR/drawdag.py
+  > 
+  > [phases]
+  > publish=False
+  > 
+  > [alias]
+  > tglog = log -G --template "{rev}: {desc}"
+  > EOF
+
+  $ rebasewithdag() {
+  >   N=`$PYTHON -c "print($N+1)"`
+  >   hg init repo$N && cd repo$N
+  >   hg debugdrawdag
+  >   hg rebase "$@" > _rebasetmp
+  >   r=$?
+  >   grep -v 'saved backup bundle' _rebasetmp
+  >   [ $r -eq 0 ] && hg tglog
+  >   cd ..
+  >   return $r
+  > }
+
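+(The rebasewithdag helper above creates a fresh repository, builds the DAG
+described on stdin with the drawdag extension, runs "hg rebase" with the
+given arguments while filtering the "saved backup bundle" line, whose path
+varies, from the output, and prints the resulting graph when the rebase
+succeeds.)
+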
+Single branching point, without merge:
+
+  $ rebasewithdag -b D -d Z <<'EOS'
+  >     D E
+  >     |/
+  > Z B C   # C: branching point, E should be picked
+  >  \|/    # B should not be picked
+  >   A
+  >   |
+  >   R
+  > EOS
+  rebasing 3:d6003a550c2c "C" (C)
+  rebasing 5:4526cf523425 "D" (D)
+  rebasing 6:b296604d9846 "E" (E tip)
+  o  6: E
+  |
+  | o  5: D
+  |/
+  o  4: C
+  |
+  o  3: Z
+  |
+  | o  2: B
+  |/
+  o  1: A
+  |
+  o  0: R
+  
+Multiple branching points caused by selecting a single merge changeset:
+
+  $ rebasewithdag -b E -d Z <<'EOS'
+  >     E
+  >    /|
+  >   B C D  # B, C: multiple branching points
+  >   | |/   # D should not be picked
+  > Z | /
+  >  \|/
+  >   A
+  >   |
+  >   R
+  > EOS
+  rebasing 2:c1e6b162678d "B" (B)
+  rebasing 3:d6003a550c2c "C" (C)
+  rebasing 6:5251e0cb7302 "E" (E tip)
+  o    6: E
+  |\
+  | o  5: C
+  | |
+  o |  4: B
+  |/
+  o  3: Z
+  |
+  | o  2: D
+  |/
+  o  1: A
+  |
+  o  0: R
+  
+Rebase should not extend the "--base" revset using "descendants":
+
+  $ rebasewithdag -b B -d Z <<'EOS'
+  >     E
+  >    /|
+  > Z B C  # descendants(B) = B+E. With E, C will be included incorrectly
+  >  \|/
+  >   A
+  >   |
+  >   R
+  > EOS
+  rebasing 2:c1e6b162678d "B" (B)
+  rebasing 5:5251e0cb7302 "E" (E tip)
+  o    5: E
+  |\
+  | o  4: B
+  | |
+  | o  3: Z
+  | |
+  o |  2: C
+  |/
+  o  1: A
+  |
+  o  0: R
+  
+Rebase should not simplify the "--base" revset using "roots":
+
+  $ rebasewithdag -b B+E -d Z <<'EOS'
+  >     E
+  >    /|
+  > Z B C  # roots(B+E) = B. Without E, C will be missed incorrectly
+  >  \|/
+  >   A
+  >   |
+  >   R
+  > EOS
+  rebasing 2:c1e6b162678d "B" (B)
+  rebasing 3:d6003a550c2c "C" (C)
+  rebasing 5:5251e0cb7302 "E" (E tip)
+  o    5: E
+  |\
+  | o  4: C
+  | |
+  o |  3: B
+  |/
+  o  2: Z
+  |
+  o  1: A
+  |
+  o  0: R
+  
+The destination is one of the two branching points of a merge:
+
+  $ rebasewithdag -b F -d Z <<'EOS'
+  >     F
+  >    / \
+  >   E   D
+  >  /   /
+  > Z   C
+  >  \ /
+  >   B
+  >   |
+  >   A
+  > EOS
+  nothing to rebase
+  [1]
+
+Multiple branching points caused by multiple bases (issue5420):
+
+  $ rebasewithdag -b E1+E2+C2+B1 -d Z <<'EOS'
+  >   Z    E2
+  >   |   /
+  >   F E1 C2
+  >   |/  /
+  >   E C1 B2
+  >   |/  /
+  >   C B1
+  >   |/
+  >   B
+  >   |
+  >   A
+  >   |
+  >   R
+  > EOS
+  rebasing 3:a113dbaa660a "B1" (B1)
+  rebasing 5:06ce7b1cc8c2 "B2" (B2)
+  rebasing 6:0ac98cce32d3 "C1" (C1)
+  rebasing 8:781512f5e33d "C2" (C2)
+  rebasing 9:428d8c18f641 "E1" (E1)
+  rebasing 11:e1bf82f6b6df "E2" (E2)
+  o  12: E2
+  |
+  o  11: E1
+  |
+  | o  10: C2
+  | |
+  | o  9: C1
+  |/
+  | o  8: B2
+  | |
+  | o  7: B1
+  |/
+  o  6: Z
+  |
+  o  5: F
+  |
+  o  4: E
+  |
+  o  3: C
+  |
+  o  2: B
+  |
+  o  1: A
+  |
+  o  0: R
+  
+Multiple branching points with multiple merges:
+
+  $ rebasewithdag -b G+P -d Z <<'EOS'
+  > G   H   P
+  > |\ /|   |\
+  > F E D   M N
+  >  \|/|  /| |\
+  > Z C B I J K L
+  >  \|/  |/  |/
+  >   A   A   A
+  > EOS
+  rebasing 2:dc0947a82db8 "C" (C)
+  rebasing 8:215e7b0814e1 "D" (D)
+  rebasing 9:03ca77807e91 "E" (E)
+  rebasing 10:afc707c82df0 "F" (F)
+  rebasing 13:018caa673317 "G" (G)
+  rebasing 14:4f710fbd68cb "H" (H)
+  rebasing 3:08ebfeb61bac "I" (I)
+  rebasing 4:a0a5005cec67 "J" (J)
+  rebasing 5:83780307a7e8 "K" (K)
+  rebasing 6:e131637a1cb6 "L" (L)
+  rebasing 11:d6fe3d11d95d "M" (M)
+  rebasing 12:fa1e02269063 "N" (N)
+  rebasing 15:448b1a498430 "P" (P tip)
+  o    15: P
+  |\
+  | o    14: N
+  | |\
+  o \ \    13: M
+  |\ \ \
+  | | | o  12: L
+  | | | |
+  | | o |  11: K
+  | | |/
+  | o /  10: J
+  | |/
+  o /  9: I
+  |/
+  | o    8: H
+  | |\
+  | | | o  7: G
+  | | |/|
+  | | | o  6: F
+  | | | |
+  | | o |  5: E
+  | | |/
+  | o |  4: D
+  | |\|
+  +---o  3: C
+  | |
+  o |  2: Z
+  | |
+  | o  1: B
+  |/
+  o  0: A
+  
+Slightly more complex merge case (mentioned in https://www.mercurial-scm.org/pipermail/mercurial-devel/2016-November/091074.html):
+
+  $ rebasewithdag -b A3+B3 -d Z <<'EOF'
+  > Z     C1    A3     B3
+  > |    /     / \    / \
+  > M3 C0     A1  A2 B1  B2
+  > | /       |   |  |   |
+  > M2        M1  C1 C1  M3
+  > |
+  > M1
+  > |
+  > M0
+  > EOF
+  rebasing 4:8817fae53c94 "C0" (C0)
+  rebasing 6:06ca5dfe3b5b "B2" (B2)
+  rebasing 7:73508237b032 "C1" (C1)
+  rebasing 9:fdb955e2faed "A2" (A2)
+  rebasing 11:1b2f368c3cb5 "A3" (A3)
+  rebasing 10:0a33b0519128 "B1" (B1)
+  rebasing 12:bd6a37b5b67a "B3" (B3 tip)
+  o    12: B3
+  |\
+  | o  11: B1
+  | |
+  | | o    10: A3
+  | | |\
+  | +---o  9: A2
+  | | |
+  | o |  8: C1
+  | | |
+  o | |  7: B2
+  | | |
+  | o |  6: C0
+  |/ /
+  o |  5: Z
+  | |
+  o |  4: M3
+  | |
+  o |  3: M2
+  | |
+  | o  2: A1
+  |/
+  o  1: M1
+  |
+  o  0: M0
+  
+Mixed rebasable and non-rebasable bases (unresolved, issue5422):
+
+  $ rebasewithdag -b C+D -d B <<'EOS'
+  >   D
+  >  /
+  > B C
+  > |/
+  > A
+  > EOS
+  nothing to rebase
+  [1]
+
+Disconnected graph:
+
+  $ rebasewithdag -b B -d Z <<'EOS'
+  >   B
+  >   |
+  > Z A
+  > EOS
+  nothing to rebase from 112478962961 to 48b9aae0607f
+  [1]
+
+Multiple roots. Roots are ancestors of dest:
+
+  $ rebasewithdag -b B+D -d Z <<'EOF'
+  > D Z B
+  >  \|\|
+  >   C A
+  > EOF
+  rebasing 2:112478962961 "B" (B)
+  rebasing 3:b70f76719894 "D" (D)
+  o  4: D
+  |
+  | o  3: B
+  |/
+  o    2: Z
+  |\
+  | o  1: C
+  |
+  o  0: A
+  
+Multiple roots. One root is not an ancestor of dest:
+
+  $ rebasewithdag -b B+D -d Z <<'EOF'
+  > Z B D
+  >  \|\|
+  >   A C
+  > EOF
+  nothing to rebase from 86d01f49c0d9+b70f76719894 to 262e37e34f63
+  [1]
+
+Multiple roots. One root is not an ancestor of dest. Select using a merge:
+
+  $ rebasewithdag -b E -d Z <<'EOF'
+  >   E
+  >   |\
+  > Z B D
+  >  \|\|
+  >   A C
+  > EOF
+  rebasing 2:86d01f49c0d9 "B" (B)
+  rebasing 5:539a0ff83ea9 "E" (E tip)
+  o    5: E
+  |\
+  | o    4: B
+  | |\
+  | | o  3: Z
+  | | |
+  o | |  2: D
+  |/ /
+  o /  1: C
+   /
+  o  0: A
+  
+Multiple roots. Two children share two parents while dest has only one parent:
+
+  $ rebasewithdag -b B+D -d Z <<'EOF'
+  > Z B D
+  >  \|\|\
+  >   A C A
+  > EOF
+  rebasing 2:86d01f49c0d9 "B" (B)
+  rebasing 3:b7df2ca01aa8 "D" (D)
+  o    4: D
+  |\
+  +---o  3: B
+  | |/
+  | o  2: Z
+  | |
+  o |  1: C
+   /
+  o  0: A
+  
--- a/tests/test-rebase-collapse.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-rebase-collapse.t	Wed Jan 18 11:43:36 2017 -0500
@@ -836,7 +836,7 @@
   rebasing 2:b8d8db2b242d "a-dev" (tip)
   saved backup bundle to $TESTTMP/collapse_remember_message/.hg/strip-backup/b8d8db2b242d-f474c19a-backup.hg (glob)
   $ hg log
-  changeset:   2:12bb766dceb1
+  changeset:   2:45ba1d1a8665
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
--- a/tests/test-rebase-conflicts.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-rebase-conflicts.t	Wed Jan 18 11:43:36 2017 -0500
@@ -73,8 +73,6 @@
 Try to continue without solving the conflict:
 
   $ hg rebase --continue
-  already rebased 3:3163e20567cc "L1" as 3e046f2ecedb
-  rebasing 4:46f0b057b5c0 "L2"
   abort: unresolved merge conflicts (see 'hg help resolve')
   [255]
 
@@ -335,7 +333,7 @@
   @@ -1,2 +1,6 @@
    a
    b
-  +<<<<<<< dest:   328e4ab1f7cc  ab - test: ab
+  +<<<<<<< dest:   328e4ab1f7cc ab - test: ab
   +=======
   +c
   +>>>>>>> source: 7bc217434fc1 - test: abc
@@ -354,7 +352,7 @@
   +++ b/a	* (glob)
   @@ -1,2 +1,8 @@
    a
-  +<<<<<<< dest:   328e4ab1f7cc  ab - test: ab
+  +<<<<<<< dest:   328e4ab1f7cc ab - test: ab
    b
   +||||||| base
   +=======
--- a/tests/test-rebase-obsolete.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-rebase-obsolete.t	Wed Jan 18 11:43:36 2017 -0500
@@ -761,7 +761,7 @@
   o  0:4a2df7238c3b A
   
   $ hg summary
-  parent: 15:73568ab6879d tip
+  parent: 15:73568ab6879d tip (unstable)
    bar foo
   branch: default
   commit: (clean)
--- a/tests/test-rebase-pull.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-rebase-pull.t	Wed Jan 18 11:43:36 2017 -0500
@@ -1,6 +1,7 @@
   $ cat >> $HGRCPATH <<EOF
   > [extensions]
   > rebase=
+  > histedit=
   > 
   > [alias]
   > tglog = log -G --template "{rev}: '{desc}' {branches}\n"
@@ -72,6 +73,63 @@
   searching for changes
   no changes found
 
+Abort pull early if working dir is not clean:
+
+  $ echo L1-mod > L1
+  $ hg pull --rebase
+  abort: uncommitted changes
+  (cannot pull with rebase: please commit or shelve your changes first)
+  [255]
+  $ hg update --clean --quiet
+
+Abort pull early if another operation (histedit) is in progress:
+
+  $ hg histedit . -q --commands - << EOF
+  > edit d80cc2da061e histedit: generate unfinished state
+  > EOF
+  Editing (d80cc2da061e), you may commit or record as needed now.
+  (hg histedit --continue to resume)
+  [1]
+  $ hg pull --rebase
+  abort: histedit in progress
+  (use 'hg histedit --continue' or 'hg histedit --abort')
+  [255]
+  $ hg histedit --abort --quiet
+
+Abort pull early with pending uncommitted merge:
+
+  $ cd ..
+  $ hg clone --noupdate c d
+  $ cd d
+  $ hg tglog
+  o  1: 'C2'
+  |
+  o  0: 'C1'
+  
+  $ hg update --quiet 0
+  $ echo M1 > M1
+  $ hg commit --quiet -Am M1
+  $ hg update --quiet 1
+  $ hg merge 2
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg pull --rebase
+  abort: outstanding uncommitted merge
+  (cannot pull with rebase: please commit or shelve your changes first)
+  [255]
+  $ hg update --clean --quiet
+
+Abort pull early with unclean subrepo:
+
+  $ echo s = s > .hgsub
+  $ hg add .hgsub
+  $ hg init s
+  $ hg commit -m "generated a subrepo"
+  $ echo a > s/a
+  $ hg -R s add s/a
+  $ hg pull --rebase
+  abort: uncommitted changes in subrepository 's'
+  (cannot pull with rebase: please commit or shelve your changes first)
+  [255]
 
 Invoke pull --rebase and nothing to rebase:
 
--- a/tests/test-rebase-scenario-global.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-rebase-scenario-global.t	Wed Jan 18 11:43:36 2017 -0500
@@ -838,7 +838,7 @@
 
   $ cd ..
 
-Make the repo a bit more interresting
+Make the repo a bit more interesting
 
   $ hg up 1
   0 files updated, 0 files merged, 2 files removed, 0 files unresolved
--- a/tests/test-rename-dir-merge.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-rename-dir-merge.t	Wed Jan 18 11:43:36 2017 -0500
@@ -148,7 +148,7 @@
   target
   =======
   baz
-  >>>>>>> merge rev:    ce36d17b18fb  - test: 2 add a/c
+  >>>>>>> merge rev:    ce36d17b18fb - test: 2 add a/c
   $ rm b/c.orig
 
 Remote directory rename with conflicting file added in remote target directory
@@ -177,7 +177,7 @@
   ? a/d
   ? b/c.orig
   $ cat b/c
-  <<<<<<< working copy: ce36d17b18fb  - test: 2 add a/c
+  <<<<<<< working copy: ce36d17b18fb - test: 2 add a/c
   baz
   =======
   target
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-repo-compengines.t	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,78 @@
+A new repository uses zlib storage, which doesn't need a requirement
+
+  $ hg init default
+  $ cd default
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+  $ touch foo
+  $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text to trigger compression'
+  $ hg debugrevlog -c | grep 0x78
+      0x78 (x)  :   1 (100.00%)
+      0x78 (x)  : 110 (100.00%)
+
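+(0x78 is ASCII "x", the usual first byte of a zlib stream, which is how
+zlib-compressed revlog chunks are identified; the zstd block below greps
+for 0x28, the leading byte of a zstd frame.)
+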
+  $ cd ..
+
+Assigning an unknown compression engine to format.compression aborts
+
+  $ hg --config experimental.format.compression=unknown init unknown
+  abort: compression engine unknown defined by experimental.format.compression not available
+  (run "hg debuginstall" to list available compression engines)
+  [255]
+
+A requirement specifying an unknown compression engine makes Mercurial bail
+
+  $ hg init unknownrequirement
+  $ cd unknownrequirement
+  $ echo exp-compression-unknown >> .hg/requires
+  $ hg log
+  abort: repository requires features unknown to this Mercurial: exp-compression-unknown!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  [255]
+
+  $ cd ..
+
+#if zstd
+
+  $ hg --config experimental.format.compression=zstd init zstd
+  $ cd zstd
+  $ cat .hg/requires
+  dotencode
+  exp-compression-zstd
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+  $ touch foo
+  $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text'
+
+  $ hg debugrevlog -c | grep 0x28
+      0x28      :  1 (100.00%)
+      0x28      : 98 (100.00%)
+
+  $ cd ..
+
+Specifying a new format.compression on an existing repo won't introduce data
+with that engine or a requirement
+
+  $ cd default
+  $ touch bar
+  $ hg --config experimental.format.compression=zstd -q commit -A -m 'add bar with a lot of repeated repeated repeated text'
+
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+  $ hg debugrevlog -c | grep 0x78
+      0x78 (x)  :   2 (100.00%)
+      0x78 (x)  : 199 (100.00%)
+
+#endif
--- a/tests/test-revert-interactive.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-revert-interactive.t	Wed Jan 18 11:43:36 2017 -0500
@@ -46,6 +46,7 @@
   > y
   > y
   > y
+  > y
   > n
   > n
   > EOF
@@ -53,6 +54,7 @@
   reverting folder1/g (glob)
   removing folder1/i (glob)
   reverting folder2/h (glob)
+  remove added file folder1/i (Yn)? y
   diff --git a/f b/f
   2 hunks, 2 lines changed
   examine changes to 'f'? [Ynesfdaq?] y
@@ -137,7 +139,7 @@
   $ ls folder1/
   g
 
-Test that a noop revert doesn't do an unecessary backup
+Test that a noop revert doesn't do an unnecessary backup
   $ (echo y; echo n) | hg revert -i -r 2 folder1/g
   diff --git a/folder1/g b/folder1/g
   1 hunks, 1 lines changed
@@ -174,6 +176,7 @@
   $ hg update -C 6
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg revert -i -r 2 --all -- << EOF
+  > n
   > y
   > y
   > y
@@ -186,6 +189,7 @@
   reverting folder1/g (glob)
   removing folder1/i (glob)
   reverting folder2/h (glob)
+  remove added file folder1/i (Yn)? n
   diff --git a/f b/f
   2 hunks, 2 lines changed
   examine changes to 'f'? [Ynesfdaq?] y
@@ -258,7 +262,6 @@
   $ hg st
   M f
   M folder1/g
-  R folder1/i
   $ hg revert --interactive f << EOF
   > y
   > y
@@ -290,7 +293,6 @@
   $ hg st
   M f
   M folder1/g
-  R folder1/i
   ? f.orig
   $ cat f
   a
@@ -307,7 +309,7 @@
   5
   $ rm f.orig
   $ hg update -C .
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
 Check editing files newly added by a revert
 
@@ -424,14 +426,14 @@
   > n
   > EOF
   forgetting newfile
-  forget added file newfile (yn)? n
+  forget added file newfile (Yn)? n
   $ hg status
   A newfile
   $ hg revert -i <<EOF
   > y
   > EOF
   forgetting newfile
-  forget added file newfile (yn)? y
+  forget added file newfile (Yn)? y
   $ hg status
   ? newfile
 
--- a/tests/test-revert.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-revert.t	Wed Jan 18 11:43:36 2017 -0500
@@ -784,7 +784,7 @@
 
 (setup from reference repo)
 
-  $ cp -r revert-ref revert-parent-all
+  $ cp -R revert-ref revert-parent-all
   $ cd revert-parent-all
 
 check revert output
@@ -841,7 +841,7 @@
 
 (setup from reference repo)
 
-  $ cp -r revert-ref revert-base-all
+  $ cp -R revert-ref revert-base-all
   $ cd revert-base-all
 
 check revert output
@@ -896,7 +896,7 @@
 
 (setup from reference repo)
 
-  $ cp -r revert-ref revert-parent-explicit
+  $ cp -R revert-ref revert-parent-explicit
   $ cd revert-parent-explicit
 
 revert all files individually and check the output
@@ -989,7 +989,7 @@
 
 (setup from reference repo)
 
-  $ cp -r revert-ref revert-base-explicit
+  $ cp -R revert-ref revert-base-explicit
   $ cd revert-base-explicit
 
 revert all files individually and check the output
--- a/tests/test-revlog-ancestry.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-revlog-ancestry.py	Wed Jan 18 11:43:36 2017 -0500
@@ -6,7 +6,7 @@
     ui as uimod,
 )
 
-u = uimod.ui()
+u = uimod.ui.load()
 
 repo = hg.repository(u, 'test1', create=1)
 os.chdir('test1')
--- a/tests/test-revset.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-revset.t	Wed Jan 18 11:43:36 2017 -0500
@@ -161,8 +161,8 @@
   (rangeall
     None)
   * optimized:
-  (rangepre
-    ('string', 'tip')
+  (rangeall
+    None
     define)
   * set:
   <spanset+ 0:9>
@@ -619,8 +619,8 @@
   (rangeall
     None)
   * analyzed:
-  (rangepre
-    ('string', 'tip')
+  (rangeall
+    None
     define)
   * set:
   <spanset+ 0:9>
@@ -865,6 +865,17 @@
   7
   8
   9
+  $ log 'author(r"re:\S")'
+  0
+  1
+  2
+  3
+  4
+  5
+  6
+  7
+  8
+  9
   $ log 'branch(é)'
   8
   9
@@ -881,6 +892,13 @@
   $ log 'children(ancestor(4,5))'
   2
   3
+
+  $ log 'children(4)'
+  6
+  8
+  $ log 'children(null)'
+  0
+
   $ log 'closed()'
   $ log 'contains(a)'
   0
@@ -894,6 +912,9 @@
   5
   $ log 'desc(B)'
   5
+  $ hg log -r 'desc(r"re:S?u")' --template "{rev} {desc|firstline}\n"
+  5 5 bug
+  6 6 issue619
   $ log 'descendants(2 or 3)'
   2
   3
@@ -1079,7 +1100,7 @@
   8
   9
 
-Test opreand of '%' is optimized recursively (issue4670)
+Test operand of '%' is optimized recursively (issue4670)
 
   $ try --optimize '8:9-8%'
   (onlypost
@@ -2802,8 +2823,8 @@
   [255]
 
 Undocumented functions aren't suggested as similar either
-  $ log 'wdir2()'
-  hg: parse error: unknown identifier: wdir2
+  $ log 'tagged2()'
+  hg: parse error: unknown identifier: tagged2
   [255]
 
 multiple revspecs
@@ -3085,7 +3106,7 @@
   [255]
 
 test scope of alias expansion: 'universe' is expanded prior to 'shadowall(0)',
-but 'all()' should never be substituded to '0()'.
+but 'all()' should never be substituted to '0()'.
 
   $ echo 'universe = all()' >> .hg/hgrc
   $ echo 'shadowall(all) = all and universe' >> .hg/hgrc
--- a/tests/test-setdiscovery.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-setdiscovery.t	Wed Jan 18 11:43:36 2017 -0500
@@ -349,9 +349,9 @@
   $ killdaemons.py
   $ cut -d' ' -f6- access.log | grep -v cmd=known # cmd=known uses random sampling
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D513314ca8b3ae4dac8eec56966265b00fcf866db
-  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=513314ca8b3ae4dac8eec56966265b00fcf866db&heads=e64a39e7da8b0d54bc63e81169aff001c13b3477
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D513314ca8b3ae4dac8eec56966265b00fcf866db x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=513314ca8b3ae4dac8eec56966265b00fcf866db&heads=e64a39e7da8b0d54bc63e81169aff001c13b3477 x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   $ cat errors.log
 
   $ cd ..
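
The new x-hgproto-1 tokens in the expected access log are the client advertising which wire protocol versions and compression engines it supports; the leading glob absorbs an optional zstd entry, which only appears when the client has the zstd module available. A rough sketch of how such a header value is put together (the function name is illustrative, not Mercurial's internal API):

  def buildprotoheader(versions=('0.1', '0.2'),
                       compengines=('zstd', 'zlib', 'none', 'bzip2')):
      # protocol versions first, then the supported compression engines
      # in the client's order of preference
      return '%s comp=%s' % (' '.join(versions), ','.join(compengines))

  assert buildprotoheader() == '0.1 0.2 comp=zstd,zlib,none,bzip2'
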
--- a/tests/test-shelve.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-shelve.t	Wed Jan 18 11:43:36 2017 -0500
@@ -100,6 +100,46 @@
   default.hg
   default.patch
 
+checks to make sure we don't create a directory or
+hidden file while choosing a new shelve name
+
+when we are given a name
+
+  $ hg shelve -n foo/bar
+  abort: shelved change names can not contain slashes
+  [255]
+  $ hg shelve -n .baz
+  abort: shelved change names can not start with '.'
+  [255]
+  $ hg shelve -n foo\\bar
+  abort: shelved change names can not contain slashes
+  [255]
+
+when shelve has to choose a name itself
+
+  $ hg branch x/y -q
+  $ hg commit -q -m "Branch commit 0"
+  $ hg shelve
+  nothing changed
+  [1]
+  $ hg branch .x -q
+  $ hg commit -q -m "Branch commit 1"
+  $ hg shelve
+  nothing changed
+  [1]
+  $ hg branch x\\y -q
+  $ hg commit -q -m "Branch commit 2"
+  $ hg shelve
+  nothing changed
+  [1]
+
+cleaning up the branches made for the name-checking tests
+
+  $ hg up default -q
+  $ hg strip 3 -q
+  $ hg strip 2 -q
+  $ hg strip 1 -q
+
 create an mq patch - shelving should work fine with a patch applied
 
   $ echo n > n
@@ -128,15 +168,6 @@
     c
   R b/b
 
-prevent some foot-shooting
-
-  $ hg shelve -n foo/bar
-  abort: shelved change names may not contain slashes
-  [255]
-  $ hg shelve -n .baz
-  abort: shelved change names may not start with '.'
-  [255]
-
 the common case - no options or filenames
 
   $ hg shelve
@@ -195,12 +226,12 @@
 (this also tests that same timestamp prevents backups from being
 removed, even though there are more than 'maxbackups' backups)
 
-  $ f -t .hg/shelve-backup/default.hg
-  .hg/shelve-backup/default.hg: file
-  $ touch -t 200001010000 .hg/shelve-backup/default.hg
-  $ f -t .hg/shelve-backup/default-1.hg
-  .hg/shelve-backup/default-1.hg: file
-  $ touch -t 200001010000 .hg/shelve-backup/default-1.hg
+  $ f -t .hg/shelve-backup/default.patch
+  .hg/shelve-backup/default.patch: file
+  $ touch -t 200001010000 .hg/shelve-backup/default.patch
+  $ f -t .hg/shelve-backup/default-1.patch
+  .hg/shelve-backup/default-1.patch: file
+  $ touch -t 200001010000 .hg/shelve-backup/default-1.patch
 
   $ hg unshelve
   unshelving change 'default-01'
@@ -332,7 +363,7 @@
   +++ b/a/a
   @@ -1,2 +1,6 @@
    a
-  +<<<<<<< dest:   *  - shelve: pending changes temporary commit (glob)
+  +<<<<<<< dest:   * - shelve: pending changes temporary commit (glob)
    c
   +=======
   +a
@@ -759,7 +790,7 @@
   M f
   ? f.orig
   $ cat f
-  <<<<<<< dest:   5f6b880e719b  - shelve: pending changes temporary commit
+  <<<<<<< dest:   5f6b880e719b - shelve: pending changes temporary commit
   g
   =======
   f
@@ -804,7 +835,7 @@
   M f
   ? f.orig
   $ cat f
-  <<<<<<< dest:   *  - test: intermediate other change (glob)
+  <<<<<<< dest:   * - test: intermediate other change (glob)
   g
   =======
   f
@@ -960,7 +991,7 @@
   x
   x
 
-shelve --patch and shelve --stat should work with a single valid shelfname
+shelve --patch and shelve --stat should work with valid shelfnames
 
   $ hg up --clean .
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -977,11 +1008,29 @@
   shelved as default-01
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg shelve --patch default default-01
-  abort: --patch expects a single shelf
-  [255]
+  default-01      (*)* changes to: create conflict (glob)
+  
+  diff --git a/shelf-patch-b b/shelf-patch-b
+  new file mode 100644
+  --- /dev/null
+  +++ b/shelf-patch-b
+  @@ -0,0 +1,1 @@
+  +patch b
+  default         (*)* changes to: create conflict (glob)
+  
+  diff --git a/shelf-patch-a b/shelf-patch-a
+  new file mode 100644
+  --- /dev/null
+  +++ b/shelf-patch-a
+  @@ -0,0 +1,1 @@
+  +patch a
   $ hg shelve --stat default default-01
-  abort: --stat expects a single shelf
-  [255]
+  default-01      (*)* changes to: create conflict (glob)
+   shelf-patch-b |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
+  default         (*)* changes to: create conflict (glob)
+   shelf-patch-a |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
   $ hg shelve --patch default
   default         (*)* changes to: create conflict (glob)
   
@@ -1001,6 +1050,12 @@
   $ hg shelve --stat nonexistentshelf
   abort: cannot find shelf nonexistentshelf
   [255]
+  $ hg shelve --patch default nonexistentshelf
+  abort: cannot find shelf nonexistentshelf
+  [255]
+  $ hg shelve --patch
+  abort: --patch expects at least one shelf
+  [255]
 
   $ cd ..
 
@@ -1383,6 +1438,7 @@
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg branch
   default
+  $ cd ..
 
 When I shelve a commit on a newly created branch, I expect
 that after unshelve the newly created branch will be preserved.
@@ -1416,6 +1472,7 @@
   ? b
   $ hg branch
   test
+  $ cd ..
 
 When I shelve a commit on a newly created branch, make
 some changes, and unshelve it, running into merge
@@ -1489,11 +1546,12 @@
   A b
   $ hg branch
   default
+  $ cd ..
 
 When I unshelve resulting in merge conflicts and make the saved
 shelvedstate file look like it did in previous versions of
 Mercurial (without the restored branch information in the 7th line), I
-expect that after resolving conflicts and succesfully
+expect that after resolving conflicts and successfully
 running 'shelve --continue' the branch information won't be
 restored and the branch will be unchanged.
 
@@ -1551,6 +1609,7 @@
   M a
   $ hg branch
   default
+  $ cd ..
 
 On non bare shelve the branch information shouldn't be restored
 
@@ -1587,7 +1646,7 @@
   default
   $ cd ..
 
-Prepare unshleve with a corrupted shelvedstate
+Prepare unshelve with a corrupted shelvedstate
   $ hg init r1 && cd r1
   $ echo text1 > file && hg add file
   $ hg shelve
@@ -1622,3 +1681,32 @@
   abort: no unshelve in progress
   [255]
   $ cd ..
+
+Unshelve respects --keep even if user intervention is needed
+  $ hg init unshelvekeep && cd unshelvekeep
+  $ echo 1 > file && hg ci -Am 1
+  adding file
+  $ echo 2 >> file
+  $ hg shelve
+  shelved as default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo 3 >> file && hg ci -Am 13
+  $ hg shelve --list
+  default         (1s ago)    changes to: 1
+  $ hg unshelve --keep
+  unshelving change 'default'
+  rebasing shelved changes
+  rebasing 2:3fbe6fbb0bef "changes to: 1" (tip)
+  merging file
+  warning: conflicts while merging file! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+  [1]
+  $ hg resolve --mark file
+  (no more unresolved files)
+  continue: hg unshelve --continue
+  $ hg unshelve --continue
+  rebasing 2:3fbe6fbb0bef "changes to: 1" (tip)
+  unshelve of 'default' complete
+  $ hg shelve --list
+  default         (*s ago)    changes to: 1 (glob)
+  $ cd ..
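
The name-checking block added near the top of this file pins down the rule that shelve names may not contain either kind of slash and may not begin with a period, so a shelve can never escape .hg/shelved or become a hidden file; further down, --patch and --stat are relaxed to accept several shelf names instead of exactly one. A hypothetical distillation of the name check (not shelve's actual code):

  def checkshelvename(name):
      # both separators are rejected so names stay portable to Windows
      if '/' in name or '\\' in name:
          raise ValueError("shelved change names can not contain slashes")
      if name.startswith('.'):
          raise ValueError("shelved change names can not start with '.'")

  checkshelvename('default')   # accepted
  checkshelvename('foo/bar')   # raises ValueError
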
--- a/tests/test-ssh-bundle1.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-ssh-bundle1.t	Wed Jan 18 11:43:36 2017 -0500
@@ -464,8 +464,8 @@
   running python ".*/dummyssh" user@dummy ('|")hg -R remote serve --stdio('|") (re)
   sending hello command
   sending between command
-  remote: 371
-  remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024
+  remote: 355
+  remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN
   remote: 1
   preparing listkeys for "bookmarks"
   sending listkeys command
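
The drop from "remote: 371" to "remote: 355" is simply the capability line shrinking: httpheader is an HTTP-only capability, so it is no longer advertised over ssh. A quick check of the arithmetic:

  # the dropped token, including its leading separator
  dropped = ' httpheader=1024'
  assert 371 - len(dropped) == 355
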
--- a/tests/test-ssh.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-ssh.t	Wed Jan 18 11:43:36 2017 -0500
@@ -265,8 +265,17 @@
   > sys.stdout.write("KABOOM\n")
   > EOF
 
-  $ echo '[hooks]' >> ../remote/.hg/hgrc
-  $ echo "changegroup.stdout = python $TESTTMP/badhook" >> ../remote/.hg/hgrc
+  $ cat <<EOF > $TESTTMP/badpyhook.py
+  > import sys
+  > def hook(ui, repo, hooktype, **kwargs):
+  >     sys.stdout.write("KABOOM IN PROCESS\n")
+  > EOF
+
+  $ cat <<EOF >> ../remote/.hg/hgrc
+  > [hooks]
+  > changegroup.stdout = python $TESTTMP/badhook
+  > changegroup.pystdout = python:$TESTTMP/badpyhook.py:hook
+  > EOF
   $ echo r > r
   $ hg ci -A -m z r
 
@@ -281,6 +290,7 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: KABOOM
+  remote: KABOOM IN PROCESS
   $ hg -R ../remote heads
   changeset:   5:1383141674ec
   tag:         tip
@@ -447,6 +457,7 @@
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
   remote: KABOOM
+  remote: KABOOM IN PROCESS
   local stdout
 
 debug output
@@ -456,8 +467,8 @@
   running python ".*/dummyssh" user@dummy ('|")hg -R remote serve --stdio('|") (re)
   sending hello command
   sending between command
-  remote: 371
-  remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024
+  remote: 355
+  remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN
   remote: 1
   query 1; heads
   sending batch command
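
Besides the external shell hook, the test now also registers an in-process Python hook (the python:<file>:<function> form in [hooks]) and verifies that its stdout reaches the pushing client as well. A minimal sketch of such a hook module under the usual calling convention, where Mercurial imports the file and calls the named function with keyword arguments:

  def hook(ui, repo, hooktype, **kwargs):
      # ui.write is generally preferable to bare sys.stdout, since its
      # output is captured and forwarded to remote clients
      ui.write("changegroup received in %s (%s)\n" % (repo.root, hooktype))
      return False  # a falsy return value means success
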
--- a/tests/test-status-inprocess.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-status-inprocess.py	Wed Jan 18 11:43:36 2017 -0500
@@ -7,7 +7,7 @@
     ui as uimod,
 )
 
-u = uimod.ui()
+u = uimod.ui.load()
 
 print('% creating repo')
 repo = localrepo.localrepository(u, '.', create=True)
--- a/tests/test-subrepo.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-subrepo.t	Wed Jan 18 11:43:36 2017 -0500
@@ -58,10 +58,10 @@
   $ mkdir snot
   $ touch snot/file
   $ hg remove -S snot/file
-  not removing snot/file: file is untracked
+  not removing snot/file: file is untracked (glob)
   [1]
   $ hg cat snot/filenot
-  snot/filenot: no such file in rev 7cf8cfea66e4
+  snot/filenot: no such file in rev 7cf8cfea66e4 (glob)
   [1]
   $ rm -r snot
 
@@ -332,7 +332,7 @@
   conflict
   =======
   t3
-  >>>>>>> other: 7af322bc1198  - test: 7
+  >>>>>>> other: 7af322bc1198 - test: 7
 
 11: remove subrepo t
 
@@ -1518,8 +1518,8 @@
 Courtesy phases synchronisation to publishing server does not block the push
 (issue3781)
 
-  $ cp -r main issue3781
-  $ cp -r main issue3781-dest
+  $ cp -R main issue3781
+  $ cp -R main issue3781-dest
   $ cd issue3781-dest/s
   $ hg phase tip # show we have draft changeset
   5: draft
--- a/tests/test-symlink-os-yes-fs-no.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-symlink-os-yes-fs-no.py	Wed Jan 18 11:43:36 2017 -0500
@@ -17,7 +17,7 @@
 if not getattr(os, "symlink", False):
     sys.exit(80) # SKIPPED_STATUS defined in run-tests.py
 
-u = uimod.ui()
+u = uimod.ui.load()
 # hide outer repo
 hg.peer(u, {}, '.', create=True)
 
@@ -35,6 +35,9 @@
 def symlink_failure(src, dst):
     raise OSError(1, "Operation not permitted")
 os.symlink = symlink_failure
+def islink_failure(path):
+    return False
+os.path.islink = islink_failure
 
 # dereference links as if a Samba server has exported this to a
 # Windows client
@@ -45,10 +48,10 @@
     fp.close()
 
 # reload repository
-u = uimod.ui()
+u = uimod.ui.load()
 repo = hg.repository(u, 'test0')
 commands.status(u, repo)
 
 # try cloning a repo which contains symlinks
-u = uimod.ui()
+u = uimod.ui.load()
 hg.clone(u, {}, BUNDLEPATH, 'test1')
--- a/tests/test-tags.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-tags.t	Wed Jan 18 11:43:36 2017 -0500
@@ -672,6 +672,9 @@
 
   $ ls tagsclient/.hg/cache
   branch2-served
+  checkisexec
+  checklink
+  checklink-target
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
@@ -696,6 +699,9 @@
 
   $ ls tagsclient/.hg/cache
   branch2-served
+  checkisexec
+  checklink
+  checklink-target
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
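
checkisexec, checklink and checklink-target are new cache entries: the exec-bit and symlink capability probes that commands previously redid on every run are now persisted under .hg/cache, with checklink being a symlink to checklink-target on filesystems that support links. A rough sketch of the idea (the probe below is an assumption, not Mercurial's implementation):

  import os

  def checklink(cachedir):
      link = os.path.join(cachedir, 'checklink')
      if os.path.islink(link):
          return True                    # cached positive result
      target = os.path.join(cachedir, 'checklink-target')
      try:
          open(target, 'a').close()      # make sure a link target exists
          os.symlink('checklink-target', link)
          return True
      except (OSError, AttributeError):
          return False                   # filesystem lacks symlink support
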
--- a/tests/test-transplant.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-transplant.t	Wed Jan 18 11:43:36 2017 -0500
@@ -102,9 +102,10 @@
   7  b3
   $ hg log -r 'transplanted(head())' --template '{rev} {parents} {desc}\n'
   7  b3
-  $ hg help revsets | grep transplanted
+  $ hg help revisions.transplanted
       "transplanted([set])"
         Transplanted changesets in set, or all transplanted changesets.
+  
 
 test transplanted keyword
 
--- a/tests/test-treediscovery.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-treediscovery.t	Wed Jan 18 11:43:36 2017 -0500
@@ -505,32 +505,32 @@
   $ cd ..
   $ tstop show
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=heads HTTP/1.1" 200 -
-  "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961
-  "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785
-  "GET /?cmd=changegroupsubset HTTP/1.1" 200 - x-hgarg-1:bases=d8f638ac69e9ae8dea4f09f11d696546a912d961&heads=d8f638ac69e9ae8dea4f09f11d696546a912d961
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=heads HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=changegroupsubset HTTP/1.1" 200 - x-hgarg-1:bases=d8f638ac69e9ae8dea4f09f11d696546a912d961&heads=d8f638ac69e9ae8dea4f09f11d696546a912d961 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=heads HTTP/1.1" 200 -
-  "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961
-  "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785
+  "GET /?cmd=heads HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
-  "GET /?cmd=heads HTTP/1.1" 200 -
-  "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961
-  "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785
-  "GET /?cmd=changegroupsubset HTTP/1.1" 200 - x-hgarg-1:bases=d8f638ac69e9ae8dea4f09f11d696546a912d961&heads=d8f638ac69e9ae8dea4f09f11d696546a912d961
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=heads HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=changegroupsubset HTTP/1.1" 200 - x-hgarg-1:bases=d8f638ac69e9ae8dea4f09f11d696546a912d961&heads=d8f638ac69e9ae8dea4f09f11d696546a912d961 x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=heads HTTP/1.1" 200 -
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=branchmap HTTP/1.1" 200 -
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
+  "GET /?cmd=heads HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
   "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+1827a5bb63e602382eb89dd58f2ac9f3b007ad91* (glob)
-  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
+  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=heads HTTP/1.1" 200 -
+  "GET /?cmd=heads HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
   "GET /?cmd=capabilities HTTP/1.1" 200 -
-  "GET /?cmd=heads HTTP/1.1" 200 -
+  "GET /?cmd=heads HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=zstd,zlib,none,bzip2
--- a/tests/test-treemanifest.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-treemanifest.t	Wed Jan 18 11:43:36 2017 -0500
@@ -458,7 +458,7 @@
   b/bar/fruits.txt (glob)
   b/bar/orange/fly/gnat.py (glob)
   b/bar/orange/fly/housefly.txt (glob)
-  $ cp -r .hg/store .hg/store-copy
+  $ cp -R .hg/store .hg/store-copy
 
 Test files for a subdirectory.
 
@@ -468,7 +468,7 @@
   b/bar/orange/fly/gnat.py (glob)
   b/bar/orange/fly/housefly.txt (glob)
   b/foo/apple/bees/flower.py (glob)
-  $ cp -r .hg/store-copy/. .hg/store
+  $ cp -R .hg/store-copy/. .hg/store
 
 Test files with just includes and excludes.
 
@@ -477,7 +477,7 @@
   $ rm -r .hg/store/meta/b/foo/apple/bees
   $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
   b/bar/fruits.txt (glob)
-  $ cp -r .hg/store-copy/. .hg/store
+  $ cp -R .hg/store-copy/. .hg/store
 
 Test files for a subdirectory, excluding a directory within it.
 
@@ -487,7 +487,7 @@
   b/bar/fruits.txt (glob)
   b/bar/orange/fly/gnat.py (glob)
   b/bar/orange/fly/housefly.txt (glob)
-  $ cp -r .hg/store-copy/. .hg/store
+  $ cp -R .hg/store-copy/. .hg/store
 
 Test files for a subdirectory, including only a directory within it, and
 including an unrelated directory.
@@ -497,7 +497,7 @@
   $ hg files -r . -I path:b/bar/orange -I path:a b
   b/bar/orange/fly/gnat.py (glob)
   b/bar/orange/fly/housefly.txt (glob)
-  $ cp -r .hg/store-copy/. .hg/store
+  $ cp -R .hg/store-copy/. .hg/store
 
 Test files for a pattern, including a directory, and excluding a directory
 within that.
@@ -507,7 +507,7 @@
   $ rm -r .hg/store/meta/b/bar/orange
   $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
   b/bar/fruits.txt (glob)
-  $ cp -r .hg/store-copy/. .hg/store
+  $ cp -R .hg/store-copy/. .hg/store
 
 Add some more changes to the deep repo
   $ echo narf >> b/bar/fruits.txt
@@ -553,7 +553,7 @@
   $ killdaemons.py
 
 Back up the recently added revlogs
-  $ cp -r .hg/store .hg/store-newcopy
+  $ cp -R .hg/store .hg/store-newcopy
 
 Verify reports missing dirlog
   $ rm .hg/store/meta/b/00manifest.*
@@ -582,7 +582,7 @@
   8 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-newcopy/. .hg/store
+  $ cp -R .hg/store-newcopy/. .hg/store
 
 Verify reports missing dirlog entry
   $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
@@ -607,7 +607,7 @@
   8 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
-  $ cp -r .hg/store-newcopy/. .hg/store
+  $ cp -R .hg/store-newcopy/. .hg/store
 
 Test cloning a treemanifest repo over http.
   $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
--- a/tests/test-trusted.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-trusted.py	Wed Jan 18 11:43:36 2017 -0500
@@ -66,7 +66,7 @@
     print('# %s user, %s group%s' % (kind[user == cuser], kind[group == cgroup],
                                      trusted))
 
-    u = uimod.ui()
+    u = uimod.ui.load()
     u.setconfig('ui', 'debug', str(bool(debug)))
     u.setconfig('ui', 'report_untrusted', str(bool(report)))
     u.readconfig('.hg/hgrc')
@@ -156,7 +156,7 @@
 
 print()
 print("# read trusted, untrusted, new ui, trusted")
-u = uimod.ui()
+u = uimod.ui.load()
 u.setconfig('ui', 'debug', 'on')
 u.readconfig(filename)
 u2 = u.copy()
--- a/tests/test-ui-color.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-ui-color.py	Wed Jan 18 11:43:36 2017 -0500
@@ -23,7 +23,7 @@
 hgrc.write('color=\n')
 hgrc.close()
 
-ui_ = uimod.ui()
+ui_ = uimod.ui.load()
 ui_.setconfig('ui', 'formatted', 'True')
 
 # we're not interested in the output, so write that to devnull
--- a/tests/test-ui-config.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-ui-config.py	Wed Jan 18 11:43:36 2017 -0500
@@ -5,7 +5,7 @@
     ui as uimod,
 )
 
-testui = uimod.ui()
+testui = uimod.ui.load()
 parsed = dispatch._parseconfig(testui, [
     'values.string=string value',
     'values.bool1=true',
--- a/tests/test-ui-verbosity.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-ui-verbosity.py	Wed Jan 18 11:43:36 2017 -0500
@@ -32,7 +32,7 @@
         f.write('debug = True\n')
     f.close()
 
-    u = uimod.ui()
+    u = uimod.ui.load()
     if cmd_quiet or cmd_debug or cmd_verbose:
         u.setconfig('ui', 'quiet', str(bool(cmd_quiet)))
         u.setconfig('ui', 'verbose', str(bool(cmd_verbose)))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-upgrade-repo.t	Wed Jan 18 11:43:36 2017 -0500
@@ -0,0 +1,312 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > share =
+  > EOF
+
+store and revlogv1 are required in source
+
+  $ hg --config format.usestore=false init no-store
+  $ hg -R no-store debugupgraderepo
+  abort: cannot upgrade repository; requirement missing: store
+  [255]
+
+  $ hg init no-revlogv1
+  $ cat > no-revlogv1/.hg/requires << EOF
+  > dotencode
+  > fncache
+  > generaldelta
+  > store
+  > EOF
+
+  $ hg -R no-revlogv1 debugupgraderepo
+  abort: cannot upgrade repository; requirement missing: revlogv1
+  [255]
+
+Cannot upgrade shared repositories
+
+  $ hg init share-parent
+  $ hg -q share share-parent share-child
+
+  $ hg -R share-child debugupgraderepo
+  abort: cannot upgrade repository; unsupported source requirement: shared
+  [255]
+
+Do not yet support upgrading manifestv2 and treemanifest repos
+
+  $ hg --config experimental.manifestv2=true init manifestv2
+  $ hg -R manifestv2 debugupgraderepo
+  abort: cannot upgrade repository; unsupported source requirement: manifestv2
+  [255]
+
+  $ hg --config experimental.treemanifest=true init treemanifest
+  $ hg -R treemanifest debugupgraderepo
+  abort: cannot upgrade repository; unsupported source requirement: treemanifest
+  [255]
+
+Cannot add manifestv2 or treemanifest requirement during upgrade
+
+  $ hg init disallowaddedreq
+  $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
+  abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
+  [255]
+
+An upgrade of a repository created with recommended settings only suggests optimizations
+
+  $ hg init empty
+  $ cd empty
+  $ hg debugupgraderepo
+  (no feature deficiencies found in existing repository)
+  performing an upgrade with "--run" will make the following changes:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, store
+  
+  additional optimizations are available by specifying "--optimize <name>":
+  
+  redeltaparent
+     deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
+  
+  redeltamultibase
+     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
+  
+  redeltaall
+     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
+  
+
+--optimize can be used to add optimizations
+
+  $ hg debugupgrade --optimize redeltaparent
+  (no feature deficiencies found in existing repository)
+  performing an upgrade with "--run" will make the following changes:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, store
+  
+  redeltaparent
+     deltas within internal storage will choose a new base revision if needed
+  
+  additional optimizations are available by specifying "--optimize <name>":
+  
+  redeltamultibase
+     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
+  
+  redeltaall
+     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
+  
+
+Various sub-optimal detections work
+
+  $ cat > .hg/requires << EOF
+  > revlogv1
+  > store
+  > EOF
+
+  $ hg debugupgraderepo
+  repository lacks features recommended by current config options:
+  
+  fncache
+     long and reserved filenames may not work correctly; repository performance is sub-optimal
+  
+  dotencode
+     storage of filenames beginning with a period or space may not work correctly
+  
+  generaldelta
+     deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
+  
+  
+  performing an upgrade with "--run" will make the following changes:
+  
+  requirements
+     preserved: revlogv1, store
+     added: dotencode, fncache, generaldelta
+  
+  fncache
+     repository will be more resilient to storing certain paths and performance of certain operations should be improved
+  
+  dotencode
+     repository will be better able to store files beginning with a space or period
+  
+  generaldelta
+     repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
+  
+  additional optimizations are available by specifying "--optimize <name>":
+  
+  redeltaparent
+     deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
+  
+  redeltamultibase
+     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
+  
+  redeltaall
+     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
+  
+
+  $ hg --config format.dotencode=false debugupgraderepo
+  repository lacks features recommended by current config options:
+  
+  fncache
+     long and reserved filenames may not work correctly; repository performance is sub-optimal
+  
+  generaldelta
+     deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
+  
+  repository lacks features used by the default config options:
+  
+  dotencode
+     storage of filenames beginning with a period or space may not work correctly
+  
+  
+  performing an upgrade with "--run" will make the following changes:
+  
+  requirements
+     preserved: revlogv1, store
+     added: fncache, generaldelta
+  
+  fncache
+     repository will be more resilient to storing certain paths and performance of certain operations should be improved
+  
+  generaldelta
+     repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
+  
+  additional optimizations are available by specifying "--optimize <name>":
+  
+  redeltaparent
+     deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
+  
+  redeltamultibase
+     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
+  
+  redeltaall
+     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
+  
+
+  $ cd ..
+
+Upgrading a repository that is already modern essentially no-ops
+
+  $ hg init modern
+  $ hg -R modern debugupgraderepo --run
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, generaldelta, revlogv1, store
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
+  copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
+  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
+
+Upgrading a repository to generaldelta works
+
+  $ hg --config format.usegeneraldelta=false init upgradegd
+  $ cd upgradegd
+  $ touch f0
+  $ hg -q commit -A -m initial
+  $ touch f1
+  $ hg -q commit -A -m 'add f1'
+  $ hg -q up -r 0
+  $ touch f2
+  $ hg -q commit -A -m 'add f2'
+
+  $ hg debugupgraderepo --run
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: dotencode, fncache, revlogv1, store
+     added: generaldelta
+  
+  generaldelta
+     repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
+  
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 341 bytes in store; 401 bytes tracked data
+  migrating 3 filelogs containing 3 revisions (0 bytes in store; 0 bytes tracked data)
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (157 bytes in store; 220 bytes tracked data)
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (184 bytes in store; 181 bytes tracked data)
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  copying phaseroots
+  data fully migrated to temporary repository
+  marking source repository as being upgraded; clients will be unable to read from repository
+  starting in-place swap of repository data
+  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
+  finalizing requirements file and making repository readable again
+  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
+
+Original requirements backed up
+
+  $ cat .hg/upgradebackup.*/requires
+  dotencode
+  fncache
+  revlogv1
+  store
+
+generaldelta added to the original requirements file
+
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+store directory has files we expect
+
+  $ ls .hg/store
+  00changelog.i
+  00manifest.i
+  data
+  fncache
+  phaseroots
+  undo
+  undo.backupfiles
+  undo.phaseroots
+
+manifest should be generaldelta
+
+  $ hg debugrevlog -m | grep flags
+  flags  : inline, generaldelta
+
+verify should be happy
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  3 files, 3 changesets, 3 total revisions
+
+old store should be backed up
+
+  $ ls .hg/upgradebackup.*/store
+  00changelog.i
+  00manifest.i
+  data
+  fncache
+  phaseroots
+  undo
+  undo.backup.fncache
+  undo.backupfiles
+  undo.phaseroots
+
+  $ cd ..
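
debugupgraderepo is new in this release: it reads .hg/requires (one requirement per line, as dumped above), refuses to run when it cannot convert a requirement, and otherwise stages the migrated data in a temporary repository before swapping it into place. A small sketch of the requirements gate, with the supported set hard-coded for illustration:

  import os

  def readrequires(repopath):
      # .hg/requires lists one requirement per line
      with open(os.path.join(repopath, '.hg', 'requires')) as fp:
          return set(line.strip() for line in fp if line.strip())

  SUPPORTED = {'dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store'}

  def checkupgradeable(repopath):
      unsupported = readrequires(repopath) - SUPPORTED
      if unsupported:
          raise SystemExit(
              'cannot upgrade repository; unsupported source requirement: '
              + ', '.join(sorted(unsupported)))
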
--- a/tests/test-verify.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-verify.t	Wed Jan 18 11:43:36 2017 -0500
@@ -69,10 +69,10 @@
   $ cd missing-entries
   $ echo 0 > file
   $ hg ci -Aqm0
-  $ cp -r .hg/store .hg/store-partial
+  $ cp -R .hg/store .hg/store-partial
   $ echo 1 > file
   $ hg ci -Aqm1
-  $ cp -r .hg/store .hg/store-full
+  $ cp -R .hg/store .hg/store-full
 
 Entire changelog missing
 
@@ -84,7 +84,7 @@
   3 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Entire manifest log missing
 
@@ -94,7 +94,7 @@
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Entire filelog missing
 
@@ -109,7 +109,7 @@
   3 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Entire changelog and manifest log missing
 
@@ -118,7 +118,7 @@
   $ hg verify -q
   warning: orphan revlog 'data/file.i'
   1 warnings encountered!
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Entire changelog and filelog missing
 
@@ -137,7 +137,7 @@
   6 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Entire manifest log and filelog missing
 
@@ -152,7 +152,7 @@
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Changelog missing entry
 
@@ -165,7 +165,7 @@
   1 warnings encountered!
   3 integrity errors encountered!
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Manifest log missing entry
 
@@ -176,7 +176,7 @@
   2 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Filelog missing entry
 
@@ -186,7 +186,7 @@
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Changelog and manifest log missing entry
 
@@ -199,7 +199,7 @@
   1 warnings encountered!
   2 integrity errors encountered!
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Changelog and filelog missing entry
 
@@ -211,7 +211,7 @@
    file@?: manifest refers to unknown revision c10f2164107d
   3 integrity errors encountered!
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Manifest and filelog missing entry
 
@@ -222,7 +222,7 @@
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Corrupt changelog base node to cause failure to read revision
 
@@ -238,7 +238,7 @@
   4 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Corrupt manifest log base node to cause failure to read revision
 
@@ -250,7 +250,7 @@
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
 Corrupt filelog base node to cause failure to read revision
 
@@ -261,7 +261,7 @@
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ cp -r .hg/store-full/. .hg/store
+  $ cp -R .hg/store-full/. .hg/store
 
   $ cd ..
 
--- a/tests/test-walkrepo.py	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-walkrepo.py	Wed Jan 18 11:43:36 2017 -0500
@@ -16,7 +16,7 @@
 walkrepos = scmutil.walkrepos
 checklink = util.checklink
 
-u = uimod.ui()
+u = uimod.ui.load()
 sym = checklink('.')
 
 hg.repository(u, 'top1', create=1)
--- a/tests/test-wireproto.t	Wed Jan 04 10:51:37 2017 -0600
+++ b/tests/test-wireproto.t	Wed Jan 18 11:43:36 2017 -0500
@@ -94,23 +94,23 @@
   * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:1033* (glob)
   * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:1033* (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=quatre&one=un&three=trois&two=deux (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=quatre&one=un&three=trois&two=deux (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=quatre&one=un&three=trois&two=deux x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=quatre&one=un&three=trois&two=deux x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=qu++atre&one=+un&three=trois+&two=deux (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=qu++atre&one=+un&three=trois+&two=deux (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=qu++atre&one=+un&three=trois+&two=deux x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=qu++atre&one=+un&three=trois+&two=deux x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=vier&one=eins&two=zwei (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=vier&one=eins&two=zwei (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=vier&one=eins&two=zwei x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=vier&one=eins&two=zwei x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:one=eins&two=zwei x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=onethousandcharactersxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx&one x-hgarg-2:=un&three=trois&two=deux (glob)
-  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=onethousandcharactersxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx&one x-hgarg-2:=un&three=trois&two=deux (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=onethousandcharactersxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx&one x-hgarg-2:=un&three=trois&two=deux x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=onethousandcharactersxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx&one x-hgarg-2:=un&three=trois&two=deux x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
 HTTP without the httpheader capability:
 
@@ -133,17 +133,17 @@
   $ cat error2.log
   $ cat access2.log
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&four=quatre&one=un&three=trois&two=deux HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&four=quatre&one=un&three=trois&two=deux HTTP/1.1" 200 - (glob)
+  * - - [*] "GET /?cmd=debugwireargs&four=quatre&one=un&three=trois&two=deux HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs&four=quatre&one=un&three=trois&two=deux HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&four=vier&one=eins&two=zwei HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&four=vier&one=eins&two=zwei HTTP/1.1" 200 - (glob)
+  * - - [*] "GET /?cmd=debugwireargs&four=vier&one=eins&two=zwei HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs&four=vier&one=eins&two=zwei HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - (glob)
+  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - (glob)
-  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - (glob)
+  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  * - - [*] "GET /?cmd=debugwireargs&one=eins&two=zwei HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
 SSH (try to exercise the ssh functionality with a dummy script):