changeset:   31301:295625f1296b
description: merge with stable
author:      Augie Fackler <augie@google.com>
date:        Sat, 11 Mar 2017 13:53:14 -0500
parents:     0c8a042b193d (diff), 86cd1f2cfff5 (current diff)
children:    681046de87f1
files:       mercurial/pycompat.py
diffstat:    251 files changed, 13859 insertions(+), 6725 deletions(-)
--- a/Makefile	Tue Mar 07 13:24:24 2017 -0500
+++ b/Makefile	Sat Mar 11 13:53:14 2017 -0500
@@ -163,6 +163,16 @@
 	    --root=build/mercurial/ --prefix=/usr/local/ \
 	    --install-lib=/Library/Python/2.7/site-packages/
 	make -C doc all install DESTDIR="$(PWD)/build/mercurial/"
+	# install zsh completions - this location appears to be
+	# searched by default as of macOS Sierra.
+	mkdir -p build/mercurial/usr/local/share/zsh/site-functions
+	cp contrib/zsh_completion build/mercurial/usr/local/share/zsh/site-functions/hg
+	# install bash completions - there doesn't appear to be a
+	# place that's searched by default for bash, so we'll follow
+	# the lead of Apple's git install and just put it in a
+	# location of our own.
+	mkdir -p build/mercurial/usr/local/hg/contrib
+	cp contrib/bash_completion build/mercurial/usr/local/hg/contrib/hg-completion.bash
 	mkdir -p $${OUTPUTDIR:-dist}
 	HGVER=$$((cat build/mercurial/Library/Python/2.7/site-packages/mercurial/__version__.py; echo 'print(version)') | python) && \
 	OSXVER=$$(sw_vers -productVersion | cut -d. -f1,2) && \
@@ -262,5 +272,9 @@
 .PHONY: help all local build doc cleanbutpackages clean install install-bin \
 	install-doc install-home install-home-bin install-home-doc \
 	dist dist-notests check tests check-code update-pot \
-	osx fedora20 docker-fedora20 fedora21 docker-fedora21 \
+	osx deb ppa docker-debian-jessie \
+	docker-ubuntu-trusty docker-ubuntu-trusty-ppa \
+	docker-ubuntu-xenial docker-ubuntu-xenial-ppa \
+	docker-ubuntu-yakkety docker-ubuntu-yakkety-ppa \
+	fedora20 docker-fedora20 fedora21 docker-fedora21 \
 	centos5 docker-centos5 centos6 docker-centos6 centos7 docker-centos7
--- a/contrib/check-code.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/check-code.py	Sat Mar 11 13:53:14 2017 -0500
@@ -237,7 +237,7 @@
     (r'lambda\s*\(.*,.*\)',
      "tuple parameter unpacking not available in Python 3+"),
     (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
-    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
+    (r'(?<!\.)\breduce\s*\(.*', "reduce is not available in Python 3+"),
    (r'\bdict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
     'dict-from-generator'),
    (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
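The negative lookbehind stops the checker from flagging the Python 3-safe
``functools.reduce`` spelling while still catching the bare builtin. A quick
standalone illustration of the new pattern's behavior (a sketch, not part of
check-code.py)::

    import re

    # Same pattern as the updated check: reject bare reduce(), but allow
    # attribute access such as functools.reduce().
    pat = re.compile(r"(?<!\.)\breduce\s*\(.*")

    assert pat.search("x = reduce(add, nums)")                 # flagged
    assert not pat.search("x = functools.reduce(add, nums)")   # allowed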
--- a/contrib/chg/chg.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/chg/chg.c	Sat Mar 11 13:53:14 2017 -0500
@@ -128,6 +128,24 @@
 		abortmsg("insecure sockdir %s", sockdir);
 }
 
+/*
+ * Check if a socket directory exists and is only owned by the current user.
+ * Return 1 if so, 0 if not. This is used to check if XDG_RUNTIME_DIR can be
+ * used or not. According to the specification [1], XDG_RUNTIME_DIR should be
+ * ignored if the directory is not owned by the user with mode 0700.
+ * [1]: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+ */
+static int checkruntimedir(const char *sockdir)
+{
+	struct stat st;
+	int r = lstat(sockdir, &st);
+	if (r < 0) /* ex. does not exist */
+		return 0;
+	if (!S_ISDIR(st.st_mode)) /* ex. is a file, not a directory */
+		return 0;
+	return st.st_uid == geteuid() && (st.st_mode & 0777) == 0700;
+}
+
 static void getdefaultsockdir(char sockdir[], size_t size)
 {
 	/* by default, put socket file in secure directory
@@ -135,7 +153,7 @@
 	 * (permission of socket file may be ignored on some Unices) */
 	const char *runtimedir = getenv("XDG_RUNTIME_DIR");
 	int r;
-	if (runtimedir) {
+	if (runtimedir && checkruntimedir(runtimedir)) {
 		r = snprintf(sockdir, size, "%s/chg", runtimedir);
 	} else {
 		const char *tmpdir = getenv("TMPDIR");
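For reference, the same XDG_RUNTIME_DIR validity test can be expressed in a
few lines of Python (an illustrative Unix-only sketch mirroring
checkruntimedir(), not code from this changeset)::

    import os
    import stat

    def usable_runtime_dir(path):
        # Reject missing paths, non-directories, other owners, or any
        # mode other than 0700, per the XDG Base Directory spec.
        try:
            st = os.lstat(path)
        except OSError:
            return False
        if not stat.S_ISDIR(st.st_mode):
            return False
        return st.st_uid == os.geteuid() and stat.S_IMODE(st.st_mode) == 0o700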
--- a/contrib/chg/procutil.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/chg/procutil.c	Sat Mar 11 13:53:14 2017 -0500
@@ -91,11 +91,15 @@
 
 	struct sigaction sa;
 	memset(&sa, 0, sizeof(sa));
+
+	/* deadly signals meant to be sent to a process group:
+	 * - SIGHUP: usually generated by the kernel, when termination of a
+	 *   process causes that process group to become orphaned
+	 * - SIGINT: usually generated by the terminal */
 	sa.sa_handler = forwardsignaltogroup;
 	sa.sa_flags = SA_RESTART;
 	if (sigemptyset(&sa.sa_mask) < 0)
 		goto error;
-
 	if (sigaction(SIGHUP, &sa, NULL) < 0)
 		goto error;
 	if (sigaction(SIGINT, &sa, NULL) < 0)
@@ -111,6 +115,11 @@
 	sa.sa_flags = SA_RESTART;
 	if (sigaction(SIGWINCH, &sa, NULL) < 0)
 		goto error;
+	/* forward user-defined signals */
+	if (sigaction(SIGUSR1, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGUSR2, &sa, NULL) < 0)
+		goto error;
 	/* propagate job control requests to worker */
 	sa.sa_handler = forwardsignal;
 	sa.sa_flags = SA_RESTART;
--- a/contrib/hgperf	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/hgperf	Sat Mar 11 13:53:14 2017 -0500
@@ -55,17 +55,15 @@
 import mercurial.util
 import mercurial.dispatch
 
-import time
-
 def timer(func, title=None):
     results = []
-    begin = time.time()
+    begin = mercurial.util.timer()
     count = 0
     while True:
         ostart = os.times()
-        cstart = time.time()
+        cstart = mercurial.util.timer()
         r = func()
-        cstop = time.time()
+        cstop = mercurial.util.timer()
         ostop = os.times()
         count += 1
         a, b = ostart, ostop
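Both hgperf and perf.py (below) now defer to ``mercurial.util.timer`` instead
of calling ``time.time()`` directly, because ``time.time()`` has coarse
resolution on some platforms, notably Windows. A plausible shape for such a
helper, assuming only stdlib behavior (the actual definition lives in
mercurial/util.py and may differ)::

    import os
    import time

    # On Windows, time.clock() offers much finer granularity than
    # time.time() (~15ms ticks); elsewhere time.time() is adequate.
    timer = time.time
    if os.name == 'nt':
        timer = time.clock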
--- a/contrib/perf.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/perf.py	Sat Mar 11 13:53:14 2017 -0500
@@ -190,13 +190,13 @@
 def _timer(fm, func, title=None):
     results = []
-    begin = time.time()
+    begin = util.timer()
     count = 0
     while True:
         ostart = os.times()
-        cstart = time.time()
+        cstart = util.timer()
         r = func()
-        cstop = time.time()
+        cstop = util.timer()
         ostop = os.times()
         count += 1
         a, b = ostart, ostop
@@ -993,6 +993,26 @@
     node = r.lookup(rev)
     rev = r.rev(node)
 
+    def getrawchunks(data, chain):
+        start = r.start
+        length = r.length
+        inline = r._inline
+        iosize = r._io.size
+        buffer = util.buffer
+        offset = start(chain[0])
+
+        chunks = []
+        ladd = chunks.append
+
+        for rev in chain:
+            chunkstart = start(rev)
+            if inline:
+                chunkstart += (rev + 1) * iosize
+            chunklength = length(rev)
+            ladd(buffer(data, chunkstart - offset, chunklength))
+
+        return chunks
+
     def dodeltachain(rev):
         if not cache:
             r.clearcaches()
@@ -1003,24 +1023,15 @@
             r.clearcaches()
         r._chunkraw(chain[0], chain[-1])
 
-    def dodecompress(data, chain):
+    def dorawchunks(data, chain):
         if not cache:
             r.clearcaches()
-
-        start = r.start
-        length = r.length
-        inline = r._inline
-        iosize = r._io.size
-        buffer = util.buffer
-        offset = start(chain[0])
+        getrawchunks(data, chain)
 
-        for rev in chain:
-            chunkstart = start(rev)
-            if inline:
-                chunkstart += (rev + 1) * iosize
-            chunklength = length(rev)
-            b = buffer(data, chunkstart - offset, chunklength)
-            r.decompress(b)
+    def dodecompress(chunks):
+        decomp = r.decompress
+        for chunk in chunks:
+            decomp(chunk)
 
     def dopatch(text, bins):
         if not cache:
@@ -1039,6 +1050,7 @@
 
     chain = r._deltachain(rev)[0]
     data = r._chunkraw(chain[0], chain[-1])[1]
+    rawchunks = getrawchunks(data, chain)
     bins = r._chunks(chain)
     text = str(bins[0])
     bins = bins[1:]
@@ -1048,7 +1060,8 @@
         (lambda: dorevision(), 'full'),
         (lambda: dodeltachain(rev), 'deltachain'),
         (lambda: doread(chain), 'read'),
-        (lambda: dodecompress(data, chain), 'decompress'),
+        (lambda: dorawchunks(data, chain), 'rawchunks'),
+        (lambda: dodecompress(rawchunks), 'decompress'),
         (lambda: dopatch(text, bins), 'patch'),
         (lambda: dohash(text), 'hash'),
     ]
@@ -1256,6 +1269,17 @@
         timer(fn, title=title)
     fm.end()
 
+@command('perfwrite', formatteropts)
+def perfwrite(ui, repo, **opts):
+    """microbenchmark ui.write
+    """
+    timer, fm = gettimer(ui, opts)
+    def write():
+        for i in range(100000):
+            ui.write(('Testing write performance\n'))
+    timer(write)
+    fm.end()
+
 def uisetup(ui):
     if (util.safehasattr(cmdutil, 'openrevlog') and
         not util.safehasattr(commands, 'debugrevlogopts')):
--- a/contrib/python-zstandard/NEWS.rst	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/NEWS.rst	Sat Mar 11 13:53:14 2017 -0500
@@ -1,6 +1,33 @@
 Version History
 ===============
 
+0.7.0 (released 2017-02-07)
+---------------------------
+
+* Added zstd.get_frame_parameters() to obtain info about a zstd frame.
+* Added ZstdDecompressor.decompress_content_dict_chain() for efficient
+  decompression of *content-only dictionary chains*.
+* CFFI module fully implemented; all tests run against both C extension and
+  CFFI implementation.
+* Vendored version of zstd updated to 1.1.3.
+* ZstdDecompressor.decompress() now uses ZSTD_createDDict_byReference()
+  to avoid extra memory allocation of dict data.
+* Add function names to error messages (by using ":name" in PyArg_Parse*
+  functions).
+* Reuse decompression context across operations. Previously, we created a
+  new ZSTD_DCtx for each decompress(). This was measured to slow down
+  decompression by 40-200MB/s. The API guarantees say ZstdDecompressor
+  is not thread safe. So we reuse the ZSTD_DCtx across operations and make
+  things faster in the process.
+* ZstdCompressor.write_to()'s compress() and flush() methods now return number
+  of bytes written.
+* ZstdDecompressor.write_to()'s write() method now returns the number of bytes
+  written to the underlying output object.
+* CompressionParameters instances now expose their values as attributes.
+* CompressionParameters instances no longer are subscriptable nor behave
+  as tuples (backwards incompatible). Use attributes to obtain values.
+* DictParameters instances now expose their values as attributes.
+
 0.6.0 (released 2017-01-14)
 ---------------------------
--- a/contrib/python-zstandard/README.rst	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/README.rst	Sat Mar 11 13:53:14 2017 -0500
@@ -4,10 +4,11 @@
 
 This project provides Python bindings for interfacing with the
 `Zstandard <http://www.zstd.net>`_ compression library. A C extension
-and CFFI interface is provided.
+and CFFI interface are provided.
 
-The primary goal of the extension is to provide a Pythonic interface to
-the underlying C API. This means exposing most of the features and flexibility
+The primary goal of the project is to provide a rich interface to the
+underlying C API through a Pythonic interface while not sacrificing
+performance. This means exposing most of the features and flexibility
 of the C API while not sacrificing usability or safety that Python provides.
 
 The canonical home for this project is
@@ -23,6 +24,9 @@
 may be some backwards incompatible changes before 1.0. Though the author
 does not intend to make any major changes to the Python API.
 
+This project is vendored and distributed with Mercurial 4.1, where it is
+used in a production capacity.
+
 There is continuous integration for Python versions 2.6, 2.7, and 3.3+
 on Linux x86_x64 and Windows x86 and x86_64. The author is reasonably
 confident the extension is stable and works as advertised on these
@@ -48,14 +52,15 @@
 support compression without the framing headers. But the author doesn't
 believe it a high priority at this time.
 
-The CFFI bindings are half-baked and need to be finished.
+The CFFI bindings are feature complete and all tests run against both
+the C extension and CFFI bindings to ensure behavior parity.
 
 Requirements
 ============
 
-This extension is designed to run with Python 2.6, 2.7, 3.3, 3.4, and 3.5
-on common platforms (Linux, Windows, and OS X). Only x86_64 is currently
-well-tested as an architecture.
+This extension is designed to run with Python 2.6, 2.7, 3.3, 3.4, 3.5, and
+3.6 on common platforms (Linux, Windows, and OS X). Only x86_64 is
+currently well-tested as an architecture.
 
 Installing
 ==========
@@ -106,15 +111,11 @@
 Comparison to Other Python Bindings
 ===================================
 
-https://pypi.python.org/pypi/zstd is an alternative Python binding to
+https://pypi.python.org/pypi/zstd is an alternate Python binding to
 Zstandard. At the time this was written, the latest release of that
-package (1.0.0.2) had the following significant differences from this package:
-
-* It only exposes the simple API for compression and decompression operations.
-  This extension exposes the streaming API, dictionary training, and more.
-* It adds a custom framing header to compressed data and there is no way to
-  disable it. This means that data produced with that module cannot be used by
-  other Zstandard implementations.
+package (1.1.2) only exposed the simple APIs for compression and decompression.
+This package exposes much more of the zstd API, including streaming and
+dictionary compression. This package also has CFFI support.
 
 Bundling of Zstandard Source Code
 =================================
@@ -260,6 +261,10 @@
 compressor's internal state into the output object. This may result in 0 or
 more ``write()`` calls to the output object.
 
+Both ``write()`` and ``flush()`` return the number of bytes written to the
+object's ``write()``. In many cases, small inputs do not accumulate enough
+data to cause a write and ``write()`` will return ``0``.
+
 If the size of the data being fed to this streaming compressor is known,
 you can declare it before compression begins::
 
@@ -476,6 +481,10 @@
 the decompressor by calling ``write(data)`` and decompressed output is written
 to the output object by calling its ``write(data)`` method.
 
+Calls to ``write()`` will return the number of bytes written to the output
+object. Not all inputs will result in bytes being written, so return values
+of ``0`` are possible.
+
 The size of chunks being ``write()`` to the destination can be specified::
 
     dctx = zstd.ZstdDecompressor()
@@ -576,6 +585,53 @@
     data = dobj.decompress(compressed_chunk_0)
     data = dobj.decompress(compressed_chunk_1)
 
+Content-Only Dictionary Chain Decompression
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``decompress_content_dict_chain(frames)`` performs decompression of a list of
+zstd frames produced using chained *content-only* dictionary compression. Such
+a list of frames is produced by compressing discrete inputs where each
+non-initial input is compressed with a *content-only* dictionary consisting
+of the content of the previous input.
+
+For example, say you have the following inputs::
+
+   inputs = [b'input 1', b'input 2', b'input 3']
+
+The zstd frame chain consists of:
+
+1. ``b'input 1'`` compressed in standalone/discrete mode
+2. ``b'input 2'`` compressed using ``b'input 1'`` as a *content-only* dictionary
+3. ``b'input 3'`` compressed using ``b'input 2'`` as a *content-only* dictionary
+
+Each zstd frame **must** have the content size written.
+
+The following Python code can be used to produce a *content-only dictionary
+chain*::
+
+    def make_chain(inputs):
+        frames = []
+
+        # First frame is compressed in standalone/discrete mode.
+        zctx = zstd.ZstdCompressor(write_content_size=True)
+        frames.append(zctx.compress(inputs[0]))
+
+        # Subsequent frames use the previous fulltext as a content-only dictionary
+        for i, raw in enumerate(inputs[1:]):
+            dict_data = zstd.ZstdCompressionDict(inputs[i])
+            zctx = zstd.ZstdCompressor(write_content_size=True, dict_data=dict_data)
+            frames.append(zctx.compress(raw))
+
+        return frames
+
+``decompress_content_dict_chain()`` returns the uncompressed data of the last
+element in the input chain.
+
+It is possible to implement *content-only dictionary chain* decompression
+on top of other Python APIs. However, this function will likely be significantly
+faster, especially for long input chains, as it avoids the overhead of
+instantiating and passing around intermediate objects between C and Python.
+
 Choosing an API
 ---------------
@@ -634,6 +690,13 @@
 
     dict_data = zstd.ZstdCompressionDict(data)
 
+It is possible to construct a dictionary from *any* data. Unless the
+data begins with a magic header, the dictionary will be treated as
+*content-only*. *Content-only* dictionaries allow compression operations
+that follow to reference raw data within the content. For one use of
+*content-only* dictionaries, see
+``ZstdDecompressor.decompress_content_dict_chain()``.
+
 More interestingly, instances can be created by *training* on sample data::
 
     dict_data = zstd.train_dictionary(size, samples)
@@ -700,19 +763,57 @@
 
     cctx = zstd.ZstdCompressor(compression_params=params)
 
-The members of the ``CompressionParameters`` tuple are as follows::
+The members/attributes of ``CompressionParameters`` instances are as follows::
 
-* 0 - Window log
-* 1 - Chain log
-* 2 - Hash log
-* 3 - Search log
-* 4 - Search length
-* 5 - Target length
-* 6 - Strategy (one of the ``zstd.STRATEGY_`` constants)
+* window_log
+* chain_log
+* hash_log
+* search_log
+* search_length
+* target_length
+* strategy
+
+This is the order the arguments are passed to the constructor if not using
+named arguments.
 
 You'll need to read the Zstandard documentation for what these parameters
 do.
 
+Frame Inspection
+----------------
+
+Data emitted from zstd compression is encapsulated in a *frame*. This frame
+begins with a 4 byte *magic number* header followed by 2 to 14 bytes describing
+the frame in more detail. For more info, see
+https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md.
+
+``zstd.get_frame_parameters(data)`` parses a zstd *frame* header from a bytes
+instance and returns a ``FrameParameters`` object describing the frame.
+
+Depending on which fields are present in the frame and their values, the
+length of the frame parameters varies. If insufficient bytes are passed
+in to fully parse the frame parameters, ``ZstdError`` is raised. To ensure
+frame parameters can be parsed, pass in at least 18 bytes.
+
+``FrameParameters`` instances have the following attributes:
+
+content_size
+   Integer size of original, uncompressed content. This will be ``0`` if the
+   original content size isn't written to the frame (controlled with the
+   ``write_content_size`` argument to ``ZstdCompressor``) or if the input
+   content size was ``0``.
+
+window_size
+   Integer size of maximum back-reference distance in compressed data.
+
+dict_id
+   Integer of dictionary ID used for compression. ``0`` if no dictionary
+   ID was used or if the dictionary ID was ``0``.
+
+has_checksum
+   Bool indicating whether a 4 byte content checksum is stored at the end
+   of the frame.
+
 Misc Functionality
 ------------------
@@ -776,19 +877,32 @@
 TARGETLENGTH_MAX
     Maximum value for compression parameter
 STRATEGY_FAST
-    Compression strategory
+    Compression strategy
 STRATEGY_DFAST
-    Compression strategory
+    Compression strategy
 STRATEGY_GREEDY
-    Compression strategory
+    Compression strategy
 STRATEGY_LAZY
-    Compression strategory
+    Compression strategy
 STRATEGY_LAZY2
-    Compression strategory
+    Compression strategy
 STRATEGY_BTLAZY2
-    Compression strategory
+    Compression strategy
 STRATEGY_BTOPT
-    Compression strategory
+    Compression strategy
+
+Performance Considerations
+--------------------------
+
+The ``ZstdCompressor`` and ``ZstdDecompressor`` types maintain state to a
+persistent compression or decompression *context*. Reusing a ``ZstdCompressor``
+or ``ZstdDecompressor`` instance for multiple operations is faster than
+instantiating a new ``ZstdCompressor`` or ``ZstdDecompressor`` for each
+operation. The differences are magnified as the size of data decreases. For
+example, the difference between *context* reuse and non-reuse for 100,000
+100 byte inputs will be significant (possibly over 10x faster to reuse
+contexts) whereas 10 1,000,000 byte inputs will be more similar in speed
+(because the time spent doing compression dwarfs time spent creating new
+*contexts*).
 
 Note on Zstandard's *Experimental* API
 ======================================
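The README changes above replace tuple indexing on ``CompressionParameters``
with named attributes. Based on the constructor order documented there, usage
would look like this (a sketch with illustrative parameter values, not text
from the README)::

    import zstd

    # Positional order matches the attribute list: window_log, chain_log,
    # hash_log, search_log, search_length, target_length, strategy.
    params = zstd.CompressionParameters(20, 6, 12, 4, 5, 16, zstd.STRATEGY_FAST)
    assert params.window_log == 20
    assert params.strategy == zstd.STRATEGY_FAST

    cctx = zstd.ZstdCompressor(compression_params=params)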
--- a/contrib/python-zstandard/c-ext/compressiondict.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/compressiondict.c	Sat Mar 11 13:53:14 2017 -0500
@@ -28,7 +28,8 @@
     void* dict;
     ZstdCompressionDict* result;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|O!", kwlist,
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|O!:train_dictionary",
+        kwlist,
         &capacity,
         &PyList_Type, &samples,
         (PyObject*)&DictParametersType, &parameters)) {
@@ -57,7 +58,6 @@
         sampleItem = PyList_GetItem(samples, sampleIndex);
         if (!PyBytes_Check(sampleItem)) {
             PyErr_SetString(PyExc_ValueError, "samples must be bytes");
-            /* TODO probably need to perform DECREF here */
             return NULL;
         }
         samplesSize += PyBytes_GET_SIZE(sampleItem);
@@ -133,10 +133,11 @@
     self->dictSize = 0;
 
 #if PY_MAJOR_VERSION >= 3
-    if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "y#:ZstdCompressionDict",
 #else
-    if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "s#:ZstdCompressionDict",
 #endif
+        &source, &sourceSize)) {
         return -1;
     }
--- a/contrib/python-zstandard/c-ext/compressionparams.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/compressionparams.c	Sat Mar 11 13:53:14 2017 -0500
@@ -25,7 +25,8 @@
     ZSTD_compressionParameters params;
     CompressionParametersObject* result;
 
-    if (!PyArg_ParseTuple(args, "i|Kn", &compressionLevel, &sourceSize, &dictSize)) {
+    if (!PyArg_ParseTuple(args, "i|Kn:get_compression_parameters",
+        &compressionLevel, &sourceSize, &dictSize)) {
         return NULL;
     }
 
@@ -47,12 +48,85 @@
     return result;
 }
 
+static int CompressionParameters_init(CompressionParametersObject* self, PyObject* args, PyObject* kwargs) {
+    static char* kwlist[] = {
+        "window_log",
+        "chain_log",
+        "hash_log",
+        "search_log",
+        "search_length",
+        "target_length",
+        "strategy",
+        NULL
+    };
+
+    unsigned windowLog;
+    unsigned chainLog;
+    unsigned hashLog;
+    unsigned searchLog;
+    unsigned searchLength;
+    unsigned targetLength;
+    unsigned strategy;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "IIIIIII:CompressionParameters",
+        kwlist, &windowLog, &chainLog, &hashLog, &searchLog, &searchLength,
+        &targetLength, &strategy)) {
+        return -1;
+    }
+
+    if (windowLog < ZSTD_WINDOWLOG_MIN || windowLog > ZSTD_WINDOWLOG_MAX) {
+        PyErr_SetString(PyExc_ValueError, "invalid window log value");
+        return -1;
+    }
+
+    if (chainLog < ZSTD_CHAINLOG_MIN || chainLog > ZSTD_CHAINLOG_MAX) {
+        PyErr_SetString(PyExc_ValueError, "invalid chain log value");
+        return -1;
+    }
+
+    if (hashLog < ZSTD_HASHLOG_MIN || hashLog > ZSTD_HASHLOG_MAX) {
+        PyErr_SetString(PyExc_ValueError, "invalid hash log value");
+        return -1;
+    }
+
+    if (searchLog < ZSTD_SEARCHLOG_MIN || searchLog > ZSTD_SEARCHLOG_MAX) {
+        PyErr_SetString(PyExc_ValueError, "invalid search log value");
+        return -1;
+    }
+
+    if (searchLength < ZSTD_SEARCHLENGTH_MIN || searchLength > ZSTD_SEARCHLENGTH_MAX) {
+        PyErr_SetString(PyExc_ValueError, "invalid search length value");
+        return -1;
+    }
+
+    if (targetLength < ZSTD_TARGETLENGTH_MIN || targetLength > ZSTD_TARGETLENGTH_MAX) {
+        PyErr_SetString(PyExc_ValueError, "invalid target length value");
+        return -1;
+    }
+
+    if (strategy < ZSTD_fast || strategy > ZSTD_btopt) {
+        PyErr_SetString(PyExc_ValueError, "invalid strategy value");
+        return -1;
+    }
+
+    self->windowLog = windowLog;
+    self->chainLog = chainLog;
+    self->hashLog = hashLog;
+    self->searchLog = searchLog;
+    self->searchLength = searchLength;
+    self->targetLength = targetLength;
+    self->strategy = strategy;
+
+    return 0;
+}
+
 PyObject* estimate_compression_context_size(PyObject* self, PyObject* args) {
     CompressionParametersObject* params;
     ZSTD_compressionParameters zparams;
     PyObject* result;
 
-    if (!PyArg_ParseTuple(args, "O!", &CompressionParametersType, &params)) {
+    if (!PyArg_ParseTuple(args, "O!:estimate_compression_context_size",
+        &CompressionParametersType, &params)) {
         return NULL;
     }
 
@@ -64,113 +138,33 @@
 PyDoc_STRVAR(CompressionParameters__doc__,
 "CompressionParameters: low-level control over zstd compression");
 
-static PyObject* CompressionParameters_new(PyTypeObject* subtype, PyObject* args, PyObject* kwargs) {
-    CompressionParametersObject* self;
-    unsigned windowLog;
-    unsigned chainLog;
-    unsigned hashLog;
-    unsigned searchLog;
-    unsigned searchLength;
-    unsigned targetLength;
-    unsigned strategy;
-
-    if (!PyArg_ParseTuple(args, "IIIIIII", &windowLog, &chainLog, &hashLog, &searchLog,
-        &searchLength, &targetLength, &strategy)) {
-        return NULL;
-    }
-
-    if (windowLog < ZSTD_WINDOWLOG_MIN || windowLog > ZSTD_WINDOWLOG_MAX) {
-        PyErr_SetString(PyExc_ValueError, "invalid window log value");
-        return NULL;
-    }
-
-    if (chainLog < ZSTD_CHAINLOG_MIN || chainLog > ZSTD_CHAINLOG_MAX) {
-        PyErr_SetString(PyExc_ValueError, "invalid chain log value");
-        return NULL;
-    }
-
-    if (hashLog < ZSTD_HASHLOG_MIN || hashLog > ZSTD_HASHLOG_MAX) {
-        PyErr_SetString(PyExc_ValueError, "invalid hash log value");
-        return NULL;
-    }
-
-    if (searchLog < ZSTD_SEARCHLOG_MIN || searchLog > ZSTD_SEARCHLOG_MAX) {
-        PyErr_SetString(PyExc_ValueError, "invalid search log value");
-        return NULL;
-    }
-
-    if (searchLength < ZSTD_SEARCHLENGTH_MIN || searchLength > ZSTD_SEARCHLENGTH_MAX) {
-        PyErr_SetString(PyExc_ValueError, "invalid search length value");
-        return NULL;
-    }
-
-    if (targetLength < ZSTD_TARGETLENGTH_MIN || targetLength > ZSTD_TARGETLENGTH_MAX) {
-        PyErr_SetString(PyExc_ValueError, "invalid target length value");
-        return NULL;
-    }
-
-    if (strategy < ZSTD_fast || strategy > ZSTD_btopt) {
-        PyErr_SetString(PyExc_ValueError, "invalid strategy value");
-        return NULL;
-    }
-
-    self = (CompressionParametersObject*)subtype->tp_alloc(subtype, 1);
-    if (!self) {
-        return NULL;
-    }
-
-    self->windowLog = windowLog;
-    self->chainLog = chainLog;
-    self->hashLog = hashLog;
-    self->searchLog = searchLog;
-    self->searchLength = searchLength;
-    self->targetLength = targetLength;
-    self->strategy = strategy;
-
-    return (PyObject*)self;
-}
-
 static void CompressionParameters_dealloc(PyObject* self) {
     PyObject_Del(self);
 }
 
-static Py_ssize_t CompressionParameters_length(PyObject* self) {
-    return 7;
-}
-
-static PyObject* CompressionParameters_item(PyObject* o, Py_ssize_t i) {
-    CompressionParametersObject* self = (CompressionParametersObject*)o;
-
-    switch (i) {
-    case 0:
-        return PyLong_FromLong(self->windowLog);
-    case 1:
-        return PyLong_FromLong(self->chainLog);
-    case 2:
-        return PyLong_FromLong(self->hashLog);
-    case 3:
-        return PyLong_FromLong(self->searchLog);
-    case 4:
-        return PyLong_FromLong(self->searchLength);
-    case 5:
-        return PyLong_FromLong(self->targetLength);
-    case 6:
-        return PyLong_FromLong(self->strategy);
-    default:
-        PyErr_SetString(PyExc_IndexError, "index out of range");
-        return NULL;
-    }
-}
-
-static PySequenceMethods CompressionParameters_sq = {
-    CompressionParameters_length, /* sq_length */
-    0,                            /* sq_concat */
-    0,                            /* sq_repeat */
-    CompressionParameters_item,   /* sq_item */
-    0,                            /* sq_ass_item */
-    0,                            /* sq_contains */
-    0,                            /* sq_inplace_concat */
-    0                             /* sq_inplace_repeat */
+static PyMemberDef CompressionParameters_members[] = {
+    { "window_log", T_UINT,
+      offsetof(CompressionParametersObject, windowLog), READONLY,
+      "window log" },
+    { "chain_log", T_UINT,
+      offsetof(CompressionParametersObject, chainLog), READONLY,
+      "chain log" },
+    { "hash_log", T_UINT,
+      offsetof(CompressionParametersObject, hashLog), READONLY,
+      "hash log" },
+    { "search_log", T_UINT,
+      offsetof(CompressionParametersObject, searchLog), READONLY,
+      "search log" },
+    { "search_length", T_UINT,
+      offsetof(CompressionParametersObject, searchLength), READONLY,
+      "search length" },
+    { "target_length", T_UINT,
+      offsetof(CompressionParametersObject, targetLength), READONLY,
+      "target length" },
+    { "strategy", T_INT,
+      offsetof(CompressionParametersObject, strategy), READONLY,
+      "strategy" },
+    { NULL }
 };
 
 PyTypeObject CompressionParametersType = {
@@ -185,7 +179,7 @@
     0, /* tp_compare */
     0, /* tp_repr */
     0, /* tp_as_number */
-    &CompressionParameters_sq, /* tp_as_sequence */
+    0, /* tp_as_sequence */
     0, /* tp_as_mapping */
     0, /* tp_hash */
     0, /* tp_call */
@@ -193,7 +187,7 @@
     0, /* tp_getattro */
     0, /* tp_setattro */
     0, /* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT, /* tp_flags */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
     CompressionParameters__doc__, /* tp_doc */
     0, /* tp_traverse */
     0, /* tp_clear */
@@ -202,16 +196,16 @@
     0, /* tp_iter */
     0, /* tp_iternext */
     0, /* tp_methods */
-    0, /* tp_members */
+    CompressionParameters_members, /* tp_members */
     0, /* tp_getset */
     0, /* tp_base */
     0, /* tp_dict */
     0, /* tp_descr_get */
     0, /* tp_descr_set */
     0, /* tp_dictoffset */
-    0, /* tp_init */
+    (initproc)CompressionParameters_init, /* tp_init */
     0, /* tp_alloc */
-    CompressionParameters_new, /* tp_new */
+    PyType_GenericNew, /* tp_new */
 };
 
 void compressionparams_module_init(PyObject* mod) {
--- a/contrib/python-zstandard/c-ext/compressionwriter.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c	Sat Mar 11 13:53:14 2017 -0500
@@ -52,7 +52,7 @@
     ZSTD_outBuffer output;
     PyObject* res;
 
-    if (!PyArg_ParseTuple(args, "OOO", &exc_type, &exc_value, &exc_tb)) {
+    if (!PyArg_ParseTuple(args, "OOO:__exit__", &exc_type, &exc_value, &exc_tb)) {
         return NULL;
     }
 
@@ -119,11 +119,12 @@
     ZSTD_inBuffer input;
     ZSTD_outBuffer output;
     PyObject* res;
+    Py_ssize_t totalWrite = 0;
 
 #if PY_MAJOR_VERSION >= 3
-    if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "y#:write", &source, &sourceSize)) {
 #else
-    if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "s#:write", &source, &sourceSize)) {
 #endif
         return NULL;
     }
@@ -164,20 +165,21 @@
 #endif
                 output.dst, output.pos);
             Py_XDECREF(res);
+            totalWrite += output.pos;
         }
         output.pos = 0;
     }
 
     PyMem_Free(output.dst);
 
-    /* TODO return bytes written */
-    Py_RETURN_NONE;
+    return PyLong_FromSsize_t(totalWrite);
 }
 
 static PyObject* ZstdCompressionWriter_flush(ZstdCompressionWriter* self, PyObject* args) {
     size_t zresult;
     ZSTD_outBuffer output;
     PyObject* res;
+    Py_ssize_t totalWrite = 0;
 
     if (!self->entered) {
         PyErr_SetString(ZstdError, "flush must be called from an active context manager");
@@ -215,14 +217,14 @@
 #endif
                 output.dst, output.pos);
             Py_XDECREF(res);
+            totalWrite += output.pos;
         }
         output.pos = 0;
     }
 
     PyMem_Free(output.dst);
 
-    /* TODO return bytes written */
-    Py_RETURN_NONE;
+    return PyLong_FromSsize_t(totalWrite);
 }
 
 static PyMethodDef ZstdCompressionWriter_methods[] = {
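With this change the streaming compressor's context manager can report how
much was flushed to the destination. A hedged usage sketch (API names per the
README above; exact byte counts will vary with input and level)::

    import io
    import zstd

    cctx = zstd.ZstdCompressor(level=3)
    dest = io.BytesIO()
    with cctx.write_to(dest) as compressor:
        # Small writes may only accumulate in internal buffers and return 0.
        written = compressor.write(b'chunk of data')
        # flush() forces buffered data out and also returns a byte count.
        written += compressor.flush()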
--- a/contrib/python-zstandard/c-ext/compressobj.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/compressobj.c	Sat Mar 11 13:53:14 2017 -0500
@@ -42,9 +42,9 @@
     }
 
 #if PY_MAJOR_VERSION >= 3
-    if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "y#:compress", &source, &sourceSize)) {
 #else
-    if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "s#:compress", &source, &sourceSize)) {
 #endif
         return NULL;
     }
@@ -98,7 +98,7 @@
     PyObject* result = NULL;
     Py_ssize_t resultSize = 0;
 
-    if (!PyArg_ParseTuple(args, "|i", &flushMode)) {
+    if (!PyArg_ParseTuple(args, "|i:flush", &flushMode)) {
         return NULL;
     }
--- a/contrib/python-zstandard/c-ext/compressor.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/compressor.c	Sat Mar 11 13:53:14 2017 -0500
@@ -16,7 +16,7 @@
     Py_BEGIN_ALLOW_THREADS
     memset(&zmem, 0, sizeof(zmem));
     compressor->cdict = ZSTD_createCDict_advanced(compressor->dict->dictData,
-        compressor->dict->dictSize, *zparams, zmem);
+        compressor->dict->dictSize, 1, *zparams, zmem);
     Py_END_ALLOW_THREADS
 
     if (!compressor->cdict) {
@@ -128,8 +128,8 @@
     self->cparams = NULL;
     self->cdict = NULL;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO!O!OOO", kwlist,
-        &level, &ZstdCompressionDictType, &dict,
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO!O!OOO:ZstdCompressor",
+        kwlist, &level, &ZstdCompressionDictType, &dict,
         &CompressionParametersType, &params,
         &writeChecksum, &writeContentSize, &writeDictID)) {
         return -1;
@@ -243,8 +243,8 @@
     PyObject* totalReadPy;
     PyObject* totalWritePy;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|nkk", kwlist, &source, &dest, &sourceSize,
-        &inSize, &outSize)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|nkk:copy_stream", kwlist,
+        &source, &dest, &sourceSize, &inSize, &outSize)) {
         return NULL;
     }
 
@@ -402,9 +402,9 @@
     ZSTD_parameters zparams;
 
 #if PY_MAJOR_VERSION >= 3
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|O",
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|O:compress",
 #else
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|O",
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|O:compress",
 #endif
         kwlist, &source, &sourceSize, &allowEmpty)) {
         return NULL;
@@ -512,7 +512,7 @@
         return NULL;
     }
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|n", kwlist, &inSize)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|n:compressobj", kwlist, &inSize)) {
         return NULL;
     }
 
@@ -574,8 +574,8 @@
     size_t outSize = ZSTD_CStreamOutSize();
     ZstdCompressorIterator* result;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nkk", kwlist, &reader, &sourceSize,
-        &inSize, &outSize)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nkk:read_from", kwlist,
+        &reader, &sourceSize, &inSize, &outSize)) {
         return NULL;
     }
 
@@ -693,8 +693,8 @@
     Py_ssize_t sourceSize = 0;
     size_t outSize = ZSTD_CStreamOutSize();
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nk", kwlist, &writer, &sourceSize,
-        &outSize)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nk:write_to", kwlist,
+        &writer, &sourceSize, &outSize)) {
         return NULL;
     }
--- a/contrib/python-zstandard/c-ext/decompressionwriter.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/decompressionwriter.c	Sat Mar 11 13:53:14 2017 -0500
@@ -71,11 +71,12 @@
     ZSTD_inBuffer input;
     ZSTD_outBuffer output;
     PyObject* res;
+    Py_ssize_t totalWrite = 0;
 
 #if PY_MAJOR_VERSION >= 3
-    if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "y#:write", &source, &sourceSize)) {
 #else
-    if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+    if (!PyArg_ParseTuple(args, "s#:write", &source, &sourceSize)) {
 #endif
         return NULL;
     }
@@ -116,15 +117,15 @@
 #endif
                 output.dst, output.pos);
             Py_XDECREF(res);
+            totalWrite += output.pos;
             output.pos = 0;
         }
     }
 
     PyMem_Free(output.dst);
 
-    /* TODO return bytes written */
-    Py_RETURN_NONE;
-    }
+    return PyLong_FromSsize_t(totalWrite);
+}
 
 static PyMethodDef ZstdDecompressionWriter_methods[] = {
     { "__enter__", (PyCFunction)ZstdDecompressionWriter_enter, METH_NOARGS,
--- a/contrib/python-zstandard/c-ext/decompressobj.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/decompressobj.c	Sat Mar 11 13:53:14 2017 -0500
@@ -41,9 +41,9 @@
     }
 
 #if PY_MAJOR_VERSION >= 3
-    if (!PyArg_ParseTuple(args, "y#",
+    if (!PyArg_ParseTuple(args, "y#:decompress",
 #else
-    if (!PyArg_ParseTuple(args, "s#",
+    if (!PyArg_ParseTuple(args, "s#:decompress",
 #endif
         &source, &sourceSize)) {
         return NULL;
--- a/contrib/python-zstandard/c-ext/decompressor.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/decompressor.c	Sat Mar 11 13:53:14 2017 -0500
@@ -59,23 +59,19 @@
 
     ZstdCompressionDict* dict = NULL;
 
-    self->refdctx = NULL;
+    self->dctx = NULL;
     self->dict = NULL;
    self->ddict = NULL;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!", kwlist,
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!:ZstdDecompressor", kwlist,
        &ZstdCompressionDictType, &dict)) {
        return -1;
    }
 
-    /* Instead of creating a ZSTD_DCtx for every decompression operation,
-       we create an instance at object creation time and recycle it via
-       ZSTD_copyDCTx() on each use. This means each use is a malloc+memcpy
-       instead of a malloc+init. */
     /* TODO lazily initialize the reference ZSTD_DCtx on first use since
        not instances of ZstdDecompressor will use a ZSTD_DCtx. */
-    self->refdctx = ZSTD_createDCtx();
-    if (!self->refdctx) {
+    self->dctx = ZSTD_createDCtx();
+    if (!self->dctx) {
         PyErr_NoMemory();
         goto except;
     }
@@ -88,17 +84,17 @@
     return 0;
 
 except:
-    if (self->refdctx) {
-        ZSTD_freeDCtx(self->refdctx);
-        self->refdctx = NULL;
+    if (self->dctx) {
+        ZSTD_freeDCtx(self->dctx);
+        self->dctx = NULL;
     }
 
     return -1;
 }
 
 static void Decompressor_dealloc(ZstdDecompressor* self) {
-    if (self->refdctx) {
-        ZSTD_freeDCtx(self->refdctx);
+    if (self->dctx) {
+        ZSTD_freeDCtx(self->dctx);
     }
 
     Py_XDECREF(self->dict);
@@ -150,8 +146,8 @@
     PyObject* totalReadPy;
     PyObject* totalWritePy;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk", kwlist, &source,
-        &dest, &inSize, &outSize)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk:copy_stream", kwlist,
+        &source, &dest, &inSize, &outSize)) {
         return NULL;
     }
 
@@ -243,7 +239,7 @@
     Py_DecRef(totalReadPy);
     Py_DecRef(totalWritePy);
 
-    finally:
+finally:
     if (output.dst) {
         PyMem_Free(output.dst);
     }
@@ -291,28 +287,19 @@
     unsigned long long decompressedSize;
     size_t destCapacity;
     PyObject* result = NULL;
-    ZSTD_DCtx* dctx = NULL;
     void* dictData = NULL;
     size_t dictSize = 0;
     size_t zresult;
 
 #if PY_MAJOR_VERSION >= 3
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n", kwlist,
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n:decompress",
 #else
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n", kwlist,
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n:decompress",
 #endif
-        &source, &sourceSize, &maxOutputSize)) {
+        kwlist, &source, &sourceSize, &maxOutputSize)) {
         return NULL;
     }
 
-    dctx = PyMem_Malloc(ZSTD_sizeof_DCtx(self->refdctx));
-    if (!dctx) {
-        PyErr_NoMemory();
-        return NULL;
-    }
-
-    ZSTD_copyDCtx(dctx, self->refdctx);
-
     if (self->dict) {
         dictData = self->dict->dictData;
         dictSize = self->dict->dictSize;
@@ -320,12 +307,12 @@
 
     if (dictData && !self->ddict) {
         Py_BEGIN_ALLOW_THREADS
-        self->ddict = ZSTD_createDDict(dictData, dictSize);
+        self->ddict = ZSTD_createDDict_byReference(dictData, dictSize);
         Py_END_ALLOW_THREADS
 
         if (!self->ddict) {
             PyErr_SetString(ZstdError, "could not create decompression dict");
-            goto except;
+            return NULL;
         }
     }
 
@@ -335,7 +322,7 @@
     if (0 == maxOutputSize) {
         PyErr_SetString(ZstdError, "input data invalid or missing content size "
             "in frame header");
-        goto except;
+        return NULL;
     }
     else {
         result = PyBytes_FromStringAndSize(NULL, maxOutputSize);
@@ -348,45 +335,39 @@
     }
 
     if (!result) {
-        goto except;
+        return NULL;
     }
 
     Py_BEGIN_ALLOW_THREADS
     if (self->ddict) {
-        zresult = ZSTD_decompress_usingDDict(dctx, PyBytes_AsString(result), destCapacity,
+        zresult = ZSTD_decompress_usingDDict(self->dctx,
+            PyBytes_AsString(result), destCapacity,
             source, sourceSize, self->ddict);
     }
     else {
-        zresult = ZSTD_decompressDCtx(dctx, PyBytes_AsString(result), destCapacity, source, sourceSize);
+        zresult = ZSTD_decompressDCtx(self->dctx,
            PyBytes_AsString(result), destCapacity, source, sourceSize);
     }
     Py_END_ALLOW_THREADS
 
     if (ZSTD_isError(zresult)) {
         PyErr_Format(ZstdError, "decompression error: %s", ZSTD_getErrorName(zresult));
-        goto except;
+        Py_DecRef(result);
+        return NULL;
     }
     else if (decompressedSize && zresult != decompressedSize) {
         PyErr_Format(ZstdError, "decompression error: decompressed %zu bytes; expected %llu",
             zresult, decompressedSize);
-        goto except;
+        Py_DecRef(result);
+        return NULL;
     }
     else if (zresult < destCapacity) {
         if (_PyBytes_Resize(&result, zresult)) {
-            goto except;
+            Py_DecRef(result);
+            return NULL;
         }
     }
 
-    goto finally;
-
-except:
-    Py_DecRef(result);
-    result = NULL;
-
-finally:
-    if (dctx) {
-        PyMem_FREE(dctx);
-    }
-
     return result;
 }
 
@@ -455,8 +436,8 @@
     ZstdDecompressorIterator* result;
     size_t skipBytes = 0;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk", kwlist, &reader,
-        &inSize, &outSize, &skipBytes)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk:read_from", kwlist,
+        &reader, &inSize, &outSize, &skipBytes)) {
         return NULL;
     }
 
@@ -534,19 +515,14 @@
     goto finally;
 
 except:
-    if (result->reader) {
-        Py_DECREF(result->reader);
-        result->reader = NULL;
-    }
+    Py_CLEAR(result->reader);
 
     if (result->buffer) {
         PyBuffer_Release(result->buffer);
-        Py_DECREF(result->buffer);
-        result->buffer = NULL;
+        Py_CLEAR(result->buffer);
     }
 
-    Py_DECREF(result);
-    result = NULL;
+    Py_CLEAR(result);
 
 finally:
 
@@ -577,7 +553,8 @@
     size_t outSize = ZSTD_DStreamOutSize();
     ZstdDecompressionWriter* result;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k", kwlist, &writer, &outSize)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k:write_to", kwlist,
+        &writer, &outSize)) {
         return NULL;
     }
 
@@ -605,6 +582,200 @@
     return result;
 }
 
+PyDoc_STRVAR(Decompressor_decompress_content_dict_chain__doc__,
+"Decompress a series of chunks using the content dictionary chaining technique\n"
+);
+
+static PyObject* Decompressor_decompress_content_dict_chain(PyObject* self, PyObject* args, PyObject* kwargs) {
+    static char* kwlist[] = {
+        "frames",
+        NULL
+    };
+
+    PyObject* chunks;
+    Py_ssize_t chunksLen;
+    Py_ssize_t chunkIndex;
+    char parity = 0;
+    PyObject* chunk;
+    char* chunkData;
+    Py_ssize_t chunkSize;
+    ZSTD_DCtx* dctx = NULL;
+    size_t zresult;
+    ZSTD_frameParams frameParams;
+    void* buffer1 = NULL;
+    size_t buffer1Size = 0;
+    size_t buffer1ContentSize = 0;
+    void* buffer2 = NULL;
+    size_t buffer2Size = 0;
+    size_t buffer2ContentSize = 0;
+    void* destBuffer = NULL;
+    PyObject* result = NULL;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!:decompress_content_dict_chain",
+        kwlist, &PyList_Type, &chunks)) {
+        return NULL;
+    }
+
+    chunksLen = PyList_Size(chunks);
+    if (!chunksLen) {
+        PyErr_SetString(PyExc_ValueError, "empty input chain");
+        return NULL;
+    }
+
+    /* The first chunk should not be using a dictionary. We handle it specially. */
+    chunk = PyList_GetItem(chunks, 0);
+    if (!PyBytes_Check(chunk)) {
+        PyErr_SetString(PyExc_ValueError, "chunk 0 must be bytes");
+        return NULL;
+    }
+
+    /* We require that all chunks be zstd frames and that they have content size set. */
+    PyBytes_AsStringAndSize(chunk, &chunkData, &chunkSize);
+    zresult = ZSTD_getFrameParams(&frameParams, (void*)chunkData, chunkSize);
+    if (ZSTD_isError(zresult)) {
+        PyErr_SetString(PyExc_ValueError, "chunk 0 is not a valid zstd frame");
+        return NULL;
+    }
+    else if (zresult) {
+        PyErr_SetString(PyExc_ValueError, "chunk 0 is too small to contain a zstd frame");
+        return NULL;
+    }
+
+    if (0 == frameParams.frameContentSize) {
+        PyErr_SetString(PyExc_ValueError, "chunk 0 missing content size in frame");
+        return NULL;
+    }
+
+    dctx = ZSTD_createDCtx();
+    if (!dctx) {
+        PyErr_NoMemory();
+        goto finally;
+    }
+
+    buffer1Size = frameParams.frameContentSize;
+    buffer1 = PyMem_Malloc(buffer1Size);
+    if (!buffer1) {
+        goto finally;
+    }
+
+    Py_BEGIN_ALLOW_THREADS
+    zresult = ZSTD_decompressDCtx(dctx, buffer1, buffer1Size, chunkData, chunkSize);
+    Py_END_ALLOW_THREADS
+    if (ZSTD_isError(zresult)) {
+        PyErr_Format(ZstdError, "could not decompress chunk 0: %s", ZSTD_getErrorName(zresult));
+        goto finally;
+    }
+
+    buffer1ContentSize = zresult;
+
+    /* Special case of a simple chain. */
+    if (1 == chunksLen) {
+        result = PyBytes_FromStringAndSize(buffer1, buffer1Size);
+        goto finally;
+    }
+
+    /* This should ideally look at next chunk. But this is slightly simpler. */
+    buffer2Size = frameParams.frameContentSize;
+    buffer2 = PyMem_Malloc(buffer2Size);
+    if (!buffer2) {
+        goto finally;
+    }
+
+    /* For each subsequent chunk, use the previous fulltext as a content dictionary.
+       Our strategy is to have 2 buffers. One holds the previous fulltext (to be
+       used as a content dictionary) and the other holds the new fulltext. The
+       buffers grow when needed but never decrease in size. This limits the
+       memory allocator overhead.
+    */
+    for (chunkIndex = 1; chunkIndex < chunksLen; chunkIndex++) {
+        chunk = PyList_GetItem(chunks, chunkIndex);
+        if (!PyBytes_Check(chunk)) {
+            PyErr_Format(PyExc_ValueError, "chunk %zd must be bytes", chunkIndex);
+            goto finally;
+        }
+
+        PyBytes_AsStringAndSize(chunk, &chunkData, &chunkSize);
+        zresult = ZSTD_getFrameParams(&frameParams, (void*)chunkData, chunkSize);
+        if (ZSTD_isError(zresult)) {
+            PyErr_Format(PyExc_ValueError, "chunk %zd is not a valid zstd frame", chunkIndex);
+            goto finally;
+        }
+        else if (zresult) {
+            PyErr_Format(PyExc_ValueError, "chunk %zd is too small to contain a zstd frame", chunkIndex);
+            goto finally;
+        }
+
+        if (0 == frameParams.frameContentSize) {
+            PyErr_Format(PyExc_ValueError, "chunk %zd missing content size in frame", chunkIndex);
+            goto finally;
+        }
+
+        parity = chunkIndex % 2;
+
+        /* This could definitely be abstracted to reduce code duplication. */
+        if (parity) {
+            /* Resize destination buffer to hold larger content. */
+            if (buffer2Size < frameParams.frameContentSize) {
+                buffer2Size = frameParams.frameContentSize;
+                destBuffer = PyMem_Realloc(buffer2, buffer2Size);
+                if (!destBuffer) {
+                    goto finally;
+                }
+                buffer2 = destBuffer;
+            }
+
+            Py_BEGIN_ALLOW_THREADS
+            zresult = ZSTD_decompress_usingDict(dctx, buffer2, buffer2Size,
+                chunkData, chunkSize, buffer1, buffer1ContentSize);
+            Py_END_ALLOW_THREADS
+            if (ZSTD_isError(zresult)) {
+                PyErr_Format(ZstdError, "could not decompress chunk %zd: %s",
+                    chunkIndex, ZSTD_getErrorName(zresult));
+                goto finally;
+            }
+            buffer2ContentSize = zresult;
+        }
+        else {
+            if (buffer1Size < frameParams.frameContentSize) {
+                buffer1Size = frameParams.frameContentSize;
+                destBuffer = PyMem_Realloc(buffer1, buffer1Size);
+                if (!destBuffer) {
+                    goto finally;
+                }
+                buffer1 = destBuffer;
+            }
+
+            Py_BEGIN_ALLOW_THREADS
+            zresult = ZSTD_decompress_usingDict(dctx, buffer1, buffer1Size,
+                chunkData, chunkSize, buffer2, buffer2ContentSize);
+            Py_END_ALLOW_THREADS
+            if (ZSTD_isError(zresult)) {
+                PyErr_Format(ZstdError, "could not decompress chunk %zd: %s",
+                    chunkIndex, ZSTD_getErrorName(zresult));
+                goto finally;
+            }
+            buffer1ContentSize = zresult;
+        }
+    }
+
+    result = PyBytes_FromStringAndSize(parity ? buffer2 : buffer1,
+        parity ? buffer2ContentSize : buffer1ContentSize);
+
+finally:
+    if (buffer2) {
+        PyMem_Free(buffer2);
+    }
+    if (buffer1) {
+        PyMem_Free(buffer1);
+    }
+
+    if (dctx) {
+        ZSTD_freeDCtx(dctx);
+    }
+
+    return result;
+}
+
 static PyMethodDef Decompressor_methods[] = {
     { "copy_stream", (PyCFunction)Decompressor_copy_stream, METH_VARARGS | METH_KEYWORDS,
     Decompressor_copy_stream__doc__ },
@@ -616,6 +787,8 @@
     Decompressor_read_from__doc__ },
     { "write_to", (PyCFunction)Decompressor_write_to, METH_VARARGS | METH_KEYWORDS,
     Decompressor_write_to__doc__ },
+    { "decompress_content_dict_chain", (PyCFunction)Decompressor_decompress_content_dict_chain,
+    METH_VARARGS | METH_KEYWORDS, Decompressor_decompress_content_dict_chain__doc__ },
     { NULL, NULL }
 };
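A hedged end-to-end sketch of the new API, pairing the README's make_chain()
recipe with the C implementation above (illustrative only)::

    import zstd

    inputs = [b'input 1', b'input 2', b'input 3']

    # Compress each input against the previous fulltext (content-only dict).
    frames = []
    zctx = zstd.ZstdCompressor(write_content_size=True)
    frames.append(zctx.compress(inputs[0]))
    for i, raw in enumerate(inputs[1:]):
        dict_data = zstd.ZstdCompressionDict(inputs[i])
        zctx = zstd.ZstdCompressor(write_content_size=True, dict_data=dict_data)
        frames.append(zctx.compress(raw))

    # Returns the fulltext of the last element in the chain.
    dctx = zstd.ZstdDecompressor()
    assert dctx.decompress_content_dict_chain(frames) == b'input 3'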
--- a/contrib/python-zstandard/c-ext/dictparams.c	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/dictparams.c	Sat Mar 11 13:53:14 2017 -0500
@@ -18,8 +18,8 @@
     unsigned notificationLevel;
     unsigned dictID;
 
-    if (!PyArg_ParseTuple(args, "IiII", &selectivityLevel, &compressionLevel,
-        &notificationLevel, &dictID)) {
+    if (!PyArg_ParseTuple(args, "IiII:DictParameters",
+        &selectivityLevel, &compressionLevel, &notificationLevel, &dictID)) {
         return NULL;
     }
 
@@ -40,6 +40,22 @@
     PyObject_Del(self);
 }
 
+static PyMemberDef DictParameters_members[] = {
+    { "selectivity_level", T_UINT,
+      offsetof(DictParametersObject, selectivityLevel), READONLY,
+      "selectivity level" },
+    { "compression_level", T_INT,
+      offsetof(DictParametersObject, compressionLevel), READONLY,
+      "compression level" },
+    { "notification_level", T_UINT,
+      offsetof(DictParametersObject, notificationLevel), READONLY,
+      "notification level" },
+    { "dict_id", T_UINT,
+      offsetof(DictParametersObject, dictID), READONLY,
+      "dictionary ID" },
+    { NULL }
+};
+
 static Py_ssize_t DictParameters_length(PyObject* self) {
     return 4;
 }
@@ -102,7 +118,7 @@
     0, /* tp_iter */
     0, /* tp_iternext */
     0, /* tp_methods */
-    0, /* tp_members */
+    DictParameters_members, /* tp_members */
     0, /* tp_getset */
     0, /* tp_base */
     0, /* tp_dict */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/frameparams.c	Sat Mar 11 13:53:14 2017 -0500
@@ -0,0 +1,132 @@
+/**
+* Copyright (c) 2017-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(FrameParameters__doc__,
+    "FrameParameters: information about a zstd frame");
+
+FrameParametersObject* get_frame_parameters(PyObject* self, PyObject* args) {
+    const char* source;
+    Py_ssize_t sourceSize;
+    ZSTD_frameParams params;
+    FrameParametersObject* result = NULL;
+    size_t zresult;
+
+#if PY_MAJOR_VERSION >= 3
+    if (!PyArg_ParseTuple(args, "y#:get_frame_parameters",
+#else
+    if (!PyArg_ParseTuple(args, "s#:get_frame_parameters",
+#endif
+        &source, &sourceSize)) {
+        return NULL;
+    }
+
+    /* Needed for Python 2 to reject unicode */
+    if (!PyBytes_Check(PyTuple_GET_ITEM(args, 0))) {
+        PyErr_SetString(PyExc_TypeError, "argument must be bytes");
+        return NULL;
+    }
+
+    zresult = ZSTD_getFrameParams(&params, (void*)source, sourceSize);
+
+    if (ZSTD_isError(zresult)) {
+        PyErr_Format(ZstdError, "cannot get frame parameters: %s", ZSTD_getErrorName(zresult));
+        return NULL;
+    }
+
+    if (zresult) {
+        PyErr_Format(ZstdError, "not enough data for frame parameters; need %zu bytes", zresult);
+        return NULL;
+    }
+
+    result = PyObject_New(FrameParametersObject, &FrameParametersType);
+    if (!result) {
+        return NULL;
+    }
+
+    result->frameContentSize = params.frameContentSize;
+    result->windowSize = params.windowSize;
+    result->dictID = params.dictID;
+    result->checksumFlag = params.checksumFlag ? 1 : 0;
+
+    return result;
+}
+
+static void FrameParameters_dealloc(PyObject* self) {
+    PyObject_Del(self);
+}
+
+static PyMemberDef FrameParameters_members[] = {
+    { "content_size", T_ULONGLONG,
+      offsetof(FrameParametersObject, frameContentSize), READONLY,
+      "frame content size" },
+    { "window_size", T_UINT,
+      offsetof(FrameParametersObject, windowSize), READONLY,
+      "window size" },
+    { "dict_id", T_UINT,
+      offsetof(FrameParametersObject, dictID), READONLY,
+      "dictionary ID" },
+    { "has_checksum", T_BOOL,
+      offsetof(FrameParametersObject, checksumFlag), READONLY,
+      "checksum flag" },
+    { NULL }
+};
+
+PyTypeObject FrameParametersType = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "FrameParameters", /* tp_name */
+    sizeof(FrameParametersObject), /* tp_basicsize */
+    0, /* tp_itemsize */
+    (destructor)FrameParameters_dealloc, /* tp_dealloc */
+    0, /* tp_print */
+    0, /* tp_getattr */
+    0, /* tp_setattr */
+    0, /* tp_compare */
+    0, /* tp_repr */
+    0, /* tp_as_number */
+    0, /* tp_as_sequence */
+    0, /* tp_as_mapping */
+    0, /* tp_hash */
+    0, /* tp_call */
+    0, /* tp_str */
+    0, /* tp_getattro */
+    0, /* tp_setattro */
+    0, /* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT, /* tp_flags */
+    FrameParameters__doc__, /* tp_doc */
+    0, /* tp_traverse */
+    0, /* tp_clear */
+    0, /* tp_richcompare */
+    0, /* tp_weaklistoffset */
+    0, /* tp_iter */
+    0, /* tp_iternext */
+    0, /* tp_methods */
+    FrameParameters_members, /* tp_members */
+    0, /* tp_getset */
+    0, /* tp_base */
+    0, /* tp_dict */
+    0, /* tp_descr_get */
+    0, /* tp_descr_set */
+    0, /* tp_dictoffset */
+    0, /* tp_init */
+    0, /* tp_alloc */
+    0, /* tp_new */
+};
+
+void frameparams_module_init(PyObject* mod) {
+    Py_TYPE(&FrameParametersType) = &PyType_Type;
+    if (PyType_Ready(&FrameParametersType) < 0) {
+        return;
+    }
+
+    Py_IncRef((PyObject*)&FrameParametersType);
+    PyModule_AddObject(mod, "FrameParameters", (PyObject*)&FrameParametersType);
+}
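A short usage sketch for the new function (attribute names as defined in
FrameParameters_members above; the exact content_size value assumes
write_content_size was set when the frame was produced)::

    import zstd

    frame = zstd.ZstdCompressor(write_content_size=True).compress(b'data')

    params = zstd.get_frame_parameters(frame)
    # content_size is 0 when not written to the frame; window_size, dict_id
    # and has_checksum mirror the parsed ZSTD_frameParams fields.
    assert params.content_size == 4
    assert params.dict_id == 0
    assert not params.has_checksum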
--- a/contrib/python-zstandard/c-ext/python-zstandard.h	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Sat Mar 11 13:53:14 2017 -0500
@@ -8,6 +8,7 @@
 
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
+#include "structmember.h"
 
 #define ZSTD_STATIC_LINKING_ONLY
 #define ZDICT_STATIC_LINKING_ONLY
@@ -15,7 +16,7 @@
 #include "zstd.h"
 #include "zdict.h"
 
-#define PYTHON_ZSTANDARD_VERSION "0.6.0"
+#define PYTHON_ZSTANDARD_VERSION "0.7.0"
 
 typedef enum {
     compressorobj_flush_finish,
@@ -37,6 +38,16 @@
 
 typedef struct {
     PyObject_HEAD
+    unsigned long long frameContentSize;
+    unsigned windowSize;
+    unsigned dictID;
+    char checksumFlag;
+} FrameParametersObject;
+
+extern PyTypeObject FrameParametersType;
+
+typedef struct {
+    PyObject_HEAD
     unsigned selectivityLevel;
     int compressionLevel;
     unsigned notificationLevel;
@@ -115,7 +126,7 @@
 typedef struct {
     PyObject_HEAD
 
-    ZSTD_DCtx* refdctx;
+    ZSTD_DCtx* dctx;
 
     ZstdCompressionDict* dict;
     ZSTD_DDict* ddict;
@@ -172,6 +183,7 @@
 void ztopy_compression_parameters(CompressionParametersObject* params, ZSTD_compressionParameters* zparams);
 CompressionParametersObject* get_compression_parameters(PyObject* self, PyObject* args);
+FrameParametersObject* get_frame_parameters(PyObject* self, PyObject* args);
 PyObject* estimate_compression_context_size(PyObject* self, PyObject* args);
 ZSTD_CStream* CStream_from_ZstdCompressor(ZstdCompressor* compressor, Py_ssize_t sourceSize);
 ZSTD_DStream* DStream_from_ZstdDecompressor(ZstdDecompressor* decompressor);
--- a/contrib/python-zstandard/make_cffi.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/make_cffi.py	Sat Mar 11 13:53:14 2017 -0500
@@ -9,6 +9,7 @@
 import cffi
 import distutils.ccompiler
 import os
+import re
 import subprocess
 import tempfile
 
@@ -19,6 +20,8 @@
     'common/entropy_common.c',
     'common/error_private.c',
     'common/fse_decompress.c',
+    'common/pool.c',
+    'common/threading.c',
     'common/xxhash.c',
     'common/zstd_common.c',
     'compress/fse_compress.c',
@@ -26,10 +29,17 @@
     'compress/zstd_compress.c',
     'decompress/huf_decompress.c',
     'decompress/zstd_decompress.c',
+    'dictBuilder/cover.c',
     'dictBuilder/divsufsort.c',
     'dictBuilder/zdict.c',
 )]
 
+HEADERS = [os.path.join(HERE, 'zstd', *p) for p in (
+    ('zstd.h',),
+    ('common', 'pool.h'),
+    ('dictBuilder', 'zdict.h'),
+)]
+
 INCLUDE_DIRS = [os.path.join(HERE, d) for d in (
     'zstd',
     'zstd/common',
@@ -53,56 +63,92 @@
     args.extend([
         '-E',
         '-DZSTD_STATIC_LINKING_ONLY',
+        '-DZDICT_STATIC_LINKING_ONLY',
     ])
 elif compiler.compiler_type == 'msvc':
     args = [compiler.cc]
     args.extend([
         '/EP',
         '/DZSTD_STATIC_LINKING_ONLY',
+        '/DZDICT_STATIC_LINKING_ONLY',
     ])
 else:
     raise Exception('unsupported compiler type: %s' % compiler.compiler_type)
 
-# zstd.h includes <stddef.h>, which is also included by cffi's boilerplate.
-# This can lead to duplicate declarations. So we strip this include from the
-# preprocessor invocation.
+def preprocess(path):
+    # zstd.h includes <stddef.h>, which is also included by cffi's boilerplate.
+    # This can lead to duplicate declarations. So we strip this include from the
+    # preprocessor invocation.
+    with open(path, 'rb') as fh:
+        lines = [l for l in fh if not l.startswith(b'#include <stddef.h>')]
 
-with open(os.path.join(HERE, 'zstd', 'zstd.h'), 'rb') as fh:
-    lines = [l for l in fh if not l.startswith(b'#include <stddef.h>')]
-
-fd, input_file = tempfile.mkstemp(suffix='.h')
-os.write(fd, b''.join(lines))
-os.close(fd)
+    fd, input_file = tempfile.mkstemp(suffix='.h')
+    os.write(fd, b''.join(lines))
+    os.close(fd)
 
-args.append(input_file)
+    try:
+        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE)
+        output = process.communicate()[0]
+        ret = process.poll()
+        if ret:
+            raise Exception('preprocessor exited with error')
 
-try:
-    process = subprocess.Popen(args, stdout=subprocess.PIPE)
-    output = process.communicate()[0]
-    ret = process.poll()
-    if ret:
-        raise Exception('preprocessor exited with error')
-finally:
-    os.unlink(input_file)
+        return output
+    finally:
+        os.unlink(input_file)
 
-def normalize_output():
+
+def normalize_output(output):
     lines = []
     for line in output.splitlines():
         # CFFI's parser doesn't like __attribute__ on UNIX compilers.
         if line.startswith(b'__attribute__ ((visibility ("default"))) '):
             line = line[len(b'__attribute__ ((visibility ("default"))) '):]
 
+        if line.startswith(b'__attribute__((deprecated('):
+            continue
+        elif b'__declspec(deprecated(' in line:
+            continue
+
         lines.append(line)
 
     return b'\n'.join(lines)
 
+
 ffi = cffi.FFI()
 ffi.set_source('_zstd_cffi', '''
+#include "mem.h"
 #define ZSTD_STATIC_LINKING_ONLY
 #include "zstd.h"
+#define ZDICT_STATIC_LINKING_ONLY
+#include "pool.h"
+#include "zdict.h"
 ''', sources=SOURCES, include_dirs=INCLUDE_DIRS)
 
-ffi.cdef(normalize_output().decode('latin1'))
+DEFINE = re.compile(b'^\\#define ([a-zA-Z0-9_]+) ')
+
+sources = []
+
+for header in HEADERS:
+    preprocessed = preprocess(header)
+    sources.append(normalize_output(preprocessed))
+
+    # Do another pass over source and find constants that were preprocessed
+    # away.
+    with open(header, 'rb') as fh:
+        for line in fh:
+            line = line.strip()
+            m = DEFINE.match(line)
+            if not m:
+                continue
+
+            # The parser doesn't like some constants with complex values.
+            if m.group(1) in (b'ZSTD_LIB_VERSION', b'ZSTD_VERSION_STRING'):
+                continue
+
+            sources.append(m.group(0) + b' ...')
+
+ffi.cdef(u'\n'.join(s.decode('latin1') for s in sources))
 
 if __name__ == '__main__':
     ffi.compile()
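The trailing ``...`` emitted for each recovered ``#define`` leans on cffi's
partial-declaration syntax, where the actual constant value is resolved from
the C source at build time. A minimal standalone illustration of that
mechanism (module and constant names are hypothetical, not part of
make_cffi.py)::

    import cffi

    ffi = cffi.FFI()
    # "..." tells cffi to fill in the integer value during compilation.
    ffi.cdef('#define MAGIC ...')
    ffi.set_source('_example', '#define MAGIC 42')

    if __name__ == '__main__':
        ffi.compile()
        from _example import lib
        assert lib.MAGIC == 42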
--- a/contrib/python-zstandard/setup.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/setup.py	Sat Mar 11 13:53:14 2017 -0500
@@ -62,6 +62,7 @@
         'Programming Language :: Python :: 3.3',
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
     ],
     keywords='zstandard zstd compression',
     ext_modules=extensions,
--- a/contrib/python-zstandard/setup_zstd.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/setup_zstd.py Sat Mar 11 13:53:14 2017 -0500 @@ -12,6 +12,8 @@ 'common/entropy_common.c', 'common/error_private.c', 'common/fse_decompress.c', + 'common/pool.c', + 'common/threading.c', 'common/xxhash.c', 'common/zstd_common.c', 'compress/fse_compress.c', @@ -19,11 +21,13 @@ 'compress/zstd_compress.c', 'decompress/huf_decompress.c', 'decompress/zstd_decompress.c', + 'dictBuilder/cover.c', 'dictBuilder/divsufsort.c', 'dictBuilder/zdict.c', )] zstd_sources_legacy = ['zstd/%s' % p for p in ( + 'deprecated/zbuff_common.c', 'deprecated/zbuff_compress.c', 'deprecated/zbuff_decompress.c', 'legacy/zstd_v01.c', @@ -63,6 +67,7 @@ 'c-ext/decompressoriterator.c', 'c-ext/decompressionwriter.c', 'c-ext/dictparams.c', + 'c-ext/frameparams.c', ] zstd_depends = [
--- a/contrib/python-zstandard/tests/common.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/common.py Sat Mar 11 13:53:14 2017 -0500 @@ -1,4 +1,50 @@ +import inspect import io +import types + + +def make_cffi(cls): + """Decorator to add CFFI versions of each test method.""" + + try: + import zstd_cffi + except ImportError: + return cls + + # If CFFI version is available, dynamically construct test methods + # that use it. + + for attr in dir(cls): + fn = getattr(cls, attr) + if not inspect.ismethod(fn) and not inspect.isfunction(fn): + continue + + if not fn.__name__.startswith('test_'): + continue + + name = '%s_cffi' % fn.__name__ + + # Replace the "zstd" symbol with the CFFI module instance. Then copy + # the function object and install it in a new attribute. + if isinstance(fn, types.FunctionType): + globs = dict(fn.__globals__) + globs['zstd'] = zstd_cffi + new_fn = types.FunctionType(fn.__code__, globs, name, + fn.__defaults__, fn.__closure__) + new_method = new_fn + else: + globs = dict(fn.__func__.func_globals) + globs['zstd'] = zstd_cffi + new_fn = types.FunctionType(fn.__func__.func_code, globs, name, + fn.__func__.func_defaults, + fn.__func__.func_closure) + new_method = types.UnboundMethodType(new_fn, fn.im_self, + fn.im_class) + + setattr(cls, name, new_method) + + return cls + class OpCountingBytesIO(io.BytesIO): def __init__(self, *args, **kwargs):
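Intended usage of the decorator, as a sketch (it assumes zstd_cffi is importable; the one-shot decompress() call with max_output_size is from the existing python-zstandard API, not from this diff):

    import unittest

    import zstd

    from .common import make_cffi


    @make_cffi
    class TestExample(unittest.TestCase):
        def test_roundtrip(self):
            frame = zstd.ZstdCompressor(level=1).compress(b'hello')
            dctx = zstd.ZstdDecompressor()
            self.assertEqual(dctx.decompress(frame, max_output_size=16),
                             b'hello')

    # After decoration the class carries two tests: test_roundtrip,
    # bound to the C extension, and test_roundtrip_cffi, the same code
    # object with its "zstd" global rebound to the zstd_cffi module.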
--- a/contrib/python-zstandard/tests/test_cffi.py Tue Mar 07 13:24:24 2017 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,35 +0,0 @@ -import io - -try: - import unittest2 as unittest -except ImportError: - import unittest - -import zstd - -try: - import zstd_cffi -except ImportError: - raise unittest.SkipTest('cffi version of zstd not available') - - -class TestCFFIWriteToToCDecompressor(unittest.TestCase): - def test_simple(self): - orig = io.BytesIO() - orig.write(b'foo') - orig.write(b'bar') - orig.write(b'foobar' * 16384) - - dest = io.BytesIO() - cctx = zstd_cffi.ZstdCompressor() - with cctx.write_to(dest) as compressor: - compressor.write(orig.getvalue()) - - uncompressed = io.BytesIO() - dctx = zstd.ZstdDecompressor() - with dctx.write_to(uncompressed) as decompressor: - decompressor.write(dest.getvalue()) - - self.assertEqual(uncompressed.getvalue(), orig.getvalue()) - -
--- a/contrib/python-zstandard/tests/test_compressor.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/test_compressor.py Sat Mar 11 13:53:14 2017 -0500 @@ -10,7 +10,10 @@ import zstd -from .common import OpCountingBytesIO +from .common import ( + make_cffi, + OpCountingBytesIO, +) if sys.version_info[0] >= 3: @@ -19,6 +22,7 @@ next = lambda it: it.next() +@make_cffi class TestCompressor(unittest.TestCase): def test_level_bounds(self): with self.assertRaises(ValueError): @@ -28,18 +32,17 @@ zstd.ZstdCompressor(level=23) +@make_cffi class TestCompressor_compress(unittest.TestCase): def test_compress_empty(self): cctx = zstd.ZstdCompressor(level=1) - cctx.compress(b'') - - cctx = zstd.ZstdCompressor(level=22) - cctx.compress(b'') - - def test_compress_empty(self): - cctx = zstd.ZstdCompressor(level=1) - self.assertEqual(cctx.compress(b''), - b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00') + result = cctx.compress(b'') + self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00') + params = zstd.get_frame_parameters(result) + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 524288) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum, 0) # TODO should be temporary until https://github.com/facebook/zstd/issues/506 # is fixed. @@ -59,6 +62,13 @@ self.assertEqual(len(result), 999) self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd') + # This matches the test for read_from() below. + cctx = zstd.ZstdCompressor(level=1) + result = cctx.compress(b'f' * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b'o') + self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00' + b'\x10\x66\x66\x01\x00\xfb\xff\x39\xc0' + b'\x02\x09\x00\x00\x6f') + def test_write_checksum(self): cctx = zstd.ZstdCompressor(level=1) no_checksum = cctx.compress(b'foobar') @@ -67,6 +77,12 @@ self.assertEqual(len(with_checksum), len(no_checksum) + 4) + no_params = zstd.get_frame_parameters(no_checksum) + with_params = zstd.get_frame_parameters(with_checksum) + + self.assertFalse(no_params.has_checksum) + self.assertTrue(with_params.has_checksum) + def test_write_content_size(self): cctx = zstd.ZstdCompressor(level=1) no_size = cctx.compress(b'foobar' * 256) @@ -75,6 +91,11 @@ self.assertEqual(len(with_size), len(no_size) + 1) + no_params = zstd.get_frame_parameters(no_size) + with_params = zstd.get_frame_parameters(with_size) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 1536) + def test_no_dict_id(self): samples = [] for i in range(128): @@ -92,6 +113,11 @@ self.assertEqual(len(with_dict_id), len(no_dict_id) + 4) + no_params = zstd.get_frame_parameters(no_dict_id) + with_params = zstd.get_frame_parameters(with_dict_id) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, 1584102229) + def test_compress_dict_multiple(self): samples = [] for i in range(128): @@ -107,6 +133,7 @@ cctx.compress(b'foo bar foobar foo bar foobar') +@make_cffi class TestCompressor_compressobj(unittest.TestCase): def test_compressobj_empty(self): cctx = zstd.ZstdCompressor(level=1) @@ -127,6 +154,12 @@ self.assertEqual(len(result), 999) self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd') + params = zstd.get_frame_parameters(result) + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 1048576) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) + def test_write_checksum(self): cctx = zstd.ZstdCompressor(level=1) cobj = cctx.compressobj() @@ -135,6 +168,15 @@ cobj 
= cctx.compressobj() with_checksum = cobj.compress(b'foobar') + cobj.flush() + no_params = zstd.get_frame_parameters(no_checksum) + with_params = zstd.get_frame_parameters(with_checksum) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 0) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, 0) + self.assertFalse(no_params.has_checksum) + self.assertTrue(with_params.has_checksum) + self.assertEqual(len(with_checksum), len(no_checksum) + 4) def test_write_content_size(self): @@ -145,6 +187,15 @@ cobj = cctx.compressobj(size=len(b'foobar' * 256)) with_size = cobj.compress(b'foobar' * 256) + cobj.flush() + no_params = zstd.get_frame_parameters(no_size) + with_params = zstd.get_frame_parameters(with_size) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 1536) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, 0) + self.assertFalse(no_params.has_checksum) + self.assertFalse(with_params.has_checksum) + self.assertEqual(len(with_size), len(no_size) + 1) def test_compress_after_finished(self): @@ -187,6 +238,7 @@ self.assertEqual(header, b'\x01\x00\x00') +@make_cffi class TestCompressor_copy_stream(unittest.TestCase): def test_no_read(self): source = object() @@ -229,6 +281,12 @@ self.assertEqual(r, 255 * 16384) self.assertEqual(w, 999) + params = zstd.get_frame_parameters(dest.getvalue()) + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 1048576) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) + def test_write_checksum(self): source = io.BytesIO(b'foobar') no_checksum = io.BytesIO() @@ -244,6 +302,15 @@ self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4) + no_params = zstd.get_frame_parameters(no_checksum.getvalue()) + with_params = zstd.get_frame_parameters(with_checksum.getvalue()) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 0) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, 0) + self.assertFalse(no_params.has_checksum) + self.assertTrue(with_params.has_checksum) + def test_write_content_size(self): source = io.BytesIO(b'foobar' * 256) no_size = io.BytesIO() @@ -268,6 +335,15 @@ self.assertEqual(len(with_size.getvalue()), len(no_size.getvalue()) + 1) + no_params = zstd.get_frame_parameters(no_size.getvalue()) + with_params = zstd.get_frame_parameters(with_size.getvalue()) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 1536) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, 0) + self.assertFalse(no_params.has_checksum) + self.assertFalse(with_params.has_checksum) + def test_read_write_size(self): source = OpCountingBytesIO(b'foobarfoobar') dest = OpCountingBytesIO() @@ -288,18 +364,25 @@ return buffer.getvalue() +@make_cffi class TestCompressor_write_to(unittest.TestCase): def test_empty(self): - self.assertEqual(compress(b'', 1), - b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00') + result = compress(b'', 1) + self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00') + + params = zstd.get_frame_parameters(result) + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 524288) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) def test_multiple_compress(self): buffer = io.BytesIO() cctx = zstd.ZstdCompressor(level=5) with cctx.write_to(buffer) as compressor: - 
compressor.write(b'foo') - compressor.write(b'bar') - compressor.write(b'x' * 8192) + self.assertEqual(compressor.write(b'foo'), 0) + self.assertEqual(compressor.write(b'bar'), 0) + self.assertEqual(compressor.write(b'x' * 8192), 0) result = buffer.getvalue() self.assertEqual(result, @@ -318,11 +401,23 @@ buffer = io.BytesIO() cctx = zstd.ZstdCompressor(level=9, dict_data=d) with cctx.write_to(buffer) as compressor: - compressor.write(b'foo') - compressor.write(b'bar') - compressor.write(b'foo' * 16384) + self.assertEqual(compressor.write(b'foo'), 0) + self.assertEqual(compressor.write(b'bar'), 0) + self.assertEqual(compressor.write(b'foo' * 16384), 634) compressed = buffer.getvalue() + + params = zstd.get_frame_parameters(compressed) + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 1024) + self.assertEqual(params.dict_id, d.dict_id()) + self.assertFalse(params.has_checksum) + + self.assertEqual(compressed[0:32], + b'\x28\xb5\x2f\xfd\x03\x00\x55\x7b\x6b\x5e\x54\x00' + b'\x00\x00\x02\xfc\xf4\xa5\xba\x23\x3f\x85\xb3\x54' + b'\x00\x00\x18\x6f\x6f\x66\x01\x00') + h = hashlib.sha1(compressed).hexdigest() self.assertEqual(h, '1c5bcd25181bcd8c1a73ea8773323e0056129f92') @@ -332,11 +427,18 @@ buffer = io.BytesIO() cctx = zstd.ZstdCompressor(compression_params=params) with cctx.write_to(buffer) as compressor: - compressor.write(b'foo') - compressor.write(b'bar') - compressor.write(b'foobar' * 16384) + self.assertEqual(compressor.write(b'foo'), 0) + self.assertEqual(compressor.write(b'bar'), 0) + self.assertEqual(compressor.write(b'foobar' * 16384), 0) compressed = buffer.getvalue() + + params = zstd.get_frame_parameters(compressed) + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 1048576) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) + h = hashlib.sha1(compressed).hexdigest() self.assertEqual(h, '1ae31f270ed7de14235221a604b31ecd517ebd99') @@ -344,12 +446,21 @@ no_checksum = io.BytesIO() cctx = zstd.ZstdCompressor(level=1) with cctx.write_to(no_checksum) as compressor: - compressor.write(b'foobar') + self.assertEqual(compressor.write(b'foobar'), 0) with_checksum = io.BytesIO() cctx = zstd.ZstdCompressor(level=1, write_checksum=True) with cctx.write_to(with_checksum) as compressor: - compressor.write(b'foobar') + self.assertEqual(compressor.write(b'foobar'), 0) + + no_params = zstd.get_frame_parameters(no_checksum.getvalue()) + with_params = zstd.get_frame_parameters(with_checksum.getvalue()) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 0) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, 0) + self.assertFalse(no_params.has_checksum) + self.assertTrue(with_params.has_checksum) self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4) @@ -358,12 +469,12 @@ no_size = io.BytesIO() cctx = zstd.ZstdCompressor(level=1) with cctx.write_to(no_size) as compressor: - compressor.write(b'foobar' * 256) + self.assertEqual(compressor.write(b'foobar' * 256), 0) with_size = io.BytesIO() cctx = zstd.ZstdCompressor(level=1, write_content_size=True) with cctx.write_to(with_size) as compressor: - compressor.write(b'foobar' * 256) + self.assertEqual(compressor.write(b'foobar' * 256), 0) # Source size is not known in streaming mode, so header not # written. @@ -373,7 +484,16 @@ # Declaring size will write the header. 
with_size = io.BytesIO() with cctx.write_to(with_size, size=len(b'foobar' * 256)) as compressor: - compressor.write(b'foobar' * 256) + self.assertEqual(compressor.write(b'foobar' * 256), 0) + + no_params = zstd.get_frame_parameters(no_size.getvalue()) + with_params = zstd.get_frame_parameters(with_size.getvalue()) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 1536) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, 0) + self.assertFalse(no_params.has_checksum) + self.assertFalse(with_params.has_checksum) self.assertEqual(len(with_size.getvalue()), len(no_size.getvalue()) + 1) @@ -390,12 +510,21 @@ with_dict_id = io.BytesIO() cctx = zstd.ZstdCompressor(level=1, dict_data=d) with cctx.write_to(with_dict_id) as compressor: - compressor.write(b'foobarfoobar') + self.assertEqual(compressor.write(b'foobarfoobar'), 0) cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False) no_dict_id = io.BytesIO() with cctx.write_to(no_dict_id) as compressor: - compressor.write(b'foobarfoobar') + self.assertEqual(compressor.write(b'foobarfoobar'), 0) + + no_params = zstd.get_frame_parameters(no_dict_id.getvalue()) + with_params = zstd.get_frame_parameters(with_dict_id.getvalue()) + self.assertEqual(no_params.content_size, 0) + self.assertEqual(with_params.content_size, 0) + self.assertEqual(no_params.dict_id, 0) + self.assertEqual(with_params.dict_id, d.dict_id()) + self.assertFalse(no_params.has_checksum) + self.assertFalse(with_params.has_checksum) self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4) @@ -412,9 +541,9 @@ cctx = zstd.ZstdCompressor(level=3) dest = OpCountingBytesIO() with cctx.write_to(dest, write_size=1) as compressor: - compressor.write(b'foo') - compressor.write(b'bar') - compressor.write(b'foobar') + self.assertEqual(compressor.write(b'foo'), 0) + self.assertEqual(compressor.write(b'bar'), 0) + self.assertEqual(compressor.write(b'foobar'), 0) self.assertEqual(len(dest.getvalue()), dest._write_count) @@ -422,15 +551,15 @@ cctx = zstd.ZstdCompressor(level=3) dest = OpCountingBytesIO() with cctx.write_to(dest) as compressor: - compressor.write(b'foo') + self.assertEqual(compressor.write(b'foo'), 0) self.assertEqual(dest._write_count, 0) - compressor.flush() + self.assertEqual(compressor.flush(), 12) self.assertEqual(dest._write_count, 1) - compressor.write(b'bar') + self.assertEqual(compressor.write(b'bar'), 0) self.assertEqual(dest._write_count, 1) - compressor.flush() + self.assertEqual(compressor.flush(), 6) self.assertEqual(dest._write_count, 2) - compressor.write(b'baz') + self.assertEqual(compressor.write(b'baz'), 0) self.assertEqual(dest._write_count, 3) @@ -438,10 +567,10 @@ cctx = zstd.ZstdCompressor(level=3, write_checksum=True) dest = OpCountingBytesIO() with cctx.write_to(dest) as compressor: - compressor.write(b'foobar' * 8192) + self.assertEqual(compressor.write(b'foobar' * 8192), 0) count = dest._write_count offset = dest.tell() - compressor.flush() + self.assertEqual(compressor.flush(), 23) self.assertGreater(dest._write_count, count) self.assertGreater(dest.tell(), offset) offset = dest.tell() @@ -456,18 +585,22 @@ self.assertEqual(header, b'\x01\x00\x00') +@make_cffi class TestCompressor_read_from(unittest.TestCase): def test_type_validation(self): cctx = zstd.ZstdCompressor() # Object with read() works. - cctx.read_from(io.BytesIO()) + for chunk in cctx.read_from(io.BytesIO()): + pass # Buffer protocol works. 
- cctx.read_from(b'foobar') + for chunk in cctx.read_from(b'foobar'): + pass with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'): - cctx.read_from(True) + for chunk in cctx.read_from(True): + pass def test_read_empty(self): cctx = zstd.ZstdCompressor(level=1) @@ -521,6 +654,12 @@ # We should get the same output as the one-shot compression mechanism. self.assertEqual(b''.join(chunks), cctx.compress(source.getvalue())) + params = zstd.get_frame_parameters(b''.join(chunks)) + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 262144) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) + # Now check the buffer protocol. it = cctx.read_from(source.getvalue()) chunks = list(it)
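The assertions added throughout this file use zstd.get_frame_parameters(), introduced by this changeset. A minimal usage sketch:

    import zstd

    cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
    frame = cctx.compress(b'foobar')

    params = zstd.get_frame_parameters(frame)
    # content_size remains 0 here because write_content_size was not
    # requested; has_checksum is True because write_checksum was.
    print(params.content_size, params.window_size,
          params.dict_id, params.has_checksum)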
--- a/contrib/python-zstandard/tests/test_data_structures.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/test_data_structures.py Sat Mar 11 13:53:14 2017 -0500 @@ -13,6 +13,12 @@ import zstd +from . common import ( + make_cffi, +) + + +@make_cffi class TestCompressionParameters(unittest.TestCase): def test_init_bad_arg_type(self): with self.assertRaises(TypeError): @@ -42,7 +48,81 @@ p = zstd.get_compression_parameters(1) self.assertIsInstance(p, zstd.CompressionParameters) - self.assertEqual(p[0], 19) + self.assertEqual(p.window_log, 19) + + def test_members(self): + p = zstd.CompressionParameters(10, 6, 7, 4, 5, 8, 1) + self.assertEqual(p.window_log, 10) + self.assertEqual(p.chain_log, 6) + self.assertEqual(p.hash_log, 7) + self.assertEqual(p.search_log, 4) + self.assertEqual(p.search_length, 5) + self.assertEqual(p.target_length, 8) + self.assertEqual(p.strategy, 1) + + +@make_cffi +class TestFrameParameters(unittest.TestCase): + def test_invalid_type(self): + with self.assertRaises(TypeError): + zstd.get_frame_parameters(None) + + with self.assertRaises(TypeError): + zstd.get_frame_parameters(u'foobarbaz') + + def test_invalid_input_sizes(self): + with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'): + zstd.get_frame_parameters(b'') + + with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'): + zstd.get_frame_parameters(zstd.FRAME_HEADER) + + def test_invalid_frame(self): + with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'): + zstd.get_frame_parameters(b'foobarbaz') + + def test_attributes(self): + params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x00') + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 1024) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) + + # Lowest 2 bits indicate a dictionary and length. Here, the dict id is 1 byte. + params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x01\x00\xff') + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 1024) + self.assertEqual(params.dict_id, 255) + self.assertFalse(params.has_checksum) + + # Lowest 3rd bit indicates if checksum is present. + params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x04\x00') + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 1024) + self.assertEqual(params.dict_id, 0) + self.assertTrue(params.has_checksum) + + # Upper 2 bits indicate content size. + params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x40\x00\xff\x00') + self.assertEqual(params.content_size, 511) + self.assertEqual(params.window_size, 1024) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) + + # Window descriptor is 2nd byte after frame header. + params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x40') + self.assertEqual(params.content_size, 0) + self.assertEqual(params.window_size, 262144) + self.assertEqual(params.dict_id, 0) + self.assertFalse(params.has_checksum) + + # Set multiple things. 
+ params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x45\x40\x0f\x10\x00') + self.assertEqual(params.content_size, 272) + self.assertEqual(params.window_size, 262144) + self.assertEqual(params.dict_id, 15) + self.assertTrue(params.has_checksum) + if hypothesis: s_windowlog = strategies.integers(min_value=zstd.WINDOWLOG_MIN, @@ -65,6 +145,8 @@ zstd.STRATEGY_BTLAZY2, zstd.STRATEGY_BTOPT)) + + @make_cffi class TestCompressionParametersHypothesis(unittest.TestCase): @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog, s_searchlength, s_targetlength, s_strategy) @@ -73,9 +155,6 @@ p = zstd.CompressionParameters(windowlog, chainlog, hashlog, searchlog, searchlength, targetlength, strategy) - self.assertEqual(tuple(p), - (windowlog, chainlog, hashlog, searchlog, - searchlength, targetlength, strategy)) # Verify we can instantiate a compressor with the supplied values. # ZSTD_checkCParams moves the goal posts on us from what's advertised
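The synthetic frames in these tests decode the frame header descriptor byte that follows the four-byte magic: the low two bits select the dictionary-ID field size, bit 2 is the checksum flag, and the top two bits select the content-size field size. Condensed from the assertions above:

    import zstd

    # zstd.FRAME_HEADER is the frame magic; the next byte is the
    # frame header descriptor whose bit fields are probed above.
    p = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x01\x00\xff')
    assert p.dict_id == 255      # bits 0-1: one-byte dictionary id
    p = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x04\x00')
    assert p.has_checksum        # bit 2: checksum present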
--- a/contrib/python-zstandard/tests/test_decompressor.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/test_decompressor.py Sat Mar 11 13:53:14 2017 -0500 @@ -10,7 +10,10 @@ import zstd -from .common import OpCountingBytesIO +from .common import ( + make_cffi, + OpCountingBytesIO, +) if sys.version_info[0] >= 3: @@ -19,6 +22,7 @@ next = lambda it: it.next() +@make_cffi class TestDecompressor_decompress(unittest.TestCase): def test_empty_input(self): dctx = zstd.ZstdDecompressor() @@ -119,6 +123,7 @@ self.assertEqual(decompressed, sources[i]) +@make_cffi class TestDecompressor_copy_stream(unittest.TestCase): def test_no_read(self): source = object() @@ -180,6 +185,7 @@ self.assertEqual(dest._write_count, len(dest.getvalue())) +@make_cffi class TestDecompressor_decompressobj(unittest.TestCase): def test_simple(self): data = zstd.ZstdCompressor(level=1).compress(b'foobar') @@ -207,6 +213,7 @@ return buffer.getvalue() +@make_cffi class TestDecompressor_write_to(unittest.TestCase): def test_empty_roundtrip(self): cctx = zstd.ZstdCompressor() @@ -256,14 +263,14 @@ buffer = io.BytesIO() cctx = zstd.ZstdCompressor(dict_data=d) with cctx.write_to(buffer) as compressor: - compressor.write(orig) + self.assertEqual(compressor.write(orig), 1544) compressed = buffer.getvalue() buffer = io.BytesIO() dctx = zstd.ZstdDecompressor(dict_data=d) with dctx.write_to(buffer) as decompressor: - decompressor.write(compressed) + self.assertEqual(decompressor.write(compressed), len(orig)) self.assertEqual(buffer.getvalue(), orig) @@ -291,6 +298,7 @@ self.assertEqual(dest._write_count, len(dest.getvalue())) +@make_cffi class TestDecompressor_read_from(unittest.TestCase): def test_type_validation(self): dctx = zstd.ZstdDecompressor() @@ -302,7 +310,7 @@ dctx.read_from(b'foobar') with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'): - dctx.read_from(True) + b''.join(dctx.read_from(True)) def test_empty_input(self): dctx = zstd.ZstdDecompressor() @@ -351,7 +359,7 @@ dctx = zstd.ZstdDecompressor() with self.assertRaisesRegexp(ValueError, 'skip_bytes must be smaller than read_size'): - dctx.read_from(b'', skip_bytes=1, read_size=1) + b''.join(dctx.read_from(b'', skip_bytes=1, read_size=1)) with self.assertRaisesRegexp(ValueError, 'skip_bytes larger than first input chunk'): b''.join(dctx.read_from(b'foobar', skip_bytes=10)) @@ -476,3 +484,94 @@ self.assertEqual(len(chunk), 1) self.assertEqual(source._read_count, len(source.getvalue())) + + +@make_cffi +class TestDecompressor_content_dict_chain(unittest.TestCase): + def test_bad_inputs_simple(self): + dctx = zstd.ZstdDecompressor() + + with self.assertRaises(TypeError): + dctx.decompress_content_dict_chain(b'foo') + + with self.assertRaises(TypeError): + dctx.decompress_content_dict_chain((b'foo', b'bar')) + + with self.assertRaisesRegexp(ValueError, 'empty input chain'): + dctx.decompress_content_dict_chain([]) + + with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'): + dctx.decompress_content_dict_chain([u'foo']) + + with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'): + dctx.decompress_content_dict_chain([True]) + + with self.assertRaisesRegexp(ValueError, 'chunk 0 is too small to contain a zstd frame'): + dctx.decompress_content_dict_chain([zstd.FRAME_HEADER]) + + with self.assertRaisesRegexp(ValueError, 'chunk 0 is not a valid zstd frame'): + dctx.decompress_content_dict_chain([b'foo' * 8]) + + no_size = zstd.ZstdCompressor().compress(b'foo' * 64) + + with self.assertRaisesRegexp(ValueError, 
'chunk 0 missing content size in frame'): + dctx.decompress_content_dict_chain([no_size]) + + # Corrupt first frame. + frame = zstd.ZstdCompressor(write_content_size=True).compress(b'foo' * 64) + frame = frame[0:12] + frame[15:] + with self.assertRaisesRegexp(zstd.ZstdError, 'could not decompress chunk 0'): + dctx.decompress_content_dict_chain([frame]) + + def test_bad_subsequent_input(self): + initial = zstd.ZstdCompressor(write_content_size=True).compress(b'foo' * 64) + + dctx = zstd.ZstdDecompressor() + + with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'): + dctx.decompress_content_dict_chain([initial, u'foo']) + + with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'): + dctx.decompress_content_dict_chain([initial, None]) + + with self.assertRaisesRegexp(ValueError, 'chunk 1 is too small to contain a zstd frame'): + dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER]) + + with self.assertRaisesRegexp(ValueError, 'chunk 1 is not a valid zstd frame'): + dctx.decompress_content_dict_chain([initial, b'foo' * 8]) + + no_size = zstd.ZstdCompressor().compress(b'foo' * 64) + + with self.assertRaisesRegexp(ValueError, 'chunk 1 missing content size in frame'): + dctx.decompress_content_dict_chain([initial, no_size]) + + # Corrupt second frame. + cctx = zstd.ZstdCompressor(write_content_size=True, dict_data=zstd.ZstdCompressionDict(b'foo' * 64)) + frame = cctx.compress(b'bar' * 64) + frame = frame[0:12] + frame[15:] + + with self.assertRaisesRegexp(zstd.ZstdError, 'could not decompress chunk 1'): + dctx.decompress_content_dict_chain([initial, frame]) + + def test_simple(self): + original = [ + b'foo' * 64, + b'foobar' * 64, + b'baz' * 64, + b'foobaz' * 64, + b'foobarbaz' * 64, + ] + + chunks = [] + chunks.append(zstd.ZstdCompressor(write_content_size=True).compress(original[0])) + for i, chunk in enumerate(original[1:]): + d = zstd.ZstdCompressionDict(original[i]) + cctx = zstd.ZstdCompressor(dict_data=d, write_content_size=True) + chunks.append(cctx.compress(chunk)) + + for i in range(1, len(original)): + chain = chunks[0:i] + expected = original[i - 1] + dctx = zstd.ZstdDecompressor() + decompressed = dctx.decompress_content_dict_chain(chain) + self.assertEqual(decompressed, expected)
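decompress_content_dict_chain() uses each decompressed chunk as the dictionary for the following frame, which is why every frame in the chain must carry its content size. A sketch of the round trip that test_simple() performs:

    import zstd

    chunks = [b'foo' * 64, b'foobar' * 64]

    frames = [zstd.ZstdCompressor(write_content_size=True).compress(chunks[0])]
    d = zstd.ZstdCompressionDict(chunks[0])
    cctx = zstd.ZstdCompressor(dict_data=d, write_content_size=True)
    frames.append(cctx.compress(chunks[1]))

    # Frame N is decompressed with chunk N-1's plain text as its
    # dictionary; only the final chunk's plain text is returned.
    dctx = zstd.ZstdDecompressor()
    assert dctx.decompress_content_dict_chain(frames) == chunks[-1]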
--- a/contrib/python-zstandard/tests/test_estimate_sizes.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/test_estimate_sizes.py Sat Mar 11 13:53:14 2017 -0500 @@ -5,7 +5,12 @@ import zstd +from . common import ( + make_cffi, +) + +@make_cffi class TestSizes(unittest.TestCase): def test_decompression_size(self): size = zstd.estimate_decompression_context_size()
--- a/contrib/python-zstandard/tests/test_module_attributes.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/test_module_attributes.py Sat Mar 11 13:53:14 2017 -0500 @@ -7,9 +7,15 @@ import zstd +from . common import ( + make_cffi, +) + + +@make_cffi class TestModuleAttributes(unittest.TestCase): def test_version(self): - self.assertEqual(zstd.ZSTD_VERSION, (1, 1, 2)) + self.assertEqual(zstd.ZSTD_VERSION, (1, 1, 3)) def test_constants(self): self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22) @@ -45,4 +51,4 @@ ) for a in attrs: - self.assertTrue(hasattr(zstd, a)) + self.assertTrue(hasattr(zstd, a), a)
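Besides the version bump to zstd 1.1.3, the attribute loop now passes the name as the assertion message, so a failure identifies which attribute is missing. In isolation:

    import unittest

    import zstd


    class Example(unittest.TestCase):
        def test_attrs(self):
            # Without the msg argument a failure prints only
            # "False is not true"; with it, the attribute name.
            for a in ('ZSTD_VERSION', 'FRAME_HEADER'):
                self.assertTrue(hasattr(zstd, a), a)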
--- a/contrib/python-zstandard/tests/test_roundtrip.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/test_roundtrip.py Sat Mar 11 13:53:14 2017 -0500 @@ -13,10 +13,14 @@ import zstd +from .common import ( + make_cffi, +) compression_levels = strategies.integers(min_value=1, max_value=22) +@make_cffi class TestRoundTrip(unittest.TestCase): @hypothesis.given(strategies.binary(), compression_levels) def test_compress_write_to(self, data, level):
--- a/contrib/python-zstandard/tests/test_train_dictionary.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/tests/test_train_dictionary.py Sat Mar 11 13:53:14 2017 -0500 @@ -7,6 +7,9 @@ import zstd +from . common import ( + make_cffi, +) if sys.version_info[0] >= 3: int_type = int @@ -14,6 +17,7 @@ int_type = long +@make_cffi class TestTrainDictionary(unittest.TestCase): def test_no_args(self): with self.assertRaises(TypeError):
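For context, a sketch of the API exercised here (dictionary training can fail on inputs that are too small or too uniform, so the samples are only illustrative; as_bytes() is the existing accessor for the raw dictionary, not part of this diff):

    import zstd

    samples = []
    for i in range(128):
        samples.append(b'foo' * 64 + str(i).encode('ascii'))
        samples.append(b'bar' * 64 + str(i).encode('ascii'))

    d = zstd.train_dictionary(8192, samples)  # (dict_size, samples)
    print(d.dict_id(), len(d.as_bytes()))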
--- a/contrib/python-zstandard/zstd.c Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd.c Sat Mar 11 13:53:14 2017 -0500 @@ -34,6 +34,11 @@ "Obtains a ``CompressionParameters`` instance from a compression level and\n" "optional input size and dictionary size"); +PyDoc_STRVAR(get_frame_parameters__doc__, +"get_frame_parameters(data)\n" +"\n" +"Obtains a ``FrameParameters`` instance by parsing data.\n"); + PyDoc_STRVAR(train_dictionary__doc__, "train_dictionary(dict_size, samples)\n" "\n" @@ -53,6 +58,8 @@ METH_NOARGS, estimate_decompression_context_size__doc__ }, { "get_compression_parameters", (PyCFunction)get_compression_parameters, METH_VARARGS, get_compression_parameters__doc__ }, + { "get_frame_parameters", (PyCFunction)get_frame_parameters, + METH_VARARGS, get_frame_parameters__doc__ }, { "train_dictionary", (PyCFunction)train_dictionary, METH_VARARGS | METH_KEYWORDS, train_dictionary__doc__ }, { NULL, NULL } @@ -70,6 +77,7 @@ void decompressobj_module_init(PyObject* mod); void decompressionwriter_module_init(PyObject* mod); void decompressoriterator_module_init(PyObject* mod); +void frameparams_module_init(PyObject* mod); void zstd_module_init(PyObject* m) { /* python-zstandard relies on unstable zstd C API features. This means @@ -87,7 +95,7 @@ We detect this mismatch here and refuse to load the module if this scenario is detected. */ - if (ZSTD_VERSION_NUMBER != 10102 || ZSTD_versionNumber() != 10102) { + if (ZSTD_VERSION_NUMBER != 10103 || ZSTD_versionNumber() != 10103) { PyErr_SetString(PyExc_ImportError, "zstd C API mismatch; Python bindings not compiled against expected zstd version"); return; } @@ -104,6 +112,7 @@ decompressobj_module_init(m); decompressionwriter_module_init(m); decompressoriterator_module_init(m); + frameparams_module_init(m); } #if PY_MAJOR_VERSION >= 3
--- a/contrib/python-zstandard/zstd/common/mem.h Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/common/mem.h Sat Mar 11 13:53:14 2017 -0500 @@ -39,7 +39,7 @@ #endif /* code only tested on 32 and 64 bits systems */ -#define MEM_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } +#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/python-zstandard/zstd/common/pool.c Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,194 @@ +/** + * Copyright (c) 2016-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/* ====== Dependencies ======= */ +#include <stddef.h> /* size_t */ +#include <stdlib.h> /* malloc, calloc, free */ +#include "pool.h" + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +#ifdef ZSTD_MULTITHREAD + +#include "threading.h" /* pthread adaptation */ + +/* A job is a function and an opaque argument */ +typedef struct POOL_job_s { + POOL_function function; + void *opaque; +} POOL_job; + +struct POOL_ctx_s { + /* Keep track of the threads */ + pthread_t *threads; + size_t numThreads; + + /* The queue is a circular buffer */ + POOL_job *queue; + size_t queueHead; + size_t queueTail; + size_t queueSize; + /* The mutex protects the queue */ + pthread_mutex_t queueMutex; + /* Condition variable for pushers to wait on when the queue is full */ + pthread_cond_t queuePushCond; + /* Condition variables for poppers to wait on when the queue is empty */ + pthread_cond_t queuePopCond; + /* Indicates if the queue is shutting down */ + int shutdown; +}; + +/* POOL_thread() : + Work thread for the thread pool. + Waits for jobs and executes them. + @returns : NULL on failure else non-null. +*/ +static void* POOL_thread(void* opaque) { + POOL_ctx* const ctx = (POOL_ctx*)opaque; + if (!ctx) { return NULL; } + for (;;) { + /* Lock the mutex and wait for a non-empty queue or until shutdown */ + pthread_mutex_lock(&ctx->queueMutex); + while (ctx->queueHead == ctx->queueTail && !ctx->shutdown) { + pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); + } + /* empty => shutting down: so stop */ + if (ctx->queueHead == ctx->queueTail) { + pthread_mutex_unlock(&ctx->queueMutex); + return opaque; + } + /* Pop a job off the queue */ + { POOL_job const job = ctx->queue[ctx->queueHead]; + ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; + /* Unlock the mutex, signal a pusher, and run the job */ + pthread_mutex_unlock(&ctx->queueMutex); + pthread_cond_signal(&ctx->queuePushCond); + job.function(job.opaque); + } + } + /* Unreachable */ +} + +POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) { + POOL_ctx *ctx; + /* Check the parameters */ + if (!numThreads || !queueSize) { return NULL; } + /* Allocate the context and zero initialize */ + ctx = (POOL_ctx *)calloc(1, sizeof(POOL_ctx)); + if (!ctx) { return NULL; } + /* Initialize the job queue. + * It needs one extra space since one space is wasted to differentiate empty + * and full queues. 
+ */ + ctx->queueSize = queueSize + 1; + ctx->queue = (POOL_job *)malloc(ctx->queueSize * sizeof(POOL_job)); + ctx->queueHead = 0; + ctx->queueTail = 0; + pthread_mutex_init(&ctx->queueMutex, NULL); + pthread_cond_init(&ctx->queuePushCond, NULL); + pthread_cond_init(&ctx->queuePopCond, NULL); + ctx->shutdown = 0; + /* Allocate space for the thread handles */ + ctx->threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t)); + ctx->numThreads = 0; + /* Check for errors */ + if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } + /* Initialize the threads */ + { size_t i; + for (i = 0; i < numThreads; ++i) { + if (pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { + ctx->numThreads = i; + POOL_free(ctx); + return NULL; + } } + ctx->numThreads = numThreads; + } + return ctx; +} + +/*! POOL_join() : + Shutdown the queue, wake any sleeping threads, and join all of the threads. +*/ +static void POOL_join(POOL_ctx *ctx) { + /* Shut down the queue */ + pthread_mutex_lock(&ctx->queueMutex); + ctx->shutdown = 1; + pthread_mutex_unlock(&ctx->queueMutex); + /* Wake up sleeping threads */ + pthread_cond_broadcast(&ctx->queuePushCond); + pthread_cond_broadcast(&ctx->queuePopCond); + /* Join all of the threads */ + { size_t i; + for (i = 0; i < ctx->numThreads; ++i) { + pthread_join(ctx->threads[i], NULL); + } } +} + +void POOL_free(POOL_ctx *ctx) { + if (!ctx) { return; } + POOL_join(ctx); + pthread_mutex_destroy(&ctx->queueMutex); + pthread_cond_destroy(&ctx->queuePushCond); + pthread_cond_destroy(&ctx->queuePopCond); + if (ctx->queue) free(ctx->queue); + if (ctx->threads) free(ctx->threads); + free(ctx); +} + +void POOL_add(void *ctxVoid, POOL_function function, void *opaque) { + POOL_ctx *ctx = (POOL_ctx *)ctxVoid; + if (!ctx) { return; } + + pthread_mutex_lock(&ctx->queueMutex); + { POOL_job const job = {function, opaque}; + /* Wait until there is space in the queue for the new job */ + size_t newTail = (ctx->queueTail + 1) % ctx->queueSize; + while (ctx->queueHead == newTail && !ctx->shutdown) { + pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + newTail = (ctx->queueTail + 1) % ctx->queueSize; + } + /* The queue is still going => there is space */ + if (!ctx->shutdown) { + ctx->queue[ctx->queueTail] = job; + ctx->queueTail = newTail; + } + } + pthread_mutex_unlock(&ctx->queueMutex); + pthread_cond_signal(&ctx->queuePopCond); +} + +#else /* ZSTD_MULTITHREAD not defined */ +/* No multi-threading support */ + +/* We don't need any data, but if it is empty malloc() might return NULL. */ +struct POOL_ctx_s { + int data; +}; + +POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) { + (void)numThreads; + (void)queueSize; + return (POOL_ctx *)malloc(sizeof(POOL_ctx)); +} + +void POOL_free(POOL_ctx *ctx) { + if (ctx) free(ctx); +} + +void POOL_add(void *ctx, POOL_function function, void *opaque) { + (void)ctx; + function(opaque); +} + +#endif /* ZSTD_MULTITHREAD */
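The job queue above is a one-slot-wasted ring buffer: head == tail always means empty, and advancing the tail onto the head would mean full, which is why POOL_create() allocates queueSize + 1 slots. A minimal Python sketch of the same invariant, without the locking:

    class RingQueue(object):
        """One slot is deliberately unused so an empty queue
        (head == tail) is distinguishable from a full one."""

        def __init__(self, capacity):
            self.size = capacity + 1          # mirrors queueSize + 1
            self.slots = [None] * self.size
            self.head = 0                     # next job to pop
            self.tail = 0                     # next free slot

        def empty(self):
            return self.head == self.tail

        def full(self):
            return (self.tail + 1) % self.size == self.head

        def push(self, job):                  # cf. POOL_add()
            assert not self.full()
            self.slots[self.tail] = job
            self.tail = (self.tail + 1) % self.size

        def pop(self):                        # cf. POOL_thread()
            assert not self.empty()
            job = self.slots[self.head]
            self.head = (self.head + 1) % self.size
            return job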
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/python-zstandard/zstd/common/pool.h Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,56 @@ +/** + * Copyright (c) 2016-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ +#ifndef POOL_H +#define POOL_H + +#if defined (__cplusplus) +extern "C" { +#endif + + +#include <stddef.h> /* size_t */ + +typedef struct POOL_ctx_s POOL_ctx; + +/*! POOL_create() : + Create a thread pool with at most `numThreads` threads. + `numThreads` must be at least 1. + The maximum number of queued jobs before blocking is `queueSize`. + `queueSize` must be at least 1. + @return : The POOL_ctx pointer on success else NULL. +*/ +POOL_ctx *POOL_create(size_t numThreads, size_t queueSize); + +/*! POOL_free() : + Free a thread pool returned by POOL_create(). +*/ +void POOL_free(POOL_ctx *ctx); + +/*! POOL_function : + The function type that can be added to a thread pool. +*/ +typedef void (*POOL_function)(void *); +/*! POOL_add_function : + The function type for a generic thread pool add function. +*/ +typedef void (*POOL_add_function)(void *, POOL_function, void *); + +/*! POOL_add() : + Add the job `function(opaque)` to the thread pool. + Possibly blocks until there is room in the queue. + Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed. +*/ +void POOL_add(void *ctx, POOL_function function, void *opaque); + + +#if defined (__cplusplus) +} +#endif + +#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/python-zstandard/zstd/common/threading.c Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,79 @@ + +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + */ + +/** + * This file holds wrappers for systems which do not support pthreads + */ + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4206) /* disable: C4206: translation unit is empty (when ZSTD_MULTITHREAD is not defined) */ +#endif + + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ + + +/* === Dependencies === */ +#include <process.h> +#include <errno.h> +#include "threading.h" + + +/* === Implementation === */ + +static unsigned __stdcall worker(void *arg) +{ + pthread_t* const thread = (pthread_t*) arg; + thread->arg = thread->start_routine(thread->arg); + return 0; +} + +int pthread_create(pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg) +{ + (void)unused; + thread->arg = arg; + thread->start_routine = start_routine; + thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); + + if (!thread->handle) + return errno; + else + return 0; +} + +int _pthread_join(pthread_t * thread, void **value_ptr) +{ + DWORD result; + + if (!thread->handle) return 0; + + result = WaitForSingleObject(thread->handle, INFINITE); + switch (result) { + case WAIT_OBJECT_0: + if (value_ptr) *value_ptr = thread->arg; + return 0; + case WAIT_ABANDONED: + return EINVAL; + default: + return GetLastError(); + } +} + +#endif /* ZSTD_MULTITHREAD */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/python-zstandard/zstd/common/threading.h Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,104 @@ + +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + */ + +#ifndef THREADING_H_938743 +#define THREADING_H_938743 + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ +#ifdef WINVER +# undef WINVER +#endif +#define WINVER 0x0600 + +#ifdef _WIN32_WINNT +# undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0600 + +#ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +#endif + +#include <windows.h> + +/* mutex */ +#define pthread_mutex_t CRITICAL_SECTION +#define pthread_mutex_init(a,b) InitializeCriticalSection((a)) +#define pthread_mutex_destroy(a) DeleteCriticalSection((a)) +#define pthread_mutex_lock(a) EnterCriticalSection((a)) +#define pthread_mutex_unlock(a) LeaveCriticalSection((a)) + +/* condition variable */ +#define pthread_cond_t CONDITION_VARIABLE +#define pthread_cond_init(a, b) InitializeConditionVariable((a)) +#define pthread_cond_destroy(a) /* No delete */ +#define pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) +#define pthread_cond_signal(a) WakeConditionVariable((a)) +#define pthread_cond_broadcast(a) WakeAllConditionVariable((a)) + +/* pthread_create() and pthread_join() */ +typedef struct { + HANDLE handle; + void* (*start_routine)(void*); + void* arg; +} pthread_t; + +int pthread_create(pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg); + +#define pthread_join(a, b) _pthread_join(&(a), (b)) +int _pthread_join(pthread_t* thread, void** value_ptr); + +/** + * add here more wrappers as required + */ + + +#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ +/* === POSIX Systems === */ +# include <pthread.h> + +#else /* ZSTD_MULTITHREAD not defined */ +/* No multithreading support */ + +#define pthread_mutex_t int /* #define rather than typedef, as sometimes pthread support is implicit, resulting in duplicated symbols */ +#define pthread_mutex_init(a,b) +#define pthread_mutex_destroy(a) +#define pthread_mutex_lock(a) +#define pthread_mutex_unlock(a) + +#define pthread_cond_t int +#define pthread_cond_init(a,b) +#define pthread_cond_destroy(a) +#define pthread_cond_wait(a,b) +#define pthread_cond_signal(a) +#define pthread_cond_broadcast(a) + +/* do not use pthread_t */ + +#endif /* ZSTD_MULTITHREAD */ + +#if defined (__cplusplus) +} +#endif + +#endif /* THREADING_H_938743 */
--- a/contrib/python-zstandard/zstd/common/zstd_common.c Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/common/zstd_common.c Sat Mar 11 13:53:14 2017 -0500 @@ -43,10 +43,6 @@ * provides error code string from enum */ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorName(code); } -/* --- ZBUFF Error Management (deprecated) --- */ -unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); } -const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } - /*=************************************************************** * Custom allocator
--- a/contrib/python-zstandard/zstd/common/zstd_errors.h Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/common/zstd_errors.h Sat Mar 11 13:53:14 2017 -0500 @@ -18,6 +18,20 @@ #include <stddef.h> /* size_t */ +/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ +#if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#else +# define ZSTDERRORLIB_VISIBILITY +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +#endif + /*-**************************************** * error codes list ******************************************/ @@ -49,8 +63,8 @@ /*! ZSTD_getErrorCode() : convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, which can be used to compare directly with enum list published into "error_public.h" */ -ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); -const char* ZSTD_getErrorString(ZSTD_ErrorCode code); +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); #if defined (__cplusplus)
--- a/contrib/python-zstandard/zstd/common/zstd_internal.h Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/common/zstd_internal.h Sat Mar 11 13:53:14 2017 -0500 @@ -267,4 +267,13 @@ } +/* hidden functions */ + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); + + #endif /* ZSTD_CCOMMON_H_MODULE */
--- a/contrib/python-zstandard/zstd/compress/zstd_compress.c Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/compress/zstd_compress.c Sat Mar 11 13:53:14 2017 -0500 @@ -51,8 +51,7 @@ /*-************************************* * Context memory management ***************************************/ -struct ZSTD_CCtx_s -{ +struct ZSTD_CCtx_s { const BYTE* nextSrc; /* next block here to continue on current prefix */ const BYTE* base; /* All regular indexes relative to this position */ const BYTE* dictBase; /* extDict indexes relative to this position */ @@ -61,10 +60,11 @@ U32 nextToUpdate; /* index from which to continue dictionary update */ U32 nextToUpdate3; /* index from which to continue dictionary update */ U32 hashLog3; /* dispatch table : larger == faster, more memory */ - U32 loadedDictEnd; + U32 loadedDictEnd; /* index of end of dictionary */ + U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */ ZSTD_compressionStage_e stage; U32 rep[ZSTD_REP_NUM]; - U32 savedRep[ZSTD_REP_NUM]; + U32 repToConfirm[ZSTD_REP_NUM]; U32 dictID; ZSTD_parameters params; void* workSpace; @@ -101,7 +101,7 @@ cctx = (ZSTD_CCtx*) ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); if (!cctx) return NULL; memset(cctx, 0, sizeof(ZSTD_CCtx)); - memcpy(&(cctx->customMem), &customMem, sizeof(customMem)); + cctx->customMem = customMem; return cctx; } @@ -119,6 +119,15 @@ return sizeof(*cctx) + cctx->workSpaceSize; } +size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value) +{ + switch(param) + { + case ZSTD_p_forceWindow : cctx->forceWindow = value>0; cctx->loadedDictEnd = 0; return 0; + default: return ERROR(parameter_unknown); + } +} + const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) /* hidden interface */ { return &(ctx->seqStore); @@ -318,6 +327,14 @@ } } +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { + int i; + for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = 0; +} /*! ZSTD_copyCCtx() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. @@ -735,12 +752,19 @@ if ((size_t)(op-ostart) >= maxCSize) return 0; } /* confirm repcodes */ - { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->savedRep[i]; } + { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->repToConfirm[i]; } return op - ostart; } +#if 0 /* for debug */ +# define STORESEQ_DEBUG +#include <stdio.h> /* fprintf */ +U32 g_startDebug = 0; +const BYTE* g_start = NULL; +#endif + /*! ZSTD_storeSeq() : Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. `offsetCode` : distance to match, or 0 == repCode. 
@@ -748,13 +772,14 @@ */ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode) { -#if 0 /* for debug */ - static const BYTE* g_start = NULL; - const U32 pos = (U32)((const BYTE*)literals - g_start); - if (g_start==NULL) g_start = (const BYTE*)literals; - //if ((pos > 1) && (pos < 50000)) - printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n", - pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); +#ifdef STORESEQ_DEBUG + if (g_startDebug) { + const U32 pos = (U32)((const BYTE*)literals - g_start); + if (g_start==NULL) g_start = (const BYTE*)literals; + if ((pos > 1895000) && (pos < 1895300)) + fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n", + pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); + } #endif /* copy Literals */ ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); @@ -1004,8 +1029,8 @@ } } } /* save reps for next block */ - cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved; - cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved; + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; /* Last Literals */ { size_t const lastLLSize = iend - anchor; @@ -1119,7 +1144,7 @@ } } } /* save reps for next block */ - ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2; + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; /* Last Literals */ { size_t const lastLLSize = iend - anchor; @@ -1273,8 +1298,8 @@ } } } /* save reps for next block */ - cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved; - cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved; + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; /* Last Literals */ { size_t const lastLLSize = iend - anchor; @@ -1423,7 +1448,7 @@ } } } /* save reps for next block */ - ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2; + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; /* Last Literals */ { size_t const lastLLSize = iend - anchor; @@ -1955,8 +1980,8 @@ } } /* Save reps for next block */ - ctx->savedRep[0] = offset_1 ? offset_1 : savedOffset; - ctx->savedRep[1] = offset_2 ? offset_2 : savedOffset; + ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; + ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset; /* Last Literals */ { size_t const lastLLSize = iend - anchor; @@ -2150,7 +2175,7 @@ } } /* Save reps for next block */ - ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2; + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; /* Last Literals */ { size_t const lastLLSize = iend - anchor; @@ -2409,12 +2434,14 @@ cctx->nextSrc = ip + srcSize; - { size_t const cSize = frame ? + if (srcSize) { + size_t const cSize = frame ? ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); if (ZSTD_isError(cSize)) return cSize; return cSize + fhSize; - } + } else + return fhSize; } @@ -2450,7 +2477,7 @@ zc->dictBase = zc->base; zc->base += ip - zc->nextSrc; zc->nextToUpdate = zc->dictLimit; - zc->loadedDictEnd = (U32)(iend - zc->base); + zc->loadedDictEnd = zc->forceWindow ? 
0 : (U32)(iend - zc->base); zc->nextSrc = iend; if (srcSize <= HASH_READ_SIZE) return 0; @@ -2557,9 +2584,9 @@ } if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); - cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted); - cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted); - cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted); + cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] == 0 || cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted); + cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] == 0 || cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted); + cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] == 0 || cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted); dictPtr += 12; { U32 offcodeMax = MaxOff; @@ -2594,7 +2621,6 @@ } } - /*! ZSTD_compressBegin_internal() : * @return : 0, or an error code */ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, @@ -2626,9 +2652,9 @@ } -size_t ZSTD_compressBegin(ZSTD_CCtx* zc, int compressionLevel) +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) { - return ZSTD_compressBegin_usingDict(zc, NULL, 0, compressionLevel); + return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); } @@ -2733,7 +2759,8 @@ /* ===== Dictionary API ===== */ struct ZSTD_CDict_s { - void* dictContent; + void* dictBuffer; + const void* dictContent; size_t dictContentSize; ZSTD_CCtx* refContext; }; /* typedef'd tp ZSTD_CDict within "zstd.h" */ @@ -2741,39 +2768,45 @@ size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support sizeof on NULL */ - return ZSTD_sizeof_CCtx(cdict->refContext) + cdict->dictContentSize; + return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? 
cdict->dictContentSize : 0) + sizeof(*cdict); } -ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_parameters params, ZSTD_customMem customMem) +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, unsigned byReference, + ZSTD_parameters params, ZSTD_customMem customMem) { if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; if (!customMem.customAlloc || !customMem.customFree) return NULL; { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem); - void* const dictContent = ZSTD_malloc(dictSize, customMem); ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem); - if (!dictContent || !cdict || !cctx) { - ZSTD_free(dictContent, customMem); + if (!cdict || !cctx) { ZSTD_free(cdict, customMem); ZSTD_free(cctx, customMem); return NULL; } - if (dictSize) { - memcpy(dictContent, dict, dictSize); + if ((byReference) || (!dictBuffer) || (!dictSize)) { + cdict->dictBuffer = NULL; + cdict->dictContent = dictBuffer; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, customMem); + if (!internalBuffer) { ZSTD_free(cctx, customMem); ZSTD_free(cdict, customMem); return NULL; } + memcpy(internalBuffer, dictBuffer, dictSize); + cdict->dictBuffer = internalBuffer; + cdict->dictContent = internalBuffer; } - { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, dictContent, dictSize, params, 0); + + { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); if (ZSTD_isError(errorCode)) { - ZSTD_free(dictContent, customMem); + ZSTD_free(cdict->dictBuffer, customMem); + ZSTD_free(cctx, customMem); ZSTD_free(cdict, customMem); - ZSTD_free(cctx, customMem); return NULL; } } - cdict->dictContent = dictContent; + cdict->refContext = cctx; cdict->dictContentSize = dictSize; - cdict->refContext = cctx; return cdict; } } @@ -2783,7 +2816,15 @@ ZSTD_customMem const allocator = { NULL, NULL, NULL }; ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize); params.fParams.contentSizeFlag = 1; - return ZSTD_createCDict_advanced(dict, dictSize, params, allocator); + return ZSTD_createCDict_advanced(dict, dictSize, 0, params, allocator); +} + +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize); + params.fParams.contentSizeFlag = 1; + return ZSTD_createCDict_advanced(dict, dictSize, 1, params, allocator); } size_t ZSTD_freeCDict(ZSTD_CDict* cdict) @@ -2791,7 +2832,7 @@ if (cdict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = cdict->refContext->customMem; ZSTD_freeCCtx(cdict->refContext); - ZSTD_free(cdict->dictContent, cMem); + ZSTD_free(cdict->dictBuffer, cMem); ZSTD_free(cdict, cMem); return 0; } @@ -2801,7 +2842,7 @@ return ZSTD_getParamsFromCCtx(cdict->refContext); } -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, U64 pledgedSrcSize) +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize) { if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize)) else CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, cdict->refContext->params, pledgedSrcSize)); @@ -2900,7 +2941,7 @@ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) { - if (zcs->inBuffSize==0) return ERROR(stage_wrong); /* 
zcs has not been init at least once */ + if (zcs->inBuffSize==0) return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */ if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize)) else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize)); @@ -2937,9 +2978,9 @@ if (zcs->outBuff == NULL) return ERROR(memory_allocation); } - if (dict) { + if (dict && dictSize >= 8) { ZSTD_freeCDict(zcs->cdictLocal); - zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, params, zcs->customMem); + zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem); if (zcs->cdictLocal == NULL) return ERROR(memory_allocation); zcs->cdict = zcs->cdictLocal; } else zcs->cdict = NULL; @@ -2956,6 +2997,7 @@ ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict); size_t const initError = ZSTD_initCStream_advanced(zcs, NULL, 0, params, 0); zcs->cdict = cdict; + zcs->cctx->dictID = params.fParams.noDictIDFlag ? 0 : cdict->refContext->dictID; return initError; } @@ -2967,7 +3009,8 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize) { - ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0); + ZSTD_parameters params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0); + if (pledgedSrcSize) params.fParams.contentSizeFlag = 1; return ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize); }
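The dictBuffer/dictContent split above means a CDict can now either own a private copy of the dictionary or merely borrow the caller's buffer: with byReference set, dictBuffer stays NULL, nothing is copied, and ZSTD_freeCDict() releases only the reference. This is also why ZSTD_sizeof_CDict() now counts dictContentSize only when dictBuffer is non-NULL. A minimal usage sketch of the new entry point, assuming it is reached via ZSTD_STATIC_LINKING_ONLY; the helper name and level 3 are illustrative, and error handling is abbreviated:

    #define ZSTD_STATIC_LINKING_ONLY   /* assumed to expose ZSTD_createCDict_byReference */
    #include "zstd.h"

    size_t compress_with_shared_dict(void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     const void* dict, size_t dictSize)
    {
        /* no copy is made: 'dict' must outlive the CDict */
        ZSTD_CDict* const cdict = ZSTD_createCDict_byReference(dict, dictSize, 3);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize = (size_t)-1;   /* generic failure */
        if (cdict && cctx)
            cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                             src, srcSize, cdict);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);   /* drops the reference, not 'dict' */
        return cSize;
    }

For a long-lived dictionary shared across many contexts, this saves one dictSize-byte allocation and memcpy per digested dictionary.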
--- a/contrib/python-zstandard/zstd/compress/zstd_opt.h Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/compress/zstd_opt.h Sat Mar 11 13:53:14 2017 -0500 @@ -38,7 +38,7 @@ ssPtr->cachedLiterals = NULL; ssPtr->cachedPrice = ssPtr->cachedLitLength = 0; - ssPtr->staticPrices = 0; + ssPtr->staticPrices = 0; if (ssPtr->litLengthSum == 0) { if (srcSize <= 1024) ssPtr->staticPrices = 1; @@ -56,7 +56,7 @@ for (u=0; u<=MaxLit; u++) { ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV); - ssPtr->litSum += ssPtr->litFreq[u]; + ssPtr->litSum += ssPtr->litFreq[u]; } for (u=0; u<=MaxLL; u++) ssPtr->litLengthFreq[u] = 1; @@ -634,7 +634,7 @@ } } /* for (cur=0; cur < last_pos; ) */ /* Save reps for next block */ - { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; } + { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } /* Last Literals */ { size_t const lastLLSize = iend - anchor; @@ -825,7 +825,7 @@ match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); - if (match_num > 0 && matches[match_num-1].len > sufficient_len) { + if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { best_mlen = matches[match_num-1].len; best_off = matches[match_num-1].off; last_pos = cur + 1; @@ -835,7 +835,7 @@ /* set prices using matches at position = cur */ for (u = 0; u < match_num; u++) { mlen = (u>0) ? matches[u-1].len+1 : best_mlen; - best_mlen = (cur + matches[u].len < ZSTD_OPT_NUM) ? matches[u].len : ZSTD_OPT_NUM - cur; + best_mlen = matches[u].len; while (mlen <= best_mlen) { if (opt[cur].mlen == 1) { @@ -907,7 +907,7 @@ } } /* for (cur=0; cur < last_pos; ) */ /* Save reps for next block */ - { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; } + { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } /* Last Literals */ { size_t lastLLSize = iend - anchor;
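Both hunks above rename savedRep to repToConfirm, which better matches the semantics: repeat offsets discovered while parsing a block are provisional, and should only replace the live set once the block is definitely emitted in compressed form. A toy illustration of that staging pattern, under names of my own (rep_state, rep_stage, rep_commit are not zstd identifiers):

    #define REP_NUM 3   /* zstd tracks three repeat offsets (ZSTD_REP_NUM) */

    typedef struct {
        unsigned rep[REP_NUM];          /* offsets in effect for the next block */
        unsigned repToConfirm[REP_NUM]; /* offsets produced while parsing */
    } rep_state;

    static void rep_stage(rep_state* s, const unsigned newReps[REP_NUM])
    {
        int i;
        for (i = 0; i < REP_NUM; i++) s->repToConfirm[i] = newReps[i];
    }

    /* call only once the block is known to be kept (e.g. not re-emitted raw) */
    static void rep_commit(rep_state* s)
    {
        int i;
        for (i = 0; i < REP_NUM; i++) s->rep[i] = s->repToConfirm[i];
    }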
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.c Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,740 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/* ====== Tuning parameters ====== */ +#define ZSTDMT_NBTHREADS_MAX 128 + + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +/* ====== Dependencies ====== */ +#include <stdlib.h> /* malloc */ +#include <string.h> /* memcpy */ +#include "pool.h" /* threadpool */ +#include "threading.h" /* mutex */ +#include "zstd_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ +#include "zstdmt_compress.h" +#define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ +#include "xxhash.h" + + +/* ====== Debug ====== */ +#if 0 + +# include <stdio.h> +# include <unistd.h> +# include <sys/times.h> + static unsigned g_debugLevel = 3; +# define DEBUGLOGRAW(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __VA_ARGS__); } +# define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); } + +# define DEBUG_PRINTHEX(l,p,n) { \ + unsigned debug_u; \ + for (debug_u=0; debug_u<(n); debug_u++) \ + DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ + DEBUGLOGRAW(l, " \n"); \ +} + +static unsigned long long GetCurrentClockTimeMicroseconds() +{ + static clock_t _ticksPerSecond = 0; + if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); + + struct tms junk; clock_t newTicks = (clock_t) times(&junk); + return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); +} + +#define MUTEX_WAIT_TIME_DLEVEL 5 +#define PTHREAD_MUTEX_LOCK(mutex) \ +if (g_debugLevel>=MUTEX_WAIT_TIME_DLEVEL) { \ + unsigned long long beforeTime = GetCurrentClockTimeMicroseconds(); \ + pthread_mutex_lock(mutex); \ + unsigned long long afterTime = GetCurrentClockTimeMicroseconds(); \ + unsigned long long elapsedTime = (afterTime-beforeTime); \ + if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ + DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ + elapsedTime, #mutex); \ + } \ +} else pthread_mutex_lock(mutex); + +#else + +# define DEBUGLOG(l, ...) 
{} /* disabled */ +# define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m) +# define DEBUG_PRINTHEX(l,p,n) {} + +#endif + + +/* ===== Buffer Pool ===== */ + +typedef struct buffer_s { + void* start; + size_t size; +} buffer_t; + +static const buffer_t g_nullBuffer = { NULL, 0 }; + +typedef struct ZSTDMT_bufferPool_s { + unsigned totalBuffers; + unsigned nbBuffers; + buffer_t bTable[1]; /* variable size */ +} ZSTDMT_bufferPool; + +static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads) +{ + unsigned const maxNbBuffers = 2*nbThreads + 2; + ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)calloc(1, sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t)); + if (bufPool==NULL) return NULL; + bufPool->totalBuffers = maxNbBuffers; + bufPool->nbBuffers = 0; + return bufPool; +} + +static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) +{ + unsigned u; + if (!bufPool) return; /* compatibility with free on NULL */ + for (u=0; u<bufPool->totalBuffers; u++) + free(bufPool->bTable[u].start); + free(bufPool); +} + +/* assumption : invocation from main thread only ! */ +static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* pool, size_t bSize) +{ + if (pool->nbBuffers) { /* try to use an existing buffer */ + buffer_t const buf = pool->bTable[--(pool->nbBuffers)]; + size_t const availBufferSize = buf.size; + if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize)) /* large enough, but not too much */ + return buf; + free(buf.start); /* size conditions not respected : scratch this buffer and create a new one */ + } + /* create new buffer */ + { buffer_t buffer; + void* const start = malloc(bSize); + if (start==NULL) bSize = 0; + buffer.start = start; /* note : start can be NULL if malloc fails ! */ + buffer.size = bSize; + return buffer; + } +} + +/* store buffer for later re-use, up to pool capacity */ +static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf) +{ + if (buf.start == NULL) return; /* release on NULL */ + if (pool->nbBuffers < pool->totalBuffers) { + pool->bTable[pool->nbBuffers++] = buf; /* store for later re-use */ + return; + } + /* Reached bufferPool capacity (should not happen) */ + free(buf.start); +} + + +/* ===== CCtx Pool ===== */ + +typedef struct { + unsigned totalCCtx; + unsigned availCCtx; + ZSTD_CCtx* cctx[1]; /* variable size */ +} ZSTDMT_CCtxPool; + +/* assumption : CCtxPool invocation only from main thread */ + +/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ +static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) +{ + unsigned u; + for (u=0; u<pool->totalCCtx; u++) + ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */ + free(pool); +} + +/* ZSTDMT_createCCtxPool() : + * implies nbThreads >= 1 , checked by caller ZSTDMT_createCCtx() */ +static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads) +{ + ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) calloc(1, sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*)); + if (!cctxPool) return NULL; + cctxPool->totalCCtx = nbThreads; + cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ + cctxPool->cctx[0] = ZSTD_createCCtx(); + if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } + DEBUGLOG(1, "cctxPool created, with %u threads", nbThreads); + return cctxPool; +} + +static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* pool) +{ + if (pool->availCCtx) { + pool->availCCtx--; + return pool->cctx[pool->availCCtx]; + } + return ZSTD_createCCtx(); /* note : 
can be NULL, when creation fails ! */ +} + +static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return; /* compatibility with release on NULL */ + if (pool->availCCtx < pool->totalCCtx) + pool->cctx[pool->availCCtx++] = cctx; + else + /* pool overflow : should not happen, since totalCCtx==nbThreads */ + ZSTD_freeCCtx(cctx); +} + + +/* ===== Thread worker ===== */ + +typedef struct { + buffer_t buffer; + size_t filled; +} inBuff_t; + +typedef struct { + ZSTD_CCtx* cctx; + buffer_t src; + const void* srcStart; + size_t srcSize; + size_t dictSize; + buffer_t dstBuff; + size_t cSize; + size_t dstFlushed; + unsigned firstChunk; + unsigned lastChunk; + unsigned jobCompleted; + unsigned jobScanned; + pthread_mutex_t* jobCompleted_mutex; + pthread_cond_t* jobCompleted_cond; + ZSTD_parameters params; + ZSTD_CDict* cdict; + unsigned long long fullFrameSize; +} ZSTDMT_jobDescription; + +/* ZSTDMT_compressChunk() : POOL_function type */ +void ZSTDMT_compressChunk(void* jobDescription) +{ + ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; + const void* const src = (const char*)job->srcStart + job->dictSize; + buffer_t const dstBuff = job->dstBuff; + DEBUGLOG(3, "job (first:%u) (last:%u) : dictSize %u, srcSize %u", job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize); + if (job->cdict) { + size_t const initError = ZSTD_compressBegin_usingCDict(job->cctx, job->cdict, job->fullFrameSize); + if (job->cdict) DEBUGLOG(3, "using CDict "); + if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; } + } else { + size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize); + if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; } + ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1); + } + if (!job->firstChunk) { /* flush frame header */ + size_t const hSize = ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, 0); + if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; } + ZSTD_invalidateRepCodes(job->cctx); + } + + DEBUGLOG(4, "Compressing : "); + DEBUG_PRINTHEX(4, job->srcStart, 12); + job->cSize = (job->lastChunk) ? 
/* last chunk signal */ + ZSTD_compressEnd (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) : + ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize); + DEBUGLOG(3, "compressed %u bytes into %u bytes (first:%u) (last:%u)", (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk); + +_endJob: + PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex); + job->jobCompleted = 1; + job->jobScanned = 0; + pthread_cond_signal(job->jobCompleted_cond); + pthread_mutex_unlock(job->jobCompleted_mutex); +} + + +/* ------------------------------------------ */ +/* ===== Multi-threaded compression ===== */ +/* ------------------------------------------ */ + +struct ZSTDMT_CCtx_s { + POOL_ctx* factory; + ZSTDMT_bufferPool* buffPool; + ZSTDMT_CCtxPool* cctxPool; + pthread_mutex_t jobCompleted_mutex; + pthread_cond_t jobCompleted_cond; + size_t targetSectionSize; + size_t marginSize; + size_t inBuffSize; + size_t dictSize; + size_t targetDictSize; + inBuff_t inBuff; + ZSTD_parameters params; + XXH64_state_t xxhState; + unsigned nbThreads; + unsigned jobIDMask; + unsigned doneJobID; + unsigned nextJobID; + unsigned frameEnded; + unsigned allJobsCompleted; + unsigned overlapRLog; + unsigned long long frameContentSize; + size_t sectionSize; + ZSTD_CDict* cdict; + ZSTD_CStream* cstream; + ZSTDMT_jobDescription jobs[1]; /* variable size (must lies at the end) */ +}; + +ZSTDMT_CCtx *ZSTDMT_createCCtx(unsigned nbThreads) +{ + ZSTDMT_CCtx* cctx; + U32 const minNbJobs = nbThreads + 2; + U32 const nbJobsLog2 = ZSTD_highbit32(minNbJobs) + 1; + U32 const nbJobs = 1 << nbJobsLog2; + DEBUGLOG(5, "nbThreads : %u ; minNbJobs : %u ; nbJobsLog2 : %u ; nbJobs : %u \n", + nbThreads, minNbJobs, nbJobsLog2, nbJobs); + if ((nbThreads < 1) | (nbThreads > ZSTDMT_NBTHREADS_MAX)) return NULL; + cctx = (ZSTDMT_CCtx*) calloc(1, sizeof(ZSTDMT_CCtx) + nbJobs*sizeof(ZSTDMT_jobDescription)); + if (!cctx) return NULL; + cctx->nbThreads = nbThreads; + cctx->jobIDMask = nbJobs - 1; + cctx->allJobsCompleted = 1; + cctx->sectionSize = 0; + cctx->overlapRLog = 3; + cctx->factory = POOL_create(nbThreads, 1); + cctx->buffPool = ZSTDMT_createBufferPool(nbThreads); + cctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads); + if (!cctx->factory | !cctx->buffPool | !cctx->cctxPool) { /* one object was not created */ + ZSTDMT_freeCCtx(cctx); + return NULL; + } + if (nbThreads==1) { + cctx->cstream = ZSTD_createCStream(); + if (!cctx->cstream) { + ZSTDMT_freeCCtx(cctx); return NULL; + } } + pthread_mutex_init(&cctx->jobCompleted_mutex, NULL); /* Todo : check init function return */ + pthread_cond_init(&cctx->jobCompleted_cond, NULL); + DEBUGLOG(4, "mt_cctx created, for %u threads \n", nbThreads); + return cctx; +} + +/* ZSTDMT_releaseAllJobResources() : + * Ensure all workers are killed first. 
*/ +static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) +{ + unsigned jobID; + for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].dstBuff); + mtctx->jobs[jobID].dstBuff = g_nullBuffer; + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].src); + mtctx->jobs[jobID].src = g_nullBuffer; + ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[jobID].cctx); + mtctx->jobs[jobID].cctx = NULL; + } + memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription)); + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer); + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->allJobsCompleted = 1; +} + +size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) +{ + if (mtctx==NULL) return 0; /* compatible with free on NULL */ + POOL_free(mtctx->factory); + if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */ + ZSTDMT_freeBufferPool(mtctx->buffPool); /* release job resources into pools first */ + ZSTDMT_freeCCtxPool(mtctx->cctxPool); + ZSTD_freeCDict(mtctx->cdict); + ZSTD_freeCStream(mtctx->cstream); + pthread_mutex_destroy(&mtctx->jobCompleted_mutex); + pthread_cond_destroy(&mtctx->jobCompleted_cond); + free(mtctx); + return 0; +} + +size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value) +{ + switch(parameter) + { + case ZSTDMT_p_sectionSize : + mtctx->sectionSize = value; + return 0; + case ZSTDMT_p_overlapSectionLog : + DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value); + mtctx->overlapRLog = (value >= 9) ? 0 : 9 - value; + return 0; + default : + return ERROR(compressionParameter_unsupported); + } +} + + +/* ------------------------------------------ */ +/* ===== Multi-threaded compression ===== */ +/* ------------------------------------------ */ + +size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel) +{ + ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0); + size_t const chunkTargetSize = (size_t)1 << (params.cParams.windowLog + 2); + unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + (srcSize < chunkTargetSize) /* min 1 */; + unsigned nbChunks = MIN(nbChunksMax, mtctx->nbThreads); + size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks; + size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0xFFFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize; /* avoid too small last block */ + size_t remainingSrcSize = srcSize; + const char* const srcStart = (const char*)src; + size_t frameStartPos = 0; + + DEBUGLOG(3, "windowLog : %2u => chunkTargetSize : %u bytes ", params.cParams.windowLog, (U32)chunkTargetSize); + DEBUGLOG(2, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize); + params.fParams.contentSizeFlag = 1; + + if (nbChunks==1) { /* fallback to single-thread mode */ + ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0]; + return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel); + } + + { unsigned u; + for (u=0; u<nbChunks; u++) { + size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize); + size_t const dstBufferCapacity = u ? ZSTD_compressBound(chunkSize) : dstCapacity; + buffer_t const dstAsBuffer = { dst, dstCapacity }; + buffer_t const dstBuffer = u ? 
ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity) : dstAsBuffer; + ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool); + + if ((cctx==NULL) || (dstBuffer.start==NULL)) { + mtctx->jobs[u].cSize = ERROR(memory_allocation); /* job result */ + mtctx->jobs[u].jobCompleted = 1; + nbChunks = u+1; + break; /* let's wait for previous jobs to complete, but don't start new ones */ + } + + mtctx->jobs[u].srcStart = srcStart + frameStartPos; + mtctx->jobs[u].srcSize = chunkSize; + mtctx->jobs[u].fullFrameSize = srcSize; + mtctx->jobs[u].params = params; + mtctx->jobs[u].dstBuff = dstBuffer; + mtctx->jobs[u].cctx = cctx; + mtctx->jobs[u].firstChunk = (u==0); + mtctx->jobs[u].lastChunk = (u==nbChunks-1); + mtctx->jobs[u].jobCompleted = 0; + mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex; + mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond; + + DEBUGLOG(3, "posting job %u (%u bytes)", u, (U32)chunkSize); + DEBUG_PRINTHEX(3, mtctx->jobs[u].srcStart, 12); + POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]); + + frameStartPos += chunkSize; + remainingSrcSize -= chunkSize; + } } + /* note : since nbChunks <= nbThreads, all jobs should be running immediately in parallel */ + + { unsigned chunkID; + size_t error = 0, dstPos = 0; + for (chunkID=0; chunkID<nbChunks; chunkID++) { + DEBUGLOG(3, "waiting for chunk %u ", chunkID); + PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex); + while (mtctx->jobs[chunkID].jobCompleted==0) { + DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", chunkID); + pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex); + } + pthread_mutex_unlock(&mtctx->jobCompleted_mutex); + DEBUGLOG(3, "ready to write chunk %u ", chunkID); + + ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx); + mtctx->jobs[chunkID].cctx = NULL; + mtctx->jobs[chunkID].srcStart = NULL; + { size_t const cSize = mtctx->jobs[chunkID].cSize; + if (ZSTD_isError(cSize)) error = cSize; + if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall); + if (chunkID) { /* note : chunk 0 is already written directly into dst */ + if (!error) memcpy((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize); + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff); + mtctx->jobs[chunkID].dstBuff = g_nullBuffer; + } + dstPos += cSize ; + } + } + if (!error) DEBUGLOG(3, "compressed size : %u ", (U32)dstPos); + return error ? 
error : dstPos; + } + +} + + +/* ====================================== */ +/* ======= Streaming API ======= */ +/* ====================================== */ + +static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) { + while (zcs->doneJobID < zcs->nextJobID) { + unsigned const jobID = zcs->doneJobID & zcs->jobIDMask; + PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); + while (zcs->jobs[jobID].jobCompleted==0) { + DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */ + pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); + } + pthread_mutex_unlock(&zcs->jobCompleted_mutex); + zcs->doneJobID++; + } +} + + +static size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, + const void* dict, size_t dictSize, unsigned updateDict, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + ZSTD_customMem const cmem = { NULL, NULL, NULL }; + DEBUGLOG(3, "Started new compression, with windowLog : %u", params.cParams.windowLog); + if (zcs->nbThreads==1) return ZSTD_initCStream_advanced(zcs->cstream, dict, dictSize, params, pledgedSrcSize); + if (zcs->allJobsCompleted == 0) { /* previous job not correctly finished */ + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + zcs->allJobsCompleted = 1; + } + zcs->params = params; + if (updateDict) { + ZSTD_freeCDict(zcs->cdict); zcs->cdict = NULL; + if (dict && dictSize) { + zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, 0, params, cmem); + if (zcs->cdict == NULL) return ERROR(memory_allocation); + } } + zcs->frameContentSize = pledgedSrcSize; + zcs->targetDictSize = (zcs->overlapRLog>=9) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - zcs->overlapRLog); + DEBUGLOG(4, "overlapRLog : %u ", zcs->overlapRLog); + DEBUGLOG(3, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10)); + zcs->targetSectionSize = zcs->sectionSize ? 
zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2); + zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize); + zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize); + DEBUGLOG(3, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10)); + zcs->marginSize = zcs->targetSectionSize >> 2; + zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize + zcs->marginSize; + zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize); + if (zcs->inBuff.buffer.start == NULL) return ERROR(memory_allocation); + zcs->inBuff.filled = 0; + zcs->dictSize = 0; + zcs->doneJobID = 0; + zcs->nextJobID = 0; + zcs->frameEnded = 0; + zcs->allJobsCompleted = 0; + if (params.fParams.checksumFlag) XXH64_reset(&zcs->xxhState, 0); + return 0; +} + +size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + return ZSTDMT_initCStream_internal(zcs, dict, dictSize, 1, params, pledgedSrcSize); +} + +/* ZSTDMT_resetCStream() : + * pledgedSrcSize is optional and can be zero == unknown */ +size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long pledgedSrcSize) +{ + if (zcs->nbThreads==1) return ZSTD_resetCStream(zcs->cstream, pledgedSrcSize); + return ZSTDMT_initCStream_internal(zcs, NULL, 0, 0, zcs->params, pledgedSrcSize); +} + +size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) { + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0); + return ZSTDMT_initCStream_internal(zcs, NULL, 0, 1, params, 0); +} + + +static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsigned endFrame) +{ + size_t const dstBufferCapacity = ZSTD_compressBound(srcSize); + buffer_t const dstBuffer = ZSTDMT_getBuffer(zcs->buffPool, dstBufferCapacity); + ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(zcs->cctxPool); + unsigned const jobID = zcs->nextJobID & zcs->jobIDMask; + + if ((cctx==NULL) || (dstBuffer.start==NULL)) { + zcs->jobs[jobID].jobCompleted = 1; + zcs->nextJobID++; + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + return ERROR(memory_allocation); + } + + DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ", zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize); + zcs->jobs[jobID].src = zcs->inBuff.buffer; + zcs->jobs[jobID].srcStart = zcs->inBuff.buffer.start; + zcs->jobs[jobID].srcSize = srcSize; + zcs->jobs[jobID].dictSize = zcs->dictSize; /* note : zcs->inBuff.filled is presumed >= srcSize + dictSize */ + zcs->jobs[jobID].params = zcs->params; + if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0; /* do not calculate checksum within sections, just keep it in header for first section */ + zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? 
zcs->cdict : NULL; + zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize; + zcs->jobs[jobID].dstBuff = dstBuffer; + zcs->jobs[jobID].cctx = cctx; + zcs->jobs[jobID].firstChunk = (zcs->nextJobID==0); + zcs->jobs[jobID].lastChunk = endFrame; + zcs->jobs[jobID].jobCompleted = 0; + zcs->jobs[jobID].dstFlushed = 0; + zcs->jobs[jobID].jobCompleted_mutex = &zcs->jobCompleted_mutex; + zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond; + + /* get a new buffer for next input */ + if (!endFrame) { + size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize); + zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize); + if (zcs->inBuff.buffer.start == NULL) { /* not enough memory to allocate next input buffer */ + zcs->jobs[jobID].jobCompleted = 1; + zcs->nextJobID++; + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + return ERROR(memory_allocation); + } + DEBUGLOG(5, "inBuff filled to %u", (U32)zcs->inBuff.filled); + zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize; + DEBUGLOG(5, "new job : filled to %u, with %u dict and %u src", (U32)zcs->inBuff.filled, (U32)newDictSize, (U32)(zcs->inBuff.filled - newDictSize)); + memmove(zcs->inBuff.buffer.start, (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize, zcs->inBuff.filled); + DEBUGLOG(5, "new inBuff pre-filled"); + zcs->dictSize = newDictSize; + } else { + zcs->inBuff.buffer = g_nullBuffer; + zcs->inBuff.filled = 0; + zcs->dictSize = 0; + zcs->frameEnded = 1; + if (zcs->nextJobID == 0) + zcs->params.fParams.checksumFlag = 0; /* single chunk : checksum is calculated directly within worker thread */ + } + + DEBUGLOG(3, "posting job %u : %u bytes (end:%u) (note : doneJob = %u=>%u)", zcs->nextJobID, (U32)zcs->jobs[jobID].srcSize, zcs->jobs[jobID].lastChunk, zcs->doneJobID, zcs->doneJobID & zcs->jobIDMask); + POOL_add(zcs->factory, ZSTDMT_compressChunk, &zcs->jobs[jobID]); /* this call is blocking when thread worker pool is exhausted */ + zcs->nextJobID++; + return 0; +} + + +/* ZSTDMT_flushNextJob() : + * output : will be updated with amount of data flushed . + * blockToFlush : if >0, the function will block and wait if there is no data available to flush . + * @return : amount of data remaining within internal buffer, 1 if unknown but > 0, 0 if no more, or an error code */ +static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned blockToFlush) +{ + unsigned const wJobID = zcs->doneJobID & zcs->jobIDMask; + if (zcs->doneJobID == zcs->nextJobID) return 0; /* all flushed ! 
*/ + PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); + while (zcs->jobs[wJobID].jobCompleted==0) { + DEBUGLOG(5, "waiting for jobCompleted signal from job %u", zcs->doneJobID); + if (!blockToFlush) { pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; } /* nothing ready to be flushed => skip */ + pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); /* block when nothing available to flush */ + } + pthread_mutex_unlock(&zcs->jobCompleted_mutex); + /* compression job completed : output can be flushed */ + { ZSTDMT_jobDescription job = zcs->jobs[wJobID]; + if (!job.jobScanned) { + if (ZSTD_isError(job.cSize)) { + DEBUGLOG(5, "compression error detected "); + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + return job.cSize; + } + ZSTDMT_releaseCCtx(zcs->cctxPool, job.cctx); + zcs->jobs[wJobID].cctx = NULL; + DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag); + if (zcs->params.fParams.checksumFlag) { + XXH64_update(&zcs->xxhState, (const char*)job.srcStart + job.dictSize, job.srcSize); + if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) { /* write checksum at end of last section */ + U32 const checksum = (U32)XXH64_digest(&zcs->xxhState); + DEBUGLOG(4, "writing checksum : %08X \n", checksum); + MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum); + job.cSize += 4; + zcs->jobs[wJobID].cSize += 4; + } } + ZSTDMT_releaseBuffer(zcs->buffPool, job.src); + zcs->jobs[wJobID].srcStart = NULL; + zcs->jobs[wJobID].src = g_nullBuffer; + zcs->jobs[wJobID].jobScanned = 1; + } + { size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos); + DEBUGLOG(4, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID); + memcpy((char*)output->dst + output->pos, (const char*)job.dstBuff.start + job.dstFlushed, toWrite); + output->pos += toWrite; + job.dstFlushed += toWrite; + } + if (job.dstFlushed == job.cSize) { /* output buffer fully flushed => move to next one */ + ZSTDMT_releaseBuffer(zcs->buffPool, job.dstBuff); + zcs->jobs[wJobID].dstBuff = g_nullBuffer; + zcs->jobs[wJobID].jobCompleted = 0; + zcs->doneJobID++; + } else { + zcs->jobs[wJobID].dstFlushed = job.dstFlushed; + } + /* return value : how many bytes left in buffer ; fake it to 1 if unknown but >0 */ + if (job.cSize > job.dstFlushed) return (job.cSize - job.dstFlushed); + if (zcs->doneJobID < zcs->nextJobID) return 1; /* still some buffer to flush */ + zcs->allJobsCompleted = zcs->frameEnded; /* frame completed and entirely flushed */ + return 0; /* everything flushed */ +} } + + +size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + size_t const newJobThreshold = zcs->dictSize + zcs->targetSectionSize + zcs->marginSize; + if (zcs->frameEnded) return ERROR(stage_wrong); /* current frame being ended. Only flush is allowed. 
Restart with init */ + if (zcs->nbThreads==1) return ZSTD_compressStream(zcs->cstream, output, input); + + /* fill input buffer */ + { size_t const toLoad = MIN(input->size - input->pos, zcs->inBuffSize - zcs->inBuff.filled); + memcpy((char*)zcs->inBuff.buffer.start + zcs->inBuff.filled, input->src, toLoad); + input->pos += toLoad; + zcs->inBuff.filled += toLoad; + } + + if ( (zcs->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */ + && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) { /* avoid overwriting job round buffer */ + CHECK_F( ZSTDMT_createCompressionJob(zcs, zcs->targetSectionSize, 0) ); + } + + /* check for data to flush */ + CHECK_F( ZSTDMT_flushNextJob(zcs, output, (zcs->inBuff.filled == zcs->inBuffSize)) ); /* block if it wasn't possible to create new job due to saturation */ + + /* recommended next input size : fill current input buffer */ + return zcs->inBuffSize - zcs->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */ +} + + +static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned endFrame) +{ + size_t const srcSize = zcs->inBuff.filled - zcs->dictSize; + + if (srcSize) DEBUGLOG(4, "flushing : %u bytes left to compress", (U32)srcSize); + if ( ((srcSize > 0) || (endFrame && !zcs->frameEnded)) + && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) { + CHECK_F( ZSTDMT_createCompressionJob(zcs, srcSize, endFrame) ); + } + + /* check if there is any data available to flush */ + DEBUGLOG(5, "zcs->doneJobID : %u ; zcs->nextJobID : %u ", zcs->doneJobID, zcs->nextJobID); + return ZSTDMT_flushNextJob(zcs, output, 1); +} + + +size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output) +{ + if (zcs->nbThreads==1) return ZSTD_flushStream(zcs->cstream, output); + return ZSTDMT_flushStream_internal(zcs, output, 0); +} + +size_t ZSTDMT_endStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output) +{ + if (zcs->nbThreads==1) return ZSTD_endStream(zcs->cstream, output); + return ZSTDMT_flushStream_internal(zcs, output, 1); +}
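Buried in the new file above, ZSTDMT_compressCCtx() sizes its work units as follows: the target chunk is four window-lengths (1 << (windowLog + 2)), the number of chunks is capped by the thread count, and the average chunk size is rounded up so the final chunk is never degenerately small. A standalone transcription of just that arithmetic (the helper name mt_avg_chunk_size is mine, not the library's):

    #include <stddef.h>

    /* mirrors the chunk-size computation in ZSTDMT_compressCCtx() above */
    static size_t mt_avg_chunk_size(size_t srcSize, unsigned windowLog,
                                    unsigned nbThreads)
    {
        size_t const chunkTargetSize = (size_t)1 << (windowLog + 2);
        unsigned const nbChunksMax =
            (unsigned)(srcSize / chunkTargetSize) + (srcSize < chunkTargetSize);
        unsigned const nbChunks = nbChunksMax < nbThreads ? nbChunksMax : nbThreads;
        size_t const proposed = (srcSize + (nbChunks - 1)) / nbChunks;
        /* avoid a too-small last chunk by padding the average upward */
        return ((proposed & 0x1FFFF) < 0xFFFF) ? proposed + 0xFFFF : proposed;
    }

For example, 100 MB of input at windowLog 22 on 4 threads gives nbChunksMax = 6, nbChunks = 4, and an average chunk of about 25 MB, so all four workers start immediately and none is left with a sliver.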
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.h Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,78 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + #ifndef ZSTDMT_COMPRESS_H + #define ZSTDMT_COMPRESS_H + + #if defined (__cplusplus) + extern "C" { + #endif + + +/* Note : All prototypes defined in this file shall be considered experimental. + * There is no guarantee of API continuity (yet) on any of these prototypes */ + +/* === Dependencies === */ +#include <stddef.h> /* size_t */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ +#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ + + +/* === Simple one-pass functions === */ + +typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; +ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads); +ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* cctx); + +ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel); + + +/* === Streaming functions === */ + +ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel); +ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be zero == unknown */ + +ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input); + +ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ +ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ + + +/* === Advanced functions and parameters === */ + +#ifndef ZSTDMT_SECTION_SIZE_MIN +# define ZSTDMT_SECTION_SIZE_MIN (1U << 20) /* 1 MB - Minimum size of each compression job */ +#endif + +ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, /**< dict can be released after init, a local copy is preserved within zcs */ + ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be zero == unknown */ + +/* ZSDTMT_parameter : + * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */ +typedef enum { + ZSTDMT_p_sectionSize, /* size of input "section". Each section is compressed in parallel. 0 means default, which is dynamically determined within compression functions */ + ZSTDMT_p_overlapSectionLog /* Log of overlapped section; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */ +} ZSDTMT_parameter; + +/* ZSTDMT_setMTCtxParameter() : + * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter. + * The function must be called typically after ZSTD_createCCtx(). + * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions. 
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ +ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value); + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTDMT_COMPRESS_H */
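Since every prototype above is flagged experimental, the following is only a hedged sketch of how the streaming calls compose, not a recommended pattern: one in-memory input, one output buffer assumed large enough (e.g. sized with ZSTD_compressBound()), 4 threads and level 3 chosen arbitrarily, and most error handling elided.

    #include "zstdmt_compress.h"   /* pulls in zstd.h for ZSTD_inBuffer/ZSTD_outBuffer */

    size_t mt_compress(void* dst, size_t dstCapacity,
                       const void* src, size_t srcSize)
    {
        ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);
        ZSTD_inBuffer input = { src, srcSize, 0 };
        ZSTD_outBuffer output = { dst, dstCapacity, 0 };
        size_t r;
        if (mtctx == NULL) return (size_t)-1;   /* allocation failure */
        r = ZSTDMT_initCStream(mtctx, 3);
        while (!ZSTD_isError(r) && input.pos < input.size)
            r = ZSTDMT_compressStream(mtctx, &output, &input);
        /* per the comments above: endStream returns >0 while data remains */
        while (!ZSTD_isError(r) && (r = ZSTDMT_endStream(mtctx, &output)) > 0) { }
        ZSTDMT_freeCCtx(mtctx);
        return ZSTD_isError(r) ? r : output.pos;   /* compressed size on success */
    }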
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress.c Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress.c Sat Mar 11 13:53:14 2017 -0500 @@ -1444,7 +1444,7 @@ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1) if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, dict, dictSize); #endif - ZSTD_decompressBegin_usingDict(dctx, dict, dictSize); + CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); ZSTD_checkContinuity(dctx, dst); return ZSTD_decompressFrame(dctx, dst, dstCapacity, src, srcSize); } @@ -1671,9 +1671,9 @@ } if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); - dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted); - dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted); - dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted); + dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted); + dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted); + dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted); dictPtr += 12; dctx->litEntropy = dctx->fseEntropy = 1; @@ -1713,39 +1713,44 @@ /* ====== ZSTD_DDict ====== */ struct ZSTD_DDict_s { - void* dict; + void* dictBuffer; + const void* dictContent; size_t dictSize; ZSTD_DCtx* refContext; }; /* typedef'd to ZSTD_DDict within "zstd.h" */ -ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_customMem customMem) +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) { if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; if (!customMem.customAlloc || !customMem.customFree) return NULL; { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); - void* const dictContent = ZSTD_malloc(dictSize, customMem); ZSTD_DCtx* const dctx = ZSTD_createDCtx_advanced(customMem); - if (!dictContent || !ddict || !dctx) { - ZSTD_free(dictContent, customMem); + if (!ddict || !dctx) { ZSTD_free(ddict, customMem); ZSTD_free(dctx, customMem); return NULL; } - if (dictSize) { - memcpy(dictContent, dict, dictSize); + if ((byReference) || (!dict) || (!dictSize)) { + ddict->dictBuffer = NULL; + ddict->dictContent = dict; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, customMem); + if (!internalBuffer) { ZSTD_free(dctx, customMem); ZSTD_free(ddict, customMem); return NULL; } + memcpy(internalBuffer, dict, dictSize); + ddict->dictBuffer = internalBuffer; + ddict->dictContent = internalBuffer; } - { size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, dictContent, dictSize); + { size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, ddict->dictContent, dictSize); if (ZSTD_isError(errorCode)) { - ZSTD_free(dictContent, customMem); + ZSTD_free(ddict->dictBuffer, customMem); ZSTD_free(ddict, customMem); ZSTD_free(dctx, customMem); return NULL; } } - ddict->dict = dictContent; ddict->dictSize = dictSize; ddict->refContext = dctx; return ddict; @@ -1758,15 +1763,27 @@ ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; - return 
ZSTD_createDDict_advanced(dict, dictSize, allocator); + return ZSTD_createDDict_advanced(dict, dictSize, 0, allocator); } + +/*! ZSTD_createDDict_byReference() : + * Create a digested dictionary, ready to start decompression operation without startup delay. + * Dictionary content is simply referenced, and therefore stays in dictBuffer. + * It is important that dictBuffer outlives DDict, it must remain read accessible throughout the lifetime of DDict */ +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + return ZSTD_createDDict_advanced(dictBuffer, dictSize, 1, allocator); +} + + size_t ZSTD_freeDDict(ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = ddict->refContext->customMem; ZSTD_freeDCtx(ddict->refContext); - ZSTD_free(ddict->dict, cMem); + ZSTD_free(ddict->dictBuffer, cMem); ZSTD_free(ddict, cMem); return 0; } @@ -1775,7 +1792,7 @@ size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support sizeof on NULL */ - return sizeof(*ddict) + sizeof(ddict->refContext) + ddict->dictSize; + return sizeof(*ddict) + ZSTD_sizeof_DCtx(ddict->refContext) + (ddict->dictBuffer ? ddict->dictSize : 0) ; } /*! ZSTD_getDictID_fromDict() : @@ -1796,7 +1813,7 @@ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; - return ZSTD_getDictID_fromDict(ddict->dict, ddict->dictSize); + return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); } /*! ZSTD_getDictID_fromFrame() : @@ -1827,7 +1844,7 @@ const ZSTD_DDict* ddict) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1) - if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dict, ddict->dictSize); + if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dictContent, ddict->dictSize); #endif ZSTD_refDCtx(dctx, ddict->refContext); ZSTD_checkContinuity(dctx, dst); @@ -1919,7 +1936,7 @@ zds->stage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; ZSTD_freeDDict(zds->ddictLocal); - if (dict) { + if (dict && dictSize >= 8) { zds->ddictLocal = ZSTD_createDDict(dict, dictSize); if (zds->ddictLocal == NULL) return ERROR(memory_allocation); } else zds->ddictLocal = NULL; @@ -1956,7 +1973,7 @@ switch(paramType) { default : return ERROR(parameter_unknown); - case ZSTDdsp_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break; + case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break; } return 0; } @@ -2007,7 +2024,7 @@ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) { U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); if (legacyVersion) { - const void* const dict = zds->ddict ? zds->ddict->dict : NULL; + const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL; size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0; CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion, dict, dictSize));
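ZSTD_createDDict_byReference() above is the decompression-side twin of the CDict change earlier in this patch: dictBuffer stays NULL, the caller's buffer is borrowed rather than copied, and ZSTD_freeDDict() drops only the reference. A minimal sketch, assuming the declaration is reached via ZSTD_STATIC_LINKING_ONLY; the helper name is illustrative and error handling is abbreviated:

    #define ZSTD_STATIC_LINKING_ONLY   /* assumed to expose ZSTD_createDDict_byReference */
    #include "zstd.h"

    size_t decompress_with_shared_dict(void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const void* dict, size_t dictSize)
    {
        /* 'dict' must stay readable until the DDict is freed */
        ZSTD_DDict* const ddict = ZSTD_createDDict_byReference(dict, dictSize);
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        size_t dSize = (size_t)-1;   /* generic failure */
        if (ddict && dctx)
            dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
                                               src, srcSize, ddict);
        ZSTD_freeDCtx(dctx);
        ZSTD_freeDDict(ddict);   /* does not free 'dict' itself */
        return dSize;
    }

Note also the new `dict && dictSize >= 8` guard in the stream-init path above: buffers too short to carry a dictionary header are now simply treated as no dictionary.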
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/python-zstandard/zstd/dictBuilder/cover.c Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,1021 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +/*-************************************* +* Dependencies +***************************************/ +#include <stdio.h> /* fprintf */ +#include <stdlib.h> /* malloc, free, qsort */ +#include <string.h> /* memset */ +#include <time.h> /* clock */ + +#include "mem.h" /* read */ +#include "pool.h" +#include "threading.h" +#include "zstd_internal.h" /* includes zstd.h */ +#ifndef ZDICT_STATIC_LINKING_ONLY +#define ZDICT_STATIC_LINKING_ONLY +#endif +#include "zdict.h" + +/*-************************************* +* Constants +***************************************/ +#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB)) + +/*-************************************* +* Console display +***************************************/ +static int g_displayLevel = 2; +#define DISPLAY(...) \ + { \ + fprintf(stderr, __VA_ARGS__); \ + fflush(stderr); \ + } +#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ + if (displayLevel >= l) { \ + DISPLAY(__VA_ARGS__); \ + } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ +#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) + +#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ + if (displayLevel >= l) { \ + if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ + g_time = clock(); \ + DISPLAY(__VA_ARGS__); \ + if (displayLevel >= 4) \ + fflush(stdout); \ + } \ + } +#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) +static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; +static clock_t g_time = 0; + +/*-************************************* +* Hash table +*************************************** +* A small specialized hash map for storing activeDmers. +* The map does not resize, so if it becomes full it will loop forever. +* Thus, the map must be large enough to store every value. +* The map implements linear probing and keeps its load less than 0.5. +*/ + +#define MAP_EMPTY_VALUE ((U32)-1) +typedef struct COVER_map_pair_t_s { + U32 key; + U32 value; +} COVER_map_pair_t; + +typedef struct COVER_map_s { + COVER_map_pair_t *data; + U32 sizeLog; + U32 size; + U32 sizeMask; +} COVER_map_t; + +/** + * Clear the map. + */ +static void COVER_map_clear(COVER_map_t *map) { + memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t)); +} + +/** + * Initializes a map of the given size. + * Returns 1 on success and 0 on failure. + * The map must be destroyed with COVER_map_destroy(). + * The map is only guaranteed to be large enough to hold size elements. 
+ */ +static int COVER_map_init(COVER_map_t *map, U32 size) { + map->sizeLog = ZSTD_highbit32(size) + 2; + map->size = (U32)1 << map->sizeLog; + map->sizeMask = map->size - 1; + map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t)); + if (!map->data) { + map->sizeLog = 0; + map->size = 0; + return 0; + } + COVER_map_clear(map); + return 1; +} + +/** + * Internal hash function + */ +static const U32 prime4bytes = 2654435761U; +static U32 COVER_map_hash(COVER_map_t *map, U32 key) { + return (key * prime4bytes) >> (32 - map->sizeLog); +} + +/** + * Helper function that returns the index that a key should be placed into. + */ +static U32 COVER_map_index(COVER_map_t *map, U32 key) { + const U32 hash = COVER_map_hash(map, key); + U32 i; + for (i = hash;; i = (i + 1) & map->sizeMask) { + COVER_map_pair_t *pos = &map->data[i]; + if (pos->value == MAP_EMPTY_VALUE) { + return i; + } + if (pos->key == key) { + return i; + } + } +} + +/** + * Returns the pointer to the value for key. + * If key is not in the map, it is inserted and the value is set to 0. + * The map must not be full. + */ +static U32 *COVER_map_at(COVER_map_t *map, U32 key) { + COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)]; + if (pos->value == MAP_EMPTY_VALUE) { + pos->key = key; + pos->value = 0; + } + return &pos->value; +} + +/** + * Deletes key from the map if present. + */ +static void COVER_map_remove(COVER_map_t *map, U32 key) { + U32 i = COVER_map_index(map, key); + COVER_map_pair_t *del = &map->data[i]; + U32 shift = 1; + if (del->value == MAP_EMPTY_VALUE) { + return; + } + for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) { + COVER_map_pair_t *const pos = &map->data[i]; + /* If the position is empty we are done */ + if (pos->value == MAP_EMPTY_VALUE) { + del->value = MAP_EMPTY_VALUE; + return; + } + /* If pos can be moved to del do so */ + if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) { + del->key = pos->key; + del->value = pos->value; + del = pos; + shift = 1; + } else { + ++shift; + } + } +} + +/** + * Destroyes a map that is inited with COVER_map_init(). + */ +static void COVER_map_destroy(COVER_map_t *map) { + if (map->data) { + free(map->data); + } + map->data = NULL; + map->size = 0; +} + +/*-************************************* +* Context +***************************************/ + +typedef struct { + const BYTE *samples; + size_t *offsets; + const size_t *samplesSizes; + size_t nbSamples; + U32 *suffix; + size_t suffixSize; + U32 *freqs; + U32 *dmerAt; + unsigned d; +} COVER_ctx_t; + +/* We need a global context for qsort... */ +static COVER_ctx_t *g_ctx = NULL; + +/*-************************************* +* Helper functions +***************************************/ + +/** + * Returns the sum of the sample sizes. + */ +static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { + size_t sum = 0; + size_t i; + for (i = 0; i < nbSamples; ++i) { + sum += samplesSizes[i]; + } + return sum; +} + +/** + * Returns -1 if the dmer at lp is less than the dmer at rp. + * Return 0 if the dmers at lp and rp are equal. + * Returns 1 if the dmer at lp is greater than the dmer at rp. + */ +static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) { + const U32 lhs = *(const U32 *)lp; + const U32 rhs = *(const U32 *)rp; + return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d); +} + +/** + * Same as COVER_cmp() except ties are broken by pointer value + * NOTE: g_ctx must be set to call this function. 
A global is required because + * qsort doesn't take an opaque pointer. + */ +static int COVER_strict_cmp(const void *lp, const void *rp) { + int result = COVER_cmp(g_ctx, lp, rp); + if (result == 0) { + result = lp < rp ? -1 : 1; + } + return result; +} + +/** + * Returns the first pointer in [first, last) whose element does not compare + * less than value. If no such element exists it returns last. + */ +static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, + size_t value) { + size_t count = last - first; + while (count != 0) { + size_t step = count / 2; + const size_t *ptr = first; + ptr += step; + if (*ptr < value) { + first = ++ptr; + count -= step + 1; + } else { + count = step; + } + } + return first; +} + +/** + * Generic groupBy function. + * Groups an array sorted by cmp into groups with equivalent values. + * Calls grp for each group. + */ +static void +COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx, + int (*cmp)(COVER_ctx_t *, const void *, const void *), + void (*grp)(COVER_ctx_t *, const void *, const void *)) { + const BYTE *ptr = (const BYTE *)data; + size_t num = 0; + while (num < count) { + const BYTE *grpEnd = ptr + size; + ++num; + while (num < count && cmp(ctx, ptr, grpEnd) == 0) { + grpEnd += size; + ++num; + } + grp(ctx, ptr, grpEnd); + ptr = grpEnd; + } +} + +/*-************************************* +* Cover functions +***************************************/ + +/** + * Called on each group of positions with the same dmer. + * Counts the frequency of each dmer and saves it in the suffix array. + * Fills `ctx->dmerAt`. + */ +static void COVER_group(COVER_ctx_t *ctx, const void *group, + const void *groupEnd) { + /* The group consists of all the positions with the same first d bytes. */ + const U32 *grpPtr = (const U32 *)group; + const U32 *grpEnd = (const U32 *)groupEnd; + /* The dmerId is how we will reference this dmer. + * This allows us to map the whole dmer space to a much smaller space, the + * size of the suffix array. + */ + const U32 dmerId = (U32)(grpPtr - ctx->suffix); + /* Count the number of samples this dmer shows up in */ + U32 freq = 0; + /* Details */ + const size_t *curOffsetPtr = ctx->offsets; + const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples; + /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a + * different sample than the last. + */ + size_t curSampleEnd = ctx->offsets[0]; + for (; grpPtr != grpEnd; ++grpPtr) { + /* Save the dmerId for this position so we can get back to it. */ + ctx->dmerAt[*grpPtr] = dmerId; + /* Dictionaries only help for the first reference to the dmer. + * After that zstd can reference the match from the previous reference. + * So only count each dmer once for each sample it is in. + */ + if (*grpPtr < curSampleEnd) { + continue; + } + freq += 1; + /* Binary search to find the end of the sample *grpPtr is in. + * In the common case that grpPtr + 1 == grpEnd we can skip the binary + * search because the loop is over. + */ + if (grpPtr + 1 != grpEnd) { + const size_t *sampleEndPtr = + COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr); + curSampleEnd = *sampleEndPtr; + curOffsetPtr = sampleEndPtr + 1; + } + } + /* At this point we are never going to look at this segment of the suffix + * array again. We take advantage of this fact to save memory. + * We store the frequency of the dmer in the first position of the group, + * which is dmerId. 
+ */ + ctx->suffix[dmerId] = freq; +} + +/** + * A segment is a range in the source as well as the score of the segment. + */ +typedef struct { + U32 begin; + U32 end; + double score; +} COVER_segment_t; + +/** + * Selects the best segment in an epoch. + * Segments of are scored according to the function: + * + * Let F(d) be the frequency of dmer d. + * Let S_i be the dmer at position i of segment S which has length k. + * + * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) + * + * Once the dmer d is in the dictionay we set F(d) = 0. + */ +static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs, + COVER_map_t *activeDmers, U32 begin, + U32 end, COVER_params_t parameters) { + /* Constants */ + const U32 k = parameters.k; + const U32 d = parameters.d; + const U32 dmersInK = k - d + 1; + /* Try each segment (activeSegment) and save the best (bestSegment) */ + COVER_segment_t bestSegment = {0, 0, 0}; + COVER_segment_t activeSegment; + /* Reset the activeDmers in the segment */ + COVER_map_clear(activeDmers); + /* The activeSegment starts at the beginning of the epoch. */ + activeSegment.begin = begin; + activeSegment.end = begin; + activeSegment.score = 0; + /* Slide the activeSegment through the whole epoch. + * Save the best segment in bestSegment. + */ + while (activeSegment.end < end) { + /* The dmerId for the dmer at the next position */ + U32 newDmer = ctx->dmerAt[activeSegment.end]; + /* The entry in activeDmers for this dmerId */ + U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer); + /* If the dmer isn't already present in the segment add its score. */ + if (*newDmerOcc == 0) { + /* The paper suggest using the L-0.5 norm, but experiments show that it + * doesn't help. + */ + activeSegment.score += freqs[newDmer]; + } + /* Add the dmer to the segment */ + activeSegment.end += 1; + *newDmerOcc += 1; + + /* If the window is now too large, drop the first position */ + if (activeSegment.end - activeSegment.begin == dmersInK + 1) { + U32 delDmer = ctx->dmerAt[activeSegment.begin]; + U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer); + activeSegment.begin += 1; + *delDmerOcc -= 1; + /* If this is the last occurence of the dmer, subtract its score */ + if (*delDmerOcc == 0) { + COVER_map_remove(activeDmers, delDmer); + activeSegment.score -= freqs[delDmer]; + } + } + + /* If this segment is the best so far save it */ + if (activeSegment.score > bestSegment.score) { + bestSegment = activeSegment; + } + } + { + /* Trim off the zero frequency head and tail from the segment. */ + U32 newBegin = bestSegment.end; + U32 newEnd = bestSegment.begin; + U32 pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { + U32 freq = freqs[ctx->dmerAt[pos]]; + if (freq != 0) { + newBegin = MIN(newBegin, pos); + newEnd = pos + 1; + } + } + bestSegment.begin = newBegin; + bestSegment.end = newEnd; + } + { + /* Zero out the frequency of each dmer covered by the chosen segment. */ + U32 pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { + freqs[ctx->dmerAt[pos]] = 0; + } + } + return bestSegment; +} + +/** + * Check the validity of the parameters. + * Returns non-zero if the parameters are valid and 0 otherwise. + */ +static int COVER_checkParameters(COVER_params_t parameters) { + /* k and d are required parameters */ + if (parameters.d == 0 || parameters.k == 0) { + return 0; + } + /* d <= k */ + if (parameters.d > parameters.k) { + return 0; + } + return 1; +} + +/** + * Clean up a context initialized with `COVER_ctx_init()`. 
+ */ +static void COVER_ctx_destroy(COVER_ctx_t *ctx) { + if (!ctx) { + return; + } + if (ctx->suffix) { + free(ctx->suffix); + ctx->suffix = NULL; + } + if (ctx->freqs) { + free(ctx->freqs); + ctx->freqs = NULL; + } + if (ctx->dmerAt) { + free(ctx->dmerAt); + ctx->dmerAt = NULL; + } + if (ctx->offsets) { + free(ctx->offsets); + ctx->offsets = NULL; + } +} + +/** + * Prepare a context for dictionary building. + * The context is only dependent on the parameter `d` and can used multiple + * times. + * Returns 1 on success or zero on error. + * The context must be destroyed with `COVER_ctx_destroy()`. + */ +static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, + const size_t *samplesSizes, unsigned nbSamples, + unsigned d) { + const BYTE *const samples = (const BYTE *)samplesBuffer; + const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); + /* Checks */ + if (totalSamplesSize < d || + totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { + DISPLAYLEVEL(1, "Total samples size is too large, maximum size is %u MB\n", + (COVER_MAX_SAMPLES_SIZE >> 20)); + return 0; + } + /* Zero the context */ + memset(ctx, 0, sizeof(*ctx)); + DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples, + (U32)totalSamplesSize); + ctx->samples = samples; + ctx->samplesSizes = samplesSizes; + ctx->nbSamples = nbSamples; + /* Partial suffix array */ + ctx->suffixSize = totalSamplesSize - d + 1; + ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + /* Maps index to the dmerID */ + ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + /* The offsets of each file */ + ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t)); + if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) { + DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n"); + COVER_ctx_destroy(ctx); + return 0; + } + ctx->freqs = NULL; + ctx->d = d; + + /* Fill offsets from the samlesSizes */ + { + U32 i; + ctx->offsets[0] = 0; + for (i = 1; i <= nbSamples; ++i) { + ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; + } + } + DISPLAYLEVEL(2, "Constructing partial suffix array\n"); + { + /* suffix is a partial suffix array. + * It only sorts suffixes by their first parameters.d bytes. + * The sort is stable, so each dmer group is sorted by position in input. + */ + U32 i; + for (i = 0; i < ctx->suffixSize; ++i) { + ctx->suffix[i] = i; + } + /* qsort doesn't take an opaque pointer, so pass as a global */ + g_ctx = ctx; + qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), &COVER_strict_cmp); + } + DISPLAYLEVEL(2, "Computing frequencies\n"); + /* For each dmer group (group of positions with the same first d bytes): + * 1. For each position we set dmerAt[position] = dmerID. The dmerID is + * (groupBeginPtr - suffix). This allows us to go from position to + * dmerID so we can look up values in freq. + * 2. We calculate how many samples the dmer occurs in and save it in + * freqs[dmerId]. + */ + COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, &COVER_cmp, + &COVER_group); + ctx->freqs = ctx->suffix; + ctx->suffix = NULL; + return 1; +} + +/** + * Given the prepared context build the dictionary. + */ +static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, + COVER_map_t *activeDmers, void *dictBuffer, + size_t dictBufferCapacity, + COVER_params_t parameters) { + BYTE *const dict = (BYTE *)dictBuffer; + size_t tail = dictBufferCapacity; + /* Divide the data up into epochs of equal size. + * We will select at least one segment from each epoch. 
+ */ + const U32 epochs = (U32)(dictBufferCapacity / parameters.k); + const U32 epochSize = (U32)(ctx->suffixSize / epochs); + size_t epoch; + DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs, + epochSize); + /* Loop through the epochs until there are no more segments or the dictionary + * is full. + */ + for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) { + const U32 epochBegin = (U32)(epoch * epochSize); + const U32 epochEnd = epochBegin + epochSize; + size_t segmentSize; + /* Select a segment */ + COVER_segment_t segment = COVER_selectSegment( + ctx, freqs, activeDmers, epochBegin, epochEnd, parameters); + /* Trim the segment if necessary and if it is empty then we are done */ + segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); + if (segmentSize == 0) { + break; + } + /* We fill the dictionary from the back to allow the best segments to be + * referenced with the smallest offsets. + */ + tail -= segmentSize; + memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); + DISPLAYUPDATE( + 2, "\r%u%% ", + (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); + } + DISPLAYLEVEL(2, "\r%79s\r", ""); + return tail; +} + +/** + * Translate from COVER_params_t to ZDICT_params_t required for finalizing the + * dictionary. + */ +static ZDICT_params_t COVER_translateParams(COVER_params_t parameters) { + ZDICT_params_t zdictParams; + memset(&zdictParams, 0, sizeof(zdictParams)); + zdictParams.notificationLevel = 1; + zdictParams.dictID = parameters.dictID; + zdictParams.compressionLevel = parameters.compressionLevel; + return zdictParams; +} + +/** + * Constructs a dictionary using a heuristic based on the following paper: + * + * Liao, Petri, Moffat, Wirth + * Effective Construction of Relative Lempel-Ziv Dictionaries + * Published in WWW 2016. 
+ */
+ZDICTLIB_API size_t COVER_trainFromBuffer(
+    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
+    const size_t *samplesSizes, unsigned nbSamples, COVER_params_t parameters) {
+  BYTE *const dict = (BYTE *)dictBuffer;
+  COVER_ctx_t ctx;
+  COVER_map_t activeDmers;
+  /* Checks */
+  if (!COVER_checkParameters(parameters)) {
+    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+    return ERROR(GENERIC);
+  }
+  if (nbSamples == 0) {
+    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+    return ERROR(GENERIC);
+  }
+  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                 ZDICT_DICTSIZE_MIN);
+    return ERROR(dstSize_tooSmall);
+  }
+  /* Initialize global data */
+  g_displayLevel = parameters.notificationLevel;
+  /* Initialize context and activeDmers */
+  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                      parameters.d)) {
+    return ERROR(GENERIC);
+  }
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    COVER_ctx_destroy(&ctx);
+    return ERROR(GENERIC);
+  }
+
+  DISPLAYLEVEL(2, "Building dictionary\n");
+  {
+    const size_t tail =
+        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
+                              dictBufferCapacity, parameters);
+    ZDICT_params_t zdictParams = COVER_translateParams(parameters);
+    const size_t dictionarySize = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        samplesBuffer, samplesSizes, nbSamples, zdictParams);
+    if (!ZSTD_isError(dictionarySize)) {
+      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+                   (U32)dictionarySize);
+    }
+    COVER_ctx_destroy(&ctx);
+    COVER_map_destroy(&activeDmers);
+    return dictionarySize;
+  }
+}
+
+/**
+ * COVER_best_t is used for two purposes:
+ * 1. Synchronizing threads.
+ * 2. Saving the best parameters and dictionary.
+ *
+ * All of the methods except COVER_best_init() are thread safe if zstd is
+ * compiled with multithreaded support.
+ */
+typedef struct COVER_best_s {
+  pthread_mutex_t mutex;
+  pthread_cond_t cond;
+  size_t liveJobs;
+  void *dict;
+  size_t dictSize;
+  COVER_params_t parameters;
+  size_t compressedSize;
+} COVER_best_t;
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+static void COVER_best_init(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_init(&best->mutex, NULL);
+  pthread_cond_init(&best->cond, NULL);
+  best->liveJobs = 0;
+  best->dict = NULL;
+  best->dictSize = 0;
+  best->compressedSize = (size_t)-1;
+  memset(&best->parameters, 0, sizeof(best->parameters));
+}
+
+/**
+ * Wait until liveJobs == 0.
+ */
+static void COVER_best_wait(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_lock(&best->mutex);
+  while (best->liveJobs != 0) {
+    pthread_cond_wait(&best->cond, &best->mutex);
+  }
+  pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+static void COVER_best_destroy(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  COVER_best_wait(best);
+  if (best->dict) {
+    free(best->dict);
+  }
+  pthread_mutex_destroy(&best->mutex);
+  pthread_cond_destroy(&best->cond);
+}
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+static void COVER_best_start(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_lock(&best->mutex);
+  ++best->liveJobs;
+  pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Called when a thread finishes executing, whether on error or success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far save it and its parameters.
+ */
+static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+                              COVER_params_t parameters, void *dict,
+                              size_t dictSize) {
+  if (!best) {
+    return;
+  }
+  {
+    size_t liveJobs;
+    pthread_mutex_lock(&best->mutex);
+    --best->liveJobs;
+    liveJobs = best->liveJobs;
+    /* If the new dictionary is better */
+    if (compressedSize < best->compressedSize) {
+      /* Allocate space if necessary */
+      if (!best->dict || best->dictSize < dictSize) {
+        if (best->dict) {
+          free(best->dict);
+        }
+        best->dict = malloc(dictSize);
+        if (!best->dict) {
+          best->compressedSize = ERROR(GENERIC);
+          best->dictSize = 0;
+          /* Unlock and signal waiters before the early return so that
+           * COVER_best_wait() cannot deadlock on the held mutex. */
+          pthread_mutex_unlock(&best->mutex);
+          if (liveJobs == 0) {
+            pthread_cond_broadcast(&best->cond);
+          }
+          return;
+        }
+      }
+      /* Save the dictionary, parameters, and size */
+      memcpy(best->dict, dict, dictSize);
+      best->dictSize = dictSize;
+      best->parameters = parameters;
+      best->compressedSize = compressedSize;
+    }
+    pthread_mutex_unlock(&best->mutex);
+    if (liveJobs == 0) {
+      pthread_cond_broadcast(&best->cond);
+    }
+  }
+}
+
+/**
+ * Parameters for COVER_tryParameters().
+ */
+typedef struct COVER_tryParameters_data_s {
+  const COVER_ctx_t *ctx;
+  COVER_best_t *best;
+  size_t dictBufferCapacity;
+  COVER_params_t parameters;
+} COVER_tryParameters_data_t;
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void COVER_tryParameters(void *opaque) {
+  /* Save parameters as local variables */
+  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
+  const COVER_ctx_t *const ctx = data->ctx;
+  const COVER_params_t parameters = data->parameters;
+  size_t dictBufferCapacity = data->dictBufferCapacity;
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Allocate space for hash table, dict, and freqs */
+  COVER_map_t activeDmers;
+  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    goto _cleanup;
+  }
+  if (!dict || !freqs) {
+    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+    goto _cleanup;
+  }
+  /* Copy the frequencies because we need to modify them */
+  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
+  /* Build the dictionary */
+  {
+    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
+                                              dictBufferCapacity, parameters);
+    const ZDICT_params_t zdictParams = COVER_translateParams(parameters);
+    dictBufferCapacity = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples, zdictParams);
+    if (ZDICT_isError(dictBufferCapacity)) {
+      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+      goto _cleanup;
+    }
+  }
+  /* Check total compressed size */
+  {
+    /* Pointers */
+    ZSTD_CCtx *cctx;
+    ZSTD_CDict *cdict;
+    void *dst;
+    /* Local variables */
+    size_t dstCapacity;
+    size_t i;
+    /* Allocate dst with enough space to compress the maximum sized sample */
+    {
+      size_t maxSampleSize = 0;
+      for (i = 0; i < ctx->nbSamples; ++i) {
+        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
+      }
+      dstCapacity = ZSTD_compressBound(maxSampleSize);
+      dst = malloc(dstCapacity);
+    }
+    /* Create the cctx
and cdict */ + cctx = ZSTD_createCCtx(); + cdict = + ZSTD_createCDict(dict, dictBufferCapacity, parameters.compressionLevel); + if (!dst || !cctx || !cdict) { + goto _compressCleanup; + } + /* Compress each sample and sum their sizes (or error) */ + totalCompressedSize = 0; + for (i = 0; i < ctx->nbSamples; ++i) { + const size_t size = ZSTD_compress_usingCDict( + cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i], + ctx->samplesSizes[i], cdict); + if (ZSTD_isError(size)) { + totalCompressedSize = ERROR(GENERIC); + goto _compressCleanup; + } + totalCompressedSize += size; + } + _compressCleanup: + ZSTD_freeCCtx(cctx); + ZSTD_freeCDict(cdict); + if (dst) { + free(dst); + } + } + +_cleanup: + COVER_best_finish(data->best, totalCompressedSize, parameters, dict, + dictBufferCapacity); + free(data); + COVER_map_destroy(&activeDmers); + if (dict) { + free(dict); + } + if (freqs) { + free(freqs); + } +} + +ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void *dictBuffer, + size_t dictBufferCapacity, + const void *samplesBuffer, + const size_t *samplesSizes, + unsigned nbSamples, + COVER_params_t *parameters) { + /* constants */ + const unsigned nbThreads = parameters->nbThreads; + const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; + const unsigned kMaxD = parameters->d == 0 ? 16 : parameters->d; + const unsigned kMinK = parameters->k == 0 ? kMaxD : parameters->k; + const unsigned kMaxK = parameters->k == 0 ? 2048 : parameters->k; + const unsigned kSteps = parameters->steps == 0 ? 32 : parameters->steps; + const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); + const unsigned kIterations = + (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); + /* Local variables */ + const int displayLevel = parameters->notificationLevel; + unsigned iteration = 1; + unsigned d; + unsigned k; + COVER_best_t best; + POOL_ctx *pool = NULL; + /* Checks */ + if (kMinK < kMaxD || kMaxK < kMinK) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + return ERROR(GENERIC); + } + if (nbSamples == 0) { + DISPLAYLEVEL(1, "Cover must have at least one input file\n"); + return ERROR(GENERIC); + } + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", + ZDICT_DICTSIZE_MIN); + return ERROR(dstSize_tooSmall); + } + if (nbThreads > 1) { + pool = POOL_create(nbThreads, 1); + if (!pool) { + return ERROR(memory_allocation); + } + } + /* Initialization */ + COVER_best_init(&best); + /* Turn down global display level to clean up display at level 2 and below */ + g_displayLevel = parameters->notificationLevel - 1; + /* Loop through d first because each new value needs a new context */ + LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", + kIterations); + for (d = kMinD; d <= kMaxD; d += 2) { + /* Initialize the context for this value of d */ + COVER_ctx_t ctx; + LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + COVER_best_destroy(&best); + return ERROR(GENERIC); + } + /* Loop through k reusing the same context */ + for (k = kMinK; k <= kMaxK; k += kStepSize) { + /* Prepare the arguments */ + COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( + sizeof(COVER_tryParameters_data_t)); + LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + if (!data) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + 
COVER_best_destroy(&best);
+        COVER_ctx_destroy(&ctx);
+        return ERROR(GENERIC);
+      }
+      data->ctx = &ctx;
+      data->best = &best;
+      data->dictBufferCapacity = dictBufferCapacity;
+      data->parameters = *parameters;
+      data->parameters.k = k;
+      data->parameters.d = d;
+      data->parameters.steps = kSteps;
+      /* Check the parameters */
+      if (!COVER_checkParameters(data->parameters)) {
+        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+        free(data); /* data still belongs to us here; don't leak it */
+        continue;
+      }
+      /* Call the function and pass ownership of data to it */
+      COVER_best_start(&best);
+      if (pool) {
+        POOL_add(pool, &COVER_tryParameters, data);
+      } else {
+        COVER_tryParameters(data);
+      }
+      /* Print status */
+      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
+                         (U32)((iteration * 100) / kIterations));
+      ++iteration;
+    }
+    COVER_best_wait(&best);
+    COVER_ctx_destroy(&ctx);
+  }
+  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+  /* Fill the output buffer and parameters with the results of the best
+   * parameters */
+  {
+    const size_t dictSize = best.dictSize;
+    if (ZSTD_isError(best.compressedSize)) {
+      COVER_best_destroy(&best);
+      return best.compressedSize;
+    }
+    *parameters = best.parameters;
+    memcpy(dictBuffer, best.dict, dictSize);
+    COVER_best_destroy(&best);
+    POOL_free(pool);
+    return dictSize;
+  }
+}
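For orientation, here is a minimal sketch of how the trainer above is driven. It is not part of this patch, the sample strings are hypothetical, and real training wants a few thousand samples; three tiny strings like these will likely fail the minimum-content checks, which the error handling below would report.

/* Sketch: train a dictionary with COVER_trainFromBuffer(). */
#include <stdio.h>
#include <string.h>
#define ZDICT_STATIC_LINKING_ONLY
#include "zdict.h"   /* COVER_params_t, COVER_trainFromBuffer */

int main(void)
{
    /* Hypothetical samples, concatenated into one flat buffer as required. */
    const char *texts[3] = { "GET /a HTTP/1.1", "GET /b HTTP/1.1",
                             "GET /c HTTP/1.1" };
    char samplesBuffer[256];
    size_t samplesSizes[3];
    char dict[1024];                 /* >= ZDICT_DICTSIZE_MIN (512) */
    COVER_params_t params;
    size_t pos = 0, dictSize;
    unsigned i;

    for (i = 0; i < 3; ++i) {
        samplesSizes[i] = strlen(texts[i]);
        memcpy(samplesBuffer + pos, texts[i], samplesSizes[i]);
        pos += samplesSizes[i];
    }
    memset(&params, 0, sizeof(params));
    params.d = 8;                    /* dmer size */
    params.k = 64;                   /* segment size, d <= k */

    dictSize = COVER_trainFromBuffer(dict, sizeof(dict),
                                     samplesBuffer, samplesSizes, 3, params);
    if (ZDICT_isError(dictSize)) {
        fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
        return 1;
    }
    printf("trained a %u byte dictionary\n", (unsigned)dictSize);
    return 0;
}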
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.c Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.c Sat Mar 11 13:53:14 2017 -0500 @@ -36,12 +36,11 @@ #include <time.h> /* clock */ #include "mem.h" /* read */ -#include "error_private.h" #include "fse.h" /* FSE_normalizeCount, FSE_writeNCount */ #define HUF_STATIC_LINKING_ONLY -#include "huf.h" +#include "huf.h" /* HUF_buildCTable, HUF_writeCTable */ #include "zstd_internal.h" /* includes zstd.h */ -#include "xxhash.h" +#include "xxhash.h" /* XXH64 */ #include "divsufsort.h" #ifndef ZDICT_STATIC_LINKING_ONLY # define ZDICT_STATIC_LINKING_ONLY @@ -61,7 +60,7 @@ #define NOISELENGTH 32 #define MINRATIO 4 -static const int g_compressionLevel_default = 5; +static const int g_compressionLevel_default = 6; static const U32 g_selectivity_default = 9; static const size_t g_provision_entropySize = 200; static const size_t g_min_fast_dictContent = 192; @@ -307,13 +306,13 @@ } while (length >=MINMATCHLENGTH); /* look backward */ - length = MINMATCHLENGTH; - while ((length >= MINMATCHLENGTH) & (start > 0)) { - length = ZDICT_count(b + pos, b + suffix[start - 1]); - if (length >= LLIMIT) length = LLIMIT - 1; - lengthList[length]++; - if (length >= MINMATCHLENGTH) start--; - } + length = MINMATCHLENGTH; + while ((length >= MINMATCHLENGTH) & (start > 0)) { + length = ZDICT_count(b + pos, b + suffix[start - 1]); + if (length >= LLIMIT) length = LLIMIT - 1; + lengthList[length]++; + if (length >= MINMATCHLENGTH) start--; + } /* largest useful length */ memset(cumulLength, 0, sizeof(cumulLength)); @@ -570,7 +569,7 @@ if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; } } cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_ABSOLUTEMAX, src, srcSize); - if (ZSTD_isError(cSize)) { DISPLAYLEVEL(1, "warning : could not compress sample size %u \n", (U32)srcSize); return; } + if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; } if (cSize) { /* if == 0; block is not compressible */ const seqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc); @@ -825,6 +824,55 @@ } + +size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, + const void* customDictContent, size_t dictContentSize, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_params_t params) +{ + size_t hSize; +#define HBUFFSIZE 256 + BYTE header[HBUFFSIZE]; + int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel; + U32 const notificationLevel = params.notificationLevel; + + /* check conditions */ + if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall); + if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong); + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall); + + /* dictionary header */ + MEM_writeLE32(header, ZSTD_DICT_MAGIC); + { U64 const randomID = XXH64(customDictContent, dictContentSize, 0); + U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768; + U32 const dictID = params.dictID ? params.dictID : compliantID; + MEM_writeLE32(header+4, dictID); + } + hSize = 8; + + /* entropy tables */ + DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ + DISPLAYLEVEL(2, "statistics ... 
\n"); + { size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize, + compressionLevel, + samplesBuffer, samplesSizes, nbSamples, + customDictContent, dictContentSize, + notificationLevel); + if (ZDICT_isError(eSize)) return eSize; + hSize += eSize; + } + + /* copy elements in final buffer ; note : src and dst buffer can overlap */ + if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize; + { size_t const dictSize = hSize + dictContentSize; + char* dictEnd = (char*)dictBuffer + dictSize; + memmove(dictEnd - dictContentSize, customDictContent, dictContentSize); + memcpy(dictBuffer, header, hSize); + return dictSize; + } +} + + size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t params)
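The new ZDICT_finalizeDictionary() above is the piece that turns raw selected content into a conformant dictionary: a magic number and dictID header, entropy tables computed from the samples, then the content itself. A hedged sketch of a caller follows; it is not part of the patch and all names are illustrative.

#include <string.h>
#define ZDICT_STATIC_LINKING_ONLY
#include "zdict.h"

/* Wrap raw dictionary content (selected by any custom algorithm) into a
 * conformant zstd dictionary. */
static size_t finalize_custom_dict(void *dictBuffer, size_t dictBufferCapacity,
                                   const void *content, size_t contentSize,
                                   const void *samplesBuffer,
                                   const size_t *samplesSizes,
                                   unsigned nbSamples)
{
    ZDICT_params_t params;
    memset(&params, 0, sizeof(params));
    params.compressionLevel = 0;  /* 0 selects the default (6 after this change) */
    /* Requires contentSize >= ZDICT_CONTENTSIZE_MIN and dictBufferCapacity >=
     * ZDICT_DICTSIZE_MIN; otherwise an error code is returned, which callers
     * should test with ZDICT_isError(). */
    return ZDICT_finalizeDictionary(dictBuffer, dictBufferCapacity,
                                    content, contentSize,
                                    samplesBuffer, samplesSizes, nbSamples,
                                    params);
}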
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Sat Mar 11 13:53:14 2017 -0500
@@ -19,15 +19,18 @@
 
 #include <stddef.h>  /* size_t */
 
-/*====== Export for Windows ======*/
-/*!
-* ZSTD_DLL_EXPORT :
-* Enable exporting of functions when building a Windows DLL
-*/
-#if defined(_WIN32) && defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-# define ZDICTLIB_API __declspec(dllexport)
+/* ===== ZDICTLIB_API : control library symbols visibility ===== */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
 #else
-# define ZDICTLIB_API
+# define ZDICTLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define ZDICTLIB_API ZDICTLIB_VISIBILITY
 #endif
 
 
@@ -79,27 +82,114 @@
     or an error code, which can be tested by ZDICT_isError().
     note : ZDICT_trainFromBuffer_advanced() will send notifications into stderr if instructed to, using notificationLevel>0.
 */
-size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                ZDICT_params_t parameters);
+
+/*! COVER_params_t :
+    For all values 0 means default.
+    k and d are the only required parameters.
+*/
+typedef struct {
+    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
+
+    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    unsigned notificationLevel;  /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
+    unsigned dictID;             /* 0 means auto mode (32-bits random value); other : force dictID value */
+    int      compressionLevel;   /* 0 means default; target a specific zstd compression level */
+} COVER_params_t;
+
+
+/*! COVER_trainFromBuffer() :
+    Train a dictionary from an array of samples using the COVER algorithm.
+    Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+    The resulting dictionary will be saved into `dictBuffer`.
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+              or an error code, which can be tested with ZDICT_isError().
+    Note : COVER_trainFromBuffer() requires about 9 bytes of memory for each input byte.
+    Tips : In general, a reasonable dictionary has a size of ~ 100 KB.
+           It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`.
+           In general, it's recommended to provide a few thousand samples, but this can vary a lot.
+           It's recommended that the total size of all samples be about ~100x the target size of the dictionary.
+*/
+ZDICTLIB_API size_t COVER_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                              COVER_params_t parameters);
+
+/*! COVER_optimizeTrainFromBuffer() :
+    The same requirements as above hold for all the parameters except `parameters`.
+    This function tries many parameter combinations and picks the best parameters.
+    `*parameters` is filled with the best parameters found, and the dictionary
+    constructed with those parameters is stored in `dictBuffer`.
+
+    All of the parameters d, k, steps are optional.
+    If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
+    If steps is zero, the default value (32) is used.
+    If k is non-zero then we don't check multiple values of k, otherwise we check `steps` values of k in [16, 2048].
+
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+              or an error code, which can be tested with ZDICT_isError().
+              On success `*parameters` contains the parameters selected.
+    Note : COVER_optimizeTrainFromBuffer() requires about 8 bytes of memory for each input byte and an additional 5 bytes of memory per input byte for each thread.
+*/
+ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                              const void* samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+                              COVER_params_t *parameters);
+
+/*! ZDICT_finalizeDictionary() :
+
+    Given a custom content as a basis for dictionary, and a set of samples,
+    finalize dictionary by adding headers and statistics.
+
+    Samples must be stored concatenated in a flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
+
+    dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
+    dictBufferCapacity must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
+
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
+              or an error code, which can be tested by ZDICT_isError().
+    note : ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
+    note 2 : dictBuffer and customDictContent can overlap
+*/
+#define ZDICT_CONTENTSIZE_MIN 256
+#define ZDICT_DICTSIZE_MIN    512
+ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* customDictContent, size_t dictContentSize,
                                 const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                 ZDICT_params_t parameters);
 
-/*! ZDICT_addEntropyTablesFromBuffer() :
-
-    Given a content-only dictionary (built using any 3rd party algorithm),
-    add entropy tables computed from an array of samples.
-    Samples must be stored concatenated in a flat buffer `samplesBuffer`,
-    supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
-
-    The input dictionary content must be stored *at the end* of `dictBuffer`.
-    Its size is `dictContentSize`.
-    The resulting dictionary with added entropy tables will be *written back to `dictBuffer`*,
-    starting from its beginning.
-    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`).
-*/
+/* Deprecation warnings */
+/* It is generally possible to disable deprecation warnings from compiler,
+   for example with -Wno-deprecated-declarations for gcc
+   or _CRT_SECURE_NO_WARNINGS in Visual.
+ Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ +#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS +# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ +#else +# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZDICT_DEPRECATED(message) ZDICTLIB_API [[deprecated(message)]] +# elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) +# elif (ZDICT_GCC_VERSION >= 301) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") +# define ZDICT_DEPRECATED(message) ZDICTLIB_API +# endif +#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ + +ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); - + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); #endif /* ZDICT_STATIC_LINKING_ONLY */
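To make the parameter search documented above concrete, here is a sketch of an optimizing caller. It is not from the patch, and the buffers are assumed to be laid out as for COVER_trainFromBuffer().

#include <string.h>
#define ZDICT_STATIC_LINKING_ONLY
#include "zdict.h"

/* Let the optimizer pick k and d: with both left at 0 it scans
 * d in {6, 8, 10, 12, 14, 16} and `steps` values of k in [16, 2048]. */
static size_t train_optimized(void *dict, size_t dictCapacity,
                              const void *samplesBuffer,
                              const size_t *samplesSizes, unsigned nbSamples)
{
    COVER_params_t params;
    size_t dictSize;

    memset(&params, 0, sizeof(params));
    params.steps = 40;       /* 0 would mean the default of 32 */
    params.nbThreads = 4;    /* ignored unless built with ZSTD_MULTITHREAD */

    dictSize = COVER_optimizeTrainFromBuffer(dict, dictCapacity,
                                             samplesBuffer, samplesSizes,
                                             nbSamples, &params);
    if (!ZDICT_isError(dictSize)) {
        /* params.k and params.d now hold the winning parameters */
    }
    return dictSize;
}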
--- a/contrib/python-zstandard/zstd/zstd.h	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/python-zstandard/zstd/zstd.h	Sat Mar 11 13:53:14 2017 -0500
@@ -20,13 +20,16 @@
 
 /* ===== ZSTDLIB_API : control library symbols visibility ===== */
 #if defined(__GNUC__) && (__GNUC__ >= 4)
-# define ZSTDLIB_API __attribute__ ((visibility ("default")))
-#elif defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-# define ZSTDLIB_API __declspec(dllexport)
+# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#else
+# define ZSTDLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
 #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-# define ZSTDLIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
 #else
-# define ZSTDLIB_API
+# define ZSTDLIB_API ZSTDLIB_VISIBILITY
 #endif
 
 
@@ -53,7 +56,7 @@
 /*------ Version ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    1
-#define ZSTD_VERSION_RELEASE  2
+#define ZSTD_VERSION_RELEASE  3
 
 #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
 #define ZSTD_QUOTE(str) #str
@@ -170,8 +173,8 @@
 *   When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
 *   ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
 *   ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only.
-*   `dict` can be released after ZSTD_CDict creation. */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel);
+*   `dictBuffer` can be released after ZSTD_CDict creation, as its content is copied within CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel);
 
 /*! ZSTD_freeCDict() :
 *   Function frees memory allocated by ZSTD_createCDict(). */
@@ -191,8 +194,8 @@
 
 /*! ZSTD_createDDict() :
 *   Create a digested dictionary, ready to start decompression operation without startup delay.
-*   `dict` can be released after creation. */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize);
+*   dictBuffer can be released after DDict creation, as its content is copied inside DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
 
 /*!
ZSTD_freeDDict() :
*   Function frees memory allocated with ZSTD_createDDict() */
@@ -325,7 +328,7 @@
 * ***************************************************************************************/
 
 /* --- Constants ---*/
-#define ZSTD_MAGICNUMBER 0xFD2FB528   /* v0.8 */
+#define ZSTD_MAGICNUMBER 0xFD2FB528   /* >= v0.8.0 */
 #define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
 
 #define ZSTD_WINDOWLOG_MAX_32  25
@@ -345,8 +348,9 @@
 #define ZSTD_TARGETLENGTH_MAX  999
 
 #define ZSTD_FRAMEHEADERSIZE_MAX 18    /* for static allocation */
+#define ZSTD_FRAMEHEADERSIZE_MIN  6
 static const size_t ZSTD_frameHeaderSize_prefix = 5;
-static const size_t ZSTD_frameHeaderSize_min = 6;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
 static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
 static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
 
@@ -365,9 +369,9 @@
 } ZSTD_compressionParameters;
 
 typedef struct {
-    unsigned contentSizeFlag; /**< 1: content size will be in frame header (if known). */
-    unsigned checksumFlag;    /**< 1: will generate a 22-bits checksum at end of frame, to be used for error detection by decompressor */
-    unsigned noDictIDFlag;    /**< 1: no dict ID will be saved into frame header (if dictionary compression) */
+    unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+    unsigned checksumFlag;    /**< 1: generate a 32-bit checksum at end of frame, for error detection */
+    unsigned noDictIDFlag;    /**< 1: no dictID will be saved into frame header (if dictionary compression) */
 } ZSTD_frameParameters;
 
 typedef struct {
@@ -397,9 +401,23 @@
 *   Gives the amount of memory used by a given ZSTD_CCtx */
 ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
 
+typedef enum {
+    ZSTD_p_forceWindow   /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0)*/
+} ZSTD_CCtxParameter;
+/*! ZSTD_setCCtxParameter() :
+ *  Set advanced parameters, selected through enum ZSTD_CCtxParameter
+ *  @result : 0, or an error code (which can be tested with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value);
+
+/*! ZSTD_createCDict_byReference() :
+ *  Create a digested dictionary for compression
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives CDict: it must remain read accessible throughout the lifetime of CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
 /*! ZSTD_createCDict_advanced() :
 *   Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
                                                   ZSTD_parameters params, ZSTD_customMem customMem);
 
 /*! ZSTD_sizeof_CDict() :
@@ -455,6 +473,15 @@
 *   Gives the amount of memory used by a given ZSTD_DCtx */
 ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
 
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives DDict: it must remain read accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                                  unsigned byReference, ZSTD_customMem customMem);
+
 /*! ZSTD_sizeof_DDict() :
 *   Gives the amount of memory used by a given ZSTD_DDict */
 ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 
@@ -463,13 +490,13 @@
 *   Provides the dictID stored within dictionary.
 *   if @return == 0, the dictionary is not conformant with Zstandard specification.
 *   It can still be loaded, but as a content-only dictionary. */
-unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
 
 /*! ZSTD_getDictID_fromDDict() :
 *   Provides the dictID of the dictionary loaded into `ddict`.
 *   If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 *   Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
 
 /*! ZSTD_getDictID_fromFrame() :
 *   Provides the dictID required to decompress the frame stored within `src`.
@@ -481,7 +508,7 @@
 *   - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
 *   - This is not a Zstandard frame.
 *   When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
-unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
 
 
 /********************************************************************
@@ -491,7 +518,7 @@
 /*===== Advanced Streaming compression functions =====*/
 ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
 ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
 ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
                                              ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be zero == unknown */
 ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
@@ -500,9 +527,9 @@
 
 
 /*===== Advanced Streaming decompression functions =====*/
-typedef enum { ZSTDdsp_maxWindowSize } ZSTD_DStreamParameter_e;
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
 ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
ZSTDLIB_API
size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue); ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict will just be referenced, and must outlive decompression session */ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompression parameters from previous init; saves dictionary loading */ @@ -542,10 +569,10 @@ In which case, it will "discard" the relevant memory section from its history. Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum. - It's possible to use a NULL,0 src content, in which case, it will write a final empty block to end the frame, - Without last block mark, frames will be considered unfinished (broken) by decoders. + It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. + Without last block mark, frames will be considered unfinished (corrupted) by decoders. - You can then reuse `ZSTD_CCtx` (ZSTD_compressBegin()) to compress some new frame. + `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new frame. */ /*===== Buffer-less streaming compression functions =====*/ @@ -553,6 +580,7 @@ ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize); ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
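The by-reference variants declared above skip the internal copy: the dictionary bytes stay in the caller's buffer, which must therefore remain readable for the whole lifetime of the CDict or DDict. A minimal sketch, not part of the patch:

#define ZSTD_STATIC_LINKING_ONLY   /* for ZSTD_createCDict_byReference */
#include "zstd.h"

/* Compress one buffer using a by-reference digested dictionary. Returns the
 * compressed size, an error code testable with ZSTD_isError(), or 0 if the
 * context or dictionary could not be created. */
static size_t compress_with_dict(void *dst, size_t dstCapacity,
                                 const void *src, size_t srcSize,
                                 const void *dictBuffer, size_t dictSize)
{
    size_t cSize = 0;
    ZSTD_CDict *cdict = ZSTD_createCDict_byReference(dictBuffer, dictSize, 3);
    ZSTD_CCtx *cctx = ZSTD_createCCtx();
    if (cdict && cctx) {
        cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                         src, srcSize, cdict);
    }
    ZSTD_freeCCtx(cctx);      /* both free functions accept NULL */
    ZSTD_freeCDict(cdict);
    return cSize;             /* dictBuffer may be released only now */
}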
--- a/contrib/python-zstandard/zstd_cffi.py Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/python-zstandard/zstd_cffi.py Sat Mar 11 13:53:14 2017 -0500 @@ -8,145 +8,1035 @@ from __future__ import absolute_import, unicode_literals -import io +import sys from _zstd_cffi import ( ffi, lib, ) +if sys.version_info[0] == 2: + bytes_type = str + int_type = long +else: + bytes_type = bytes + int_type = int -_CSTREAM_IN_SIZE = lib.ZSTD_CStreamInSize() -_CSTREAM_OUT_SIZE = lib.ZSTD_CStreamOutSize() + +COMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_CStreamInSize() +COMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_CStreamOutSize() +DECOMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_DStreamInSize() +DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_DStreamOutSize() + +new_nonzero = ffi.new_allocator(should_clear_after_alloc=False) + + +MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel() +MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER +FRAME_HEADER = b'\x28\xb5\x2f\xfd' +ZSTD_VERSION = (lib.ZSTD_VERSION_MAJOR, lib.ZSTD_VERSION_MINOR, lib.ZSTD_VERSION_RELEASE) + +WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN +WINDOWLOG_MAX = lib.ZSTD_WINDOWLOG_MAX +CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN +CHAINLOG_MAX = lib.ZSTD_CHAINLOG_MAX +HASHLOG_MIN = lib.ZSTD_HASHLOG_MIN +HASHLOG_MAX = lib.ZSTD_HASHLOG_MAX +HASHLOG3_MAX = lib.ZSTD_HASHLOG3_MAX +SEARCHLOG_MIN = lib.ZSTD_SEARCHLOG_MIN +SEARCHLOG_MAX = lib.ZSTD_SEARCHLOG_MAX +SEARCHLENGTH_MIN = lib.ZSTD_SEARCHLENGTH_MIN +SEARCHLENGTH_MAX = lib.ZSTD_SEARCHLENGTH_MAX +TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN +TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX + +STRATEGY_FAST = lib.ZSTD_fast +STRATEGY_DFAST = lib.ZSTD_dfast +STRATEGY_GREEDY = lib.ZSTD_greedy +STRATEGY_LAZY = lib.ZSTD_lazy +STRATEGY_LAZY2 = lib.ZSTD_lazy2 +STRATEGY_BTLAZY2 = lib.ZSTD_btlazy2 +STRATEGY_BTOPT = lib.ZSTD_btopt + +COMPRESSOBJ_FLUSH_FINISH = 0 +COMPRESSOBJ_FLUSH_BLOCK = 1 + + +class ZstdError(Exception): + pass -class _ZstdCompressionWriter(object): - def __init__(self, cstream, writer): - self._cstream = cstream +class CompressionParameters(object): + def __init__(self, window_log, chain_log, hash_log, search_log, + search_length, target_length, strategy): + if window_log < WINDOWLOG_MIN or window_log > WINDOWLOG_MAX: + raise ValueError('invalid window log value') + + if chain_log < CHAINLOG_MIN or chain_log > CHAINLOG_MAX: + raise ValueError('invalid chain log value') + + if hash_log < HASHLOG_MIN or hash_log > HASHLOG_MAX: + raise ValueError('invalid hash log value') + + if search_log < SEARCHLOG_MIN or search_log > SEARCHLOG_MAX: + raise ValueError('invalid search log value') + + if search_length < SEARCHLENGTH_MIN or search_length > SEARCHLENGTH_MAX: + raise ValueError('invalid search length value') + + if target_length < TARGETLENGTH_MIN or target_length > TARGETLENGTH_MAX: + raise ValueError('invalid target length value') + + if strategy < STRATEGY_FAST or strategy > STRATEGY_BTOPT: + raise ValueError('invalid strategy value') + + self.window_log = window_log + self.chain_log = chain_log + self.hash_log = hash_log + self.search_log = search_log + self.search_length = search_length + self.target_length = target_length + self.strategy = strategy + + def as_compression_parameters(self): + p = ffi.new('ZSTD_compressionParameters *')[0] + p.windowLog = self.window_log + p.chainLog = self.chain_log + p.hashLog = self.hash_log + p.searchLog = self.search_log + p.searchLength = self.search_length + p.targetLength = self.target_length + p.strategy = self.strategy + + return p + +def get_compression_parameters(level, source_size=0, 
dict_size=0): + params = lib.ZSTD_getCParams(level, source_size, dict_size) + return CompressionParameters(window_log=params.windowLog, + chain_log=params.chainLog, + hash_log=params.hashLog, + search_log=params.searchLog, + search_length=params.searchLength, + target_length=params.targetLength, + strategy=params.strategy) + + +def estimate_compression_context_size(params): + if not isinstance(params, CompressionParameters): + raise ValueError('argument must be a CompressionParameters') + + cparams = params.as_compression_parameters() + return lib.ZSTD_estimateCCtxSize(cparams) + + +def estimate_decompression_context_size(): + return lib.ZSTD_estimateDCtxSize() + + +class ZstdCompressionWriter(object): + def __init__(self, compressor, writer, source_size, write_size): + self._compressor = compressor self._writer = writer + self._source_size = source_size + self._write_size = write_size + self._entered = False def __enter__(self): + if self._entered: + raise ZstdError('cannot __enter__ multiple times') + + self._cstream = self._compressor._get_cstream(self._source_size) + self._entered = True return self def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + if not exc_type and not exc_value and not exc_tb: out_buffer = ffi.new('ZSTD_outBuffer *') - out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE) - out_buffer.size = _CSTREAM_OUT_SIZE + dst_buffer = ffi.new('char[]', self._write_size) + out_buffer.dst = dst_buffer + out_buffer.size = self._write_size out_buffer.pos = 0 while True: - res = lib.ZSTD_endStream(self._cstream, out_buffer) - if lib.ZSTD_isError(res): - raise Exception('error ending compression stream: %s' % lib.ZSTD_getErrorName) + zresult = lib.ZSTD_endStream(self._cstream, out_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('error ending compression stream: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) if out_buffer.pos: - self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) out_buffer.pos = 0 - if res == 0: + if zresult == 0: break + self._cstream = None + self._compressor = None + return False + def memory_size(self): + if not self._entered: + raise ZstdError('cannot determine size of an inactive compressor; ' + 'call when a context manager is active') + + return lib.ZSTD_sizeof_CStream(self._cstream) + def write(self, data): + if not self._entered: + raise ZstdError('write() must be called from an active context ' + 'manager') + + total_write = 0 + + data_buffer = ffi.from_buffer(data) + + in_buffer = ffi.new('ZSTD_inBuffer *') + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + out_buffer = ffi.new('ZSTD_outBuffer *') - out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE) - out_buffer.size = _CSTREAM_OUT_SIZE + dst_buffer = ffi.new('char[]', self._write_size) + out_buffer.dst = dst_buffer + out_buffer.size = self._write_size + out_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_compressStream(self._cstream, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('zstd compress error: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if out_buffer.pos: + self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) + total_write += out_buffer.pos + out_buffer.pos = 0 + + return total_write + + def flush(self): + if not self._entered: + raise ZstdError('flush must be called from an active context manager') + + total_write = 0 + + out_buffer = ffi.new('ZSTD_outBuffer 
*')
+        dst_buffer = ffi.new('char[]', self._write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = self._write_size
         out_buffer.pos = 0
 
-        # TODO can we reuse existing memory?
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        in_buffer.src = ffi.new('char[]', data)
-        in_buffer.size = len(data)
-        in_buffer.pos = 0
-        while in_buffer.pos < in_buffer.size:
-            res = lib.ZSTD_compressStream(self._cstream, out_buffer, in_buffer)
-            if lib.ZSTD_isError(res):
-                raise Exception('zstd compress error: %s' % lib.ZSTD_getErrorName(res))
+        while True:
+            zresult = lib.ZSTD_flushStream(self._cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if not out_buffer.pos:
+                break
+
+            self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+            total_write += out_buffer.pos
+            out_buffer.pos = 0
+
+        return total_write
+
+
+class ZstdCompressionObj(object):
+    def compress(self, data):
+        if self._finished:
+            raise ZstdError('cannot call compress() after compressor finished')
+
+        data_buffer = ffi.from_buffer(data)
+        source = ffi.new('ZSTD_inBuffer *')
+        source.src = data_buffer
+        source.size = len(data_buffer)
+        source.pos = 0
+
+        chunks = []
+
+        while source.pos < len(data):
+            zresult = lib.ZSTD_compressStream(self._cstream, self._out, source)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+        return b''.join(chunks)
 
-            if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
-                out_buffer.pos = 0
+    def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
+        if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+            raise ValueError('flush mode not recognized')
+
+        if self._finished:
+            raise ZstdError('compressor object already finished')
+
+        assert self._out.pos == 0
+
+        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
+            zresult = lib.ZSTD_flushStream(self._cstream, self._out)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            # Output buffer is guaranteed to hold full block.
+            assert zresult == 0
+
+            if self._out.pos:
+                result = ffi.buffer(self._out.dst, self._out.pos)[:]
+                self._out.pos = 0
+                return result
+            else:
+                return b''
+
+        assert flush_mode == COMPRESSOBJ_FLUSH_FINISH
+        self._finished = True
+
+        chunks = []
+
+        while True:
+            zresult = lib.ZSTD_endStream(self._cstream, self._out)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+            if not zresult:
+                break
+
+        # GC compression stream immediately.
+ self._cstream = None + + return b''.join(chunks) class ZstdCompressor(object): - def __init__(self, level=3, dict_data=None, compression_params=None): - if dict_data: - raise Exception('dict_data not yet supported') - if compression_params: - raise Exception('compression_params not yet supported') + def __init__(self, level=3, dict_data=None, compression_params=None, + write_checksum=False, write_content_size=False, + write_dict_id=True): + if level < 1: + raise ValueError('level must be greater than 0') + elif level > lib.ZSTD_maxCLevel(): + raise ValueError('level must be less than %d' % lib.ZSTD_maxCLevel()) self._compression_level = level + self._dict_data = dict_data + self._cparams = compression_params + self._fparams = ffi.new('ZSTD_frameParameters *')[0] + self._fparams.checksumFlag = write_checksum + self._fparams.contentSizeFlag = write_content_size + self._fparams.noDictIDFlag = not write_dict_id - def compress(self, data): - # Just use the stream API for now. - output = io.BytesIO() - with self.write_to(output) as compressor: - compressor.write(data) - return output.getvalue() + cctx = lib.ZSTD_createCCtx() + if cctx == ffi.NULL: + raise MemoryError() + + self._cctx = ffi.gc(cctx, lib.ZSTD_freeCCtx) + + def compress(self, data, allow_empty=False): + if len(data) == 0 and self._fparams.contentSizeFlag and not allow_empty: + raise ValueError('cannot write empty inputs when writing content sizes') + + # TODO use a CDict for performance. + dict_data = ffi.NULL + dict_size = 0 + + if self._dict_data: + dict_data = self._dict_data.as_bytes() + dict_size = len(self._dict_data) + + params = ffi.new('ZSTD_parameters *')[0] + if self._cparams: + params.cParams = self._cparams.as_compression_parameters() + else: + params.cParams = lib.ZSTD_getCParams(self._compression_level, len(data), + dict_size) + params.fParams = self._fparams + + dest_size = lib.ZSTD_compressBound(len(data)) + out = new_nonzero('char[]', dest_size) - def copy_stream(self, ifh, ofh): - cstream = self._get_cstream() + zresult = lib.ZSTD_compress_advanced(self._cctx, + ffi.addressof(out), dest_size, + data, len(data), + dict_data, dict_size, + params) + + if lib.ZSTD_isError(zresult): + raise ZstdError('cannot compress: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + return ffi.buffer(out, zresult)[:] + + def compressobj(self, size=0): + cstream = self._get_cstream(size) + cobj = ZstdCompressionObj() + cobj._cstream = cstream + cobj._out = ffi.new('ZSTD_outBuffer *') + cobj._dst_buffer = ffi.new('char[]', COMPRESSION_RECOMMENDED_OUTPUT_SIZE) + cobj._out.dst = cobj._dst_buffer + cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE + cobj._out.pos = 0 + cobj._compressor = self + cobj._finished = False + + return cobj + + def copy_stream(self, ifh, ofh, size=0, + read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE): + + if not hasattr(ifh, 'read'): + raise ValueError('first argument must have a read() method') + if not hasattr(ofh, 'write'): + raise ValueError('second argument must have a write() method') + + cstream = self._get_cstream(size) in_buffer = ffi.new('ZSTD_inBuffer *') out_buffer = ffi.new('ZSTD_outBuffer *') - out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE) - out_buffer.size = _CSTREAM_OUT_SIZE + dst_buffer = ffi.new('char[]', write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size out_buffer.pos = 0 total_read, total_write = 0, 0 while True: - data = ifh.read(_CSTREAM_IN_SIZE) + data = ifh.read(read_size) if not data: break - 
total_read += len(data)
-
-            in_buffer.src = ffi.new('char[]', data)
-            in_buffer.size = len(data)
+            data_buffer = ffi.from_buffer(data)
+            total_read += len(data_buffer)
+            in_buffer.src = data_buffer
+            in_buffer.size = len(data_buffer)
             in_buffer.pos = 0
 
             while in_buffer.pos < in_buffer.size:
-                res = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer)
-                if lib.ZSTD_isError(res):
-                    raise Exception('zstd compress error: %s' %
-                                    lib.ZSTD_getErrorName(res))
+                zresult = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd compress error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
-                    total_write = out_buffer.pos
+                    total_write += out_buffer.pos
                     out_buffer.pos = 0
 
         # We've finished reading. Flush the compressor.
         while True:
-            res = lib.ZSTD_endStream(cstream, out_buffer)
-            if lib.ZSTD_isError(res):
-                raise Exception('error ending compression stream: %s' %
-                                lib.ZSTD_getErrorName(res))
+            zresult = lib.ZSTD_endStream(cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
 
             if out_buffer.pos:
                 ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
                 total_write += out_buffer.pos
                 out_buffer.pos = 0
 
-            if res == 0:
+            if zresult == 0:
                 break
 
         return total_read, total_write
 
-    def write_to(self, writer):
-        return _ZstdCompressionWriter(self._get_cstream(), writer)
+    def write_to(self, writer, size=0,
+                 write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+
+        if not hasattr(writer, 'write'):
+            raise ValueError('must pass an object with a write() method')
+
+        return ZstdCompressionWriter(self, writer, size, write_size)
+
+    def read_from(self, reader, size=0,
+                  read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+                  write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        if hasattr(reader, 'read'):
+            have_read = True
+        elif hasattr(reader, '__getitem__'):
+            have_read = False
+            buffer_offset = 0
+            size = len(reader)
+        else:
+            raise ValueError('must pass an object with a read() method or '
+                             'one that conforms to the buffer protocol')
+
+        cstream = self._get_cstream(size)
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        in_buffer.src = ffi.NULL
+        in_buffer.size = 0
+        in_buffer.pos = 0
+
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
+        out_buffer.pos = 0
+
+        while True:
+            # We should never have output data sitting around after a previous
+            # iteration.
+            assert out_buffer.pos == 0
+
+            # Collect input data.
+            if have_read:
+                read_result = reader.read(read_size)
+            else:
+                remaining = len(reader) - buffer_offset
+                slice_size = min(remaining, read_size)
+                read_result = reader[buffer_offset:buffer_offset + slice_size]
+                buffer_offset += slice_size
 
-    def _get_cstream(self):
+            # No new input data. Break out of the read loop.
+            if not read_result:
+                break
+
+            # Feed all read data into the compressor and emit output until
+            # exhausted.
+ read_buffer = ffi.from_buffer(read_result) + in_buffer.src = read_buffer + in_buffer.size = len(read_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('zstd compress error: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + assert out_buffer.pos == 0 + + # And repeat the loop to collect more data. + continue + + # If we get here, input is exhausted. End the stream and emit what + # remains. + while True: + assert out_buffer.pos == 0 + zresult = lib.ZSTD_endStream(cstream, out_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('error ending compression stream: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + if zresult == 0: + break + + def _get_cstream(self, size): cstream = lib.ZSTD_createCStream() + if cstream == ffi.NULL: + raise MemoryError() + cstream = ffi.gc(cstream, lib.ZSTD_freeCStream) - res = lib.ZSTD_initCStream(cstream, self._compression_level) - if lib.ZSTD_isError(res): + dict_data = ffi.NULL + dict_size = 0 + if self._dict_data: + dict_data = self._dict_data.as_bytes() + dict_size = len(self._dict_data) + + zparams = ffi.new('ZSTD_parameters *')[0] + if self._cparams: + zparams.cParams = self._cparams.as_compression_parameters() + else: + zparams.cParams = lib.ZSTD_getCParams(self._compression_level, + size, dict_size) + zparams.fParams = self._fparams + + zresult = lib.ZSTD_initCStream_advanced(cstream, dict_data, dict_size, + zparams, size) + if lib.ZSTD_isError(zresult): raise Exception('cannot init CStream: %s' % - lib.ZSTD_getErrorName(res)) + ffi.string(lib.ZSTD_getErrorName(zresult))) return cstream + + +class FrameParameters(object): + def __init__(self, fparams): + self.content_size = fparams.frameContentSize + self.window_size = fparams.windowSize + self.dict_id = fparams.dictID + self.has_checksum = bool(fparams.checksumFlag) + + +def get_frame_parameters(data): + if not isinstance(data, bytes_type): + raise TypeError('argument must be bytes') + + params = ffi.new('ZSTD_frameParams *') + + zresult = lib.ZSTD_getFrameParams(params, data, len(data)) + if lib.ZSTD_isError(zresult): + raise ZstdError('cannot get frame parameters: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if zresult: + raise ZstdError('not enough data for frame parameters; need %d bytes' % + zresult) + + return FrameParameters(params[0]) + + +class ZstdCompressionDict(object): + def __init__(self, data): + assert isinstance(data, bytes_type) + self._data = data + + def __len__(self): + return len(self._data) + + def dict_id(self): + return int_type(lib.ZDICT_getDictID(self._data, len(self._data))) + + def as_bytes(self): + return self._data + + +def train_dictionary(dict_size, samples, parameters=None): + if not isinstance(samples, list): + raise TypeError('samples must be a list') + + total_size = sum(map(len, samples)) + + samples_buffer = new_nonzero('char[]', total_size) + sample_sizes = new_nonzero('size_t[]', len(samples)) + + offset = 0 + for i, sample in enumerate(samples): + if not isinstance(sample, bytes_type): + raise ValueError('samples must be bytes') + + l = len(sample) + ffi.memmove(samples_buffer + offset, sample, l) + offset += l + sample_sizes[i] = l + + dict_data = new_nonzero('char[]', 
dict_size) + + zresult = lib.ZDICT_trainFromBuffer(ffi.addressof(dict_data), dict_size, + ffi.addressof(samples_buffer), + ffi.addressof(sample_sizes, 0), + len(samples)) + if lib.ZDICT_isError(zresult): + raise ZstdError('Cannot train dict: %s' % + ffi.string(lib.ZDICT_getErrorName(zresult))) + + return ZstdCompressionDict(ffi.buffer(dict_data, zresult)[:]) + + +class ZstdDecompressionObj(object): + def __init__(self, decompressor): + self._decompressor = decompressor + self._dstream = self._decompressor._get_dstream() + self._finished = False + + def decompress(self, data): + if self._finished: + raise ZstdError('cannot use a decompressobj multiple times') + + in_buffer = ffi.new('ZSTD_inBuffer *') + out_buffer = ffi.new('ZSTD_outBuffer *') + + data_buffer = ffi.from_buffer(data) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + dst_buffer = ffi.new('char[]', DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + chunks = [] + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_decompressStream(self._dstream, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('zstd decompressor error: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if zresult == 0: + self._finished = True + self._dstream = None + self._decompressor = None + + if out_buffer.pos: + chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) + out_buffer.pos = 0 + + return b''.join(chunks) + + +class ZstdDecompressionWriter(object): + def __init__(self, decompressor, writer, write_size): + self._decompressor = decompressor + self._writer = writer + self._write_size = write_size + self._dstream = None + self._entered = False + + def __enter__(self): + if self._entered: + raise ZstdError('cannot __enter__ multiple times') + + self._dstream = self._decompressor._get_dstream() + self._entered = True + + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + self._dstream = None + + def memory_size(self): + if not self._dstream: + raise ZstdError('cannot determine size of inactive decompressor ' + 'call when context manager is active') + + return lib.ZSTD_sizeof_DStream(self._dstream) + + def write(self, data): + if not self._entered: + raise ZstdError('write must be called from an active context manager') + + total_write = 0 + + in_buffer = ffi.new('ZSTD_inBuffer *') + out_buffer = ffi.new('ZSTD_outBuffer *') + + data_buffer = ffi.from_buffer(data) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + dst_buffer = ffi.new('char[]', self._write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_decompressStream(self._dstream, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('zstd decompress error: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if out_buffer.pos: + self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) + total_write += out_buffer.pos + out_buffer.pos = 0 + + return total_write + + +class ZstdDecompressor(object): + def __init__(self, dict_data=None): + self._dict_data = dict_data + + dctx = lib.ZSTD_createDCtx() + if dctx == ffi.NULL: + raise MemoryError() + + self._refdctx = ffi.gc(dctx, lib.ZSTD_freeDCtx) + + @property + def _ddict(self): + if self._dict_data: + dict_data = self._dict_data.as_bytes() + dict_size = len(self._dict_data) + + 
ddict = lib.ZSTD_createDDict(dict_data, dict_size) + if ddict == ffi.NULL: + raise ZstdError('could not create decompression dict') + else: + ddict = None + + self.__dict__['_ddict'] = ddict + return ddict + + def decompress(self, data, max_output_size=0): + data_buffer = ffi.from_buffer(data) + + orig_dctx = new_nonzero('char[]', lib.ZSTD_sizeof_DCtx(self._refdctx)) + dctx = ffi.cast('ZSTD_DCtx *', orig_dctx) + lib.ZSTD_copyDCtx(dctx, self._refdctx) + + ddict = self._ddict + + output_size = lib.ZSTD_getDecompressedSize(data_buffer, len(data_buffer)) + if output_size: + result_buffer = ffi.new('char[]', output_size) + result_size = output_size + else: + if not max_output_size: + raise ZstdError('input data invalid or missing content size ' + 'in frame header') + + result_buffer = ffi.new('char[]', max_output_size) + result_size = max_output_size + + if ddict: + zresult = lib.ZSTD_decompress_usingDDict(dctx, + result_buffer, result_size, + data_buffer, len(data_buffer), + ddict) + else: + zresult = lib.ZSTD_decompressDCtx(dctx, + result_buffer, result_size, + data_buffer, len(data_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError('decompression error: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + elif output_size and zresult != output_size: + raise ZstdError('decompression error: decompressed %d bytes; expected %d' % + (zresult, output_size)) + + return ffi.buffer(result_buffer, zresult)[:] + + def decompressobj(self): + return ZstdDecompressionObj(self) + + def read_from(self, reader, read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + skip_bytes=0): + if skip_bytes >= read_size: + raise ValueError('skip_bytes must be smaller than read_size') + + if hasattr(reader, 'read'): + have_read = True + elif hasattr(reader, '__getitem__'): + have_read = False + buffer_offset = 0 + size = len(reader) + else: + raise ValueError('must pass an object with a read() method or ' + 'conforms to buffer protocol') + + if skip_bytes: + if have_read: + reader.read(skip_bytes) + else: + if skip_bytes > size: + raise ValueError('skip_bytes larger than first input chunk') + + buffer_offset = skip_bytes + + dstream = self._get_dstream() + + in_buffer = ffi.new('ZSTD_inBuffer *') + out_buffer = ffi.new('ZSTD_outBuffer *') + + dst_buffer = ffi.new('char[]', write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + while True: + assert out_buffer.pos == 0 + + if have_read: + read_result = reader.read(read_size) + else: + remaining = size - buffer_offset + slice_size = min(remaining, read_size) + read_result = reader[buffer_offset:buffer_offset + slice_size] + buffer_offset += slice_size + + # No new input. Break out of read loop. + if not read_result: + break + + # Feed all read data into decompressor and emit output until + # exhausted. + read_buffer = ffi.from_buffer(read_result) + in_buffer.src = read_buffer + in_buffer.size = len(read_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + assert out_buffer.pos == 0 + + zresult = lib.ZSTD_decompressStream(dstream, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('zstd decompress error: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + if zresult == 0: + return + + # Repeat loop to collect more input data. + continue + + # If we get here, input is exhausted. 
+ + def write_to(self, writer, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE): + if not hasattr(writer, 'write'): + raise ValueError('must pass an object with a write() method') + + return ZstdDecompressionWriter(self, writer, write_size) + + def copy_stream(self, ifh, ofh, + read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE): + if not hasattr(ifh, 'read'): + raise ValueError('first argument must have a read() method') + if not hasattr(ofh, 'write'): + raise ValueError('second argument must have a write() method') + + dstream = self._get_dstream() + + in_buffer = ffi.new('ZSTD_inBuffer *') + out_buffer = ffi.new('ZSTD_outBuffer *') + + dst_buffer = ffi.new('char[]', write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size + out_buffer.pos = 0 + + total_read, total_write = 0, 0 + + # Read all available input. + while True: + data = ifh.read(read_size) + if not data: + break + + data_buffer = ffi.from_buffer(data) + total_read += len(data_buffer) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + # Flush all read data to output. + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_decompressStream(dstream, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError('zstd decompressor error: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + if out_buffer.pos: + ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + total_write += out_buffer.pos + out_buffer.pos = 0 + + # Continue loop to keep reading. + + return total_read, total_write + + def decompress_content_dict_chain(self, frames): + if not isinstance(frames, list): + raise TypeError('argument must be a list') + + if not frames: + raise ValueError('empty input chain') + + # First chunk should not be using a dictionary. We handle it specially. + chunk = frames[0] + if not isinstance(chunk, bytes_type): + raise ValueError('chunk 0 must be bytes') + + # All chunks should be zstd frames and should have content size set. 
+ chunk_buffer = ffi.from_buffer(chunk) + params = ffi.new('ZSTD_frameParams *') + zresult = lib.ZSTD_getFrameParams(params, chunk_buffer, len(chunk_buffer)) + if lib.ZSTD_isError(zresult): + raise ValueError('chunk 0 is not a valid zstd frame') + elif zresult: + raise ValueError('chunk 0 is too small to contain a zstd frame') + + if not params.frameContentSize: + raise ValueError('chunk 0 missing content size in frame') + + dctx = lib.ZSTD_createDCtx() + if dctx == ffi.NULL: + raise MemoryError() + + dctx = ffi.gc(dctx, lib.ZSTD_freeDCtx) + + last_buffer = ffi.new('char[]', params.frameContentSize) + + zresult = lib.ZSTD_decompressDCtx(dctx, last_buffer, len(last_buffer), + chunk_buffer, len(chunk_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError('could not decompress chunk 0: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + # Special case of chain length of 1 + if len(frames) == 1: + return ffi.buffer(last_buffer, len(last_buffer))[:] + + i = 1 + while i < len(frames): + chunk = frames[i] + if not isinstance(chunk, bytes_type): + raise ValueError('chunk %d must be bytes' % i) + + chunk_buffer = ffi.from_buffer(chunk) + zresult = lib.ZSTD_getFrameParams(params, chunk_buffer, len(chunk_buffer)) + if lib.ZSTD_isError(zresult): + raise ValueError('chunk %d is not a valid zstd frame' % i) + elif zresult: + raise ValueError('chunk %d is too small to contain a zstd frame' % i) + + if not params.frameContentSize: + raise ValueError('chunk %d missing content size in frame' % i) + + dest_buffer = ffi.new('char[]', params.frameContentSize) + + zresult = lib.ZSTD_decompress_usingDict(dctx, dest_buffer, len(dest_buffer), + chunk_buffer, len(chunk_buffer), + last_buffer, len(last_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError('could not decompress chunk %d' % i) + + last_buffer = dest_buffer + i += 1 + + return ffi.buffer(last_buffer, len(last_buffer))[:] + + def _get_dstream(self): + dstream = lib.ZSTD_createDStream() + if dstream == ffi.NULL: + raise MemoryError() + + dstream = ffi.gc(dstream, lib.ZSTD_freeDStream) + + if self._dict_data: + zresult = lib.ZSTD_initDStream_usingDict(dstream, + self._dict_data.as_bytes(), + len(self._dict_data)) + else: + zresult = lib.ZSTD_initDStream(dstream) + + if lib.ZSTD_isError(zresult): + raise ZstdError('could not initialize DStream: %s' % + ffi.string(lib.ZSTD_getErrorName(zresult))) + + return dstream
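The cffi blocks above track python-zstandard's streaming API: on the compression side, copy_stream(), write_to() and read_from() drive ZSTD_compressStream() and finish with ZSTD_endStream(), and the decompression side mirrors them around ZSTD_decompressStream(). A minimal usage sketch, assuming the package built from these bindings is importable as ``zstd`` (the import name and the compression level are assumptions here)::

    import io

    import zstd  # assumed import name for python-zstandard

    cctx = zstd.ZstdCompressor(level=3)
    dctx = zstd.ZstdDecompressor()

    src = io.BytesIO(b'streaming compression example\n' * 1000)
    compressed = io.BytesIO()

    # copy_stream() loops ZSTD_compressStream() over read_size chunks,
    # then flushes with ZSTD_endStream(); it returns (read, written).
    read, written = cctx.copy_stream(src, compressed)

    # read_from() is a generator that yields decompressed chunks as
    # ZSTD_decompressStream() produces them.
    compressed.seek(0)
    restored = b''.join(dctx.read_from(compressed))
    assert restored == b'streaming compression example\n' * 1000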
--- a/contrib/undumprevlog Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/undumprevlog Sat Mar 11 13:53:14 2017 -0500 @@ -9,15 +9,15 @@ from mercurial import ( node, revlog, - scmutil, transaction, util, + vfs as vfsmod, ) for fp in (sys.stdin, sys.stdout, sys.stderr): util.setbinary(fp) -opener = scmutil.opener('.', False) +opener = vfsmod.vfs('.', False) tr = transaction.transaction(sys.stderr.write, opener, {'store': opener}, "undump.journal") while True:
--- a/contrib/win32/mercurial.ini	Tue Mar 07 13:24:24 2017 -0500
+++ b/contrib/win32/mercurial.ini	Sat Mar 11 13:53:14 2017 -0500
@@ -19,6 +19,8 @@
 editor = notepad
 ; show changed files and be a bit more verbose if True
 ; verbose = True
+; colorize command output
+; color = auto
 
 ; username data to appear in commits
 ; it usually takes the form: Joe User <joe.user@host.com>
@@ -40,7 +42,6 @@
 ;bugzilla =
 ;children =
 ;churn =
-;color =
 ;convert =
 ;eol =
 ;extdiff =
--- a/contrib/wix/help.wxs Tue Mar 07 13:24:24 2017 -0500 +++ b/contrib/wix/help.wxs Sat Mar 11 13:53:14 2017 -0500 @@ -15,6 +15,7 @@ <DirectoryRef Id="INSTALLDIR"> <Directory Id="helpdir" Name="help" FileSource="$(var.SourceDir)"> <Component Id="help.root" Guid="$(var.help.root.guid)" Win64='$(var.IsX64)'> + <File Name="color.txt" /> <File Name="config.txt" KeyPath="yes" /> <File Name="dates.txt" /> <File Name="diffs.txt" /> @@ -25,6 +26,7 @@ <File Name="hgignore.txt" /> <File Name="hgweb.txt" /> <File Name="merge-tools.txt" /> + <File Name="pager.txt" /> <File Name="patterns.txt" /> <File Name="phases.txt" /> <File Name="revisions.txt" /> @@ -37,6 +39,7 @@ <Directory Id="help.internaldir" Name="internals"> <Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'> <File Id="internals.bundles.txt" Name="bundles.txt" KeyPath="yes" /> + <File Id="internals.censor.txt" Name="censor.txt" KeyPath="yes" /> <File Id="internals.changegroups.txt" Name="changegroups.txt" /> <File Id="internals.requirements.txt" Name="requirements.txt" /> <File Id="internals.revlogs.txt" Name="revlogs.txt" />
--- a/hgext/bugzilla.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/hgext/bugzilla.py	Sat Mar 11 13:53:14 2017 -0500
@@ -15,14 +15,16 @@
 The bug references can optionally include an update for Bugzilla of the
 hours spent working on the bug. Bugs can also be marked fixed.
 
-Three basic modes of access to Bugzilla are provided:
+Four basic modes of access to Bugzilla are provided:
+
+1. Access via the Bugzilla REST-API. Requires Bugzilla 5.0 or later.
 
-1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
+2. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
 
-2. Check data via the Bugzilla XMLRPC interface and submit bug change
+3. Check data via the Bugzilla XMLRPC interface and submit bug change
    via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
 
-3. Writing directly to the Bugzilla database. Only Bugzilla installations
+4. Writing directly to the Bugzilla database. Only Bugzilla installations
    using MySQL are supported. Requires Python MySQLdb.
 
 Writing directly to the database is susceptible to schema changes, and
@@ -50,11 +52,16 @@
 Bugzilla is used instead as the source of the comment. Marking bugs fixed
 works on all supported Bugzilla versions.
 
+Access via the REST-API needs either a Bugzilla username and password
+or an apikey specified in the configuration. Comments are made under
+the given username or the user associated with the apikey in Bugzilla.
+
 Configuration items common to all access modes:
 
 bugzilla.version
   The access type to use. Values recognized are:
 
+  :``restapi``:      Bugzilla REST-API, Bugzilla 5.0 and later.
   :``xmlrpc``:       Bugzilla XMLRPC interface.
   :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
   :``3.0``:          MySQL access, Bugzilla 3.0 and later.
@@ -135,7 +142,7 @@
   committer email to Bugzilla user email. See also ``bugzilla.usermap``.
   Contains entries of the form ``committer = Bugzilla user``.
 
-XMLRPC access mode configuration:
+XMLRPC and REST-API access mode configuration:
 
 bugzilla.bzurl
   The base URL for the Bugzilla installation.
@@ -148,6 +155,13 @@
 bugzilla.password
   The password for Bugzilla login.
 
+REST-API access mode uses the options listed above as well as:
+
+bugzilla.apikey
+  An apikey generated on the Bugzilla instance for API access.
+  Using an apikey removes the need to store the user and password
+  options.
+
 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
 and also:
 
@@ -279,6 +293,7 @@
 
 from __future__ import absolute_import
 
+import json
 import re
 import time
 
@@ -288,6 +303,7 @@
     cmdutil,
     error,
     mail,
+    url,
     util,
 )
 
@@ -773,6 +789,136 @@
             cmds.append(self.makecommandline("resolution", self.fixresolution))
         self.send_bug_modify_email(bugid, cmds, text, committer)
 
+class NotFound(LookupError):
+    pass
+
+class bzrestapi(bzaccess):
+    """Read and write bugzilla data using the REST API available since
+    Bugzilla 5.0.
+ """ + def __init__(self, ui): + bzaccess.__init__(self, ui) + bz = self.ui.config('bugzilla', 'bzurl', + 'http://localhost/bugzilla/') + self.bzroot = '/'.join([bz, 'rest']) + self.apikey = self.ui.config('bugzilla', 'apikey', '') + self.user = self.ui.config('bugzilla', 'user', 'bugs') + self.passwd = self.ui.config('bugzilla', 'password') + self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED') + self.fixresolution = self.ui.config('bugzilla', 'fixresolution', + 'FIXED') + + def apiurl(self, targets, include_fields=None): + url = '/'.join([self.bzroot] + [str(t) for t in targets]) + qv = {} + if self.apikey: + qv['api_key'] = self.apikey + elif self.user and self.passwd: + qv['login'] = self.user + qv['password'] = self.passwd + if include_fields: + qv['include_fields'] = include_fields + if qv: + url = '%s?%s' % (url, util.urlreq.urlencode(qv)) + return url + + def _fetch(self, burl): + try: + resp = url.open(self.ui, burl) + return json.loads(resp.read()) + except util.urlerr.httperror as inst: + if inst.code == 401: + raise error.Abort(_('authorization failed')) + if inst.code == 404: + raise NotFound() + else: + raise + + def _submit(self, burl, data, method='POST'): + data = json.dumps(data) + if method == 'PUT': + class putrequest(util.urlreq.request): + def get_method(self): + return 'PUT' + request_type = putrequest + else: + request_type = util.urlreq.request + req = request_type(burl, data, + {'Content-Type': 'application/json'}) + try: + resp = url.opener(self.ui).open(req) + return json.loads(resp.read()) + except util.urlerr.httperror as inst: + if inst.code == 401: + raise error.Abort(_('authorization failed')) + if inst.code == 404: + raise NotFound() + else: + raise + + def filter_real_bug_ids(self, bugs): + '''remove bug IDs that do not exist in Bugzilla from bugs.''' + badbugs = set() + for bugid in bugs: + burl = self.apiurl(('bug', bugid), include_fields='status') + try: + self._fetch(burl) + except NotFound: + badbugs.add(bugid) + for bugid in badbugs: + del bugs[bugid] + + def filter_cset_known_bug_ids(self, node, bugs): + '''remove bug IDs where node occurs in comment text from bugs.''' + sn = short(node) + for bugid in bugs.keys(): + burl = self.apiurl(('bug', bugid, 'comment'), include_fields='text') + result = self._fetch(burl) + comments = result['bugs'][str(bugid)]['comments'] + if any(sn in c['text'] for c in comments): + self.ui.status(_('bug %d already knows about changeset %s\n') % + (bugid, sn)) + del bugs[bugid] + + def updatebug(self, bugid, newstate, text, committer): + '''update the specified bug. Add comment text and set new states. + + If possible add the comment as being from the committer of + the changeset. Otherwise use the default Bugzilla user. + ''' + bugmod = {} + if 'hours' in newstate: + bugmod['work_time'] = newstate['hours'] + if 'fix' in newstate: + bugmod['status'] = self.fixstatus + bugmod['resolution'] = self.fixresolution + if bugmod: + # if we have to change the bugs state do it here + bugmod['comment'] = { + 'comment': text, + 'is_private': False, + 'is_markdown': False, + } + burl = self.apiurl(('bug', bugid)) + self._submit(burl, bugmod, method='PUT') + self.ui.debug('updated bug %s\n' % bugid) + else: + burl = self.apiurl(('bug', bugid, 'comment')) + self._submit(burl, { + 'comment': text, + 'is_private': False, + 'is_markdown': False, + }) + self.ui.debug('added comment to bug %s\n' % bugid) + + def notify(self, bugs, committer): + '''Force sending of Bugzilla notification emails. 
+ + Only required if the access method does not trigger notification + emails automatically. + ''' + pass + class bugzilla(object): # supported versions of bugzilla. different versions have # different schemas. @@ -781,7 +927,8 @@ '2.18': bzmysql_2_18, '3.0': bzmysql_3_0, 'xmlrpc': bzxmlrpc, - 'xmlrpc+email': bzxmlrpcemail + 'xmlrpc+email': bzxmlrpcemail, + 'restapi': bzrestapi, } _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
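For reference, the query-string handling in bzrestapi.apiurl() is easy to restate standalone. In this sketch the function name and arguments are hypothetical, and the real method also falls back to login/password when no apikey is configured::

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2

    def apiurl(bzroot, targets, apikey=None, include_fields=None):
        # endpoint path: <bzurl>/rest/<target>/<target>/...
        url = '/'.join([bzroot] + [str(t) for t in targets])
        qv = {}
        if apikey:
            qv['api_key'] = apikey
        if include_fields:
            qv['include_fields'] = include_fields
        if qv:
            url = '%s?%s' % (url, urlencode(qv))
        return url

    # e.g. (on Python 3.7+, where dict order is insertion order):
    # https://bugzilla.example.org/rest/bug/1234/comment?api_key=K&include_fields=text
    print(apiurl('https://bugzilla.example.org/rest', ('bug', 1234, 'comment'),
                 apikey='K', include_fields='text'))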
--- a/hgext/clonebundles.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/clonebundles.py Sat Mar 11 13:53:14 2017 -0500 @@ -177,7 +177,7 @@ # Only advertise if a manifest exists. This does add some I/O to requests. # But this should be cheaper than a wasted network round trip due to # missing file. - if repo.opener.exists('clonebundles.manifest'): + if repo.vfs.exists('clonebundles.manifest'): caps.append('clonebundles') return caps
--- a/hgext/color.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/hgext/color.py	Sat Mar 11 13:53:14 2017 -0500
@@ -5,652 +5,27 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-'''colorize output from some commands
-
-The color extension colorizes output from several Mercurial commands.
-For example, the diff command shows additions in green and deletions
-in red, while the status command shows modified files in magenta. Many
-other commands have analogous colors. It is possible to customize
-these colors.
-
-Effects
--------
-
-Other effects in addition to color, like bold and underlined text, are
-also available. By default, the terminfo database is used to find the
-terminal codes used to change color and effect. If terminfo is not
-available, then effects are rendered with the ECMA-48 SGR control
-function (aka ANSI escape codes).
-
-The available effects in terminfo mode are 'blink', 'bold', 'dim',
-'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
-ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
-'underline'. How each is rendered depends on the terminal emulator.
-Some may not be available for a given terminal type, and will be
-silently ignored.
-
-If the terminfo entry for your terminal is missing codes for an effect
-or has the wrong codes, you can add or override those codes in your
-configuration::
-
-  [color]
-  terminfo.dim = \E[2m
-
-where '\E' is substituted with an escape character.
+'''enable Mercurial color mode (DEPRECATED)
-Labels
-------
-
-Text receives color effects depending on the labels that it has. Many
-default Mercurial commands emit labelled text. You can also define
-your own labels in templates using the label function, see :hg:`help
-templates`. A single portion of text may have more than one label. In
-that case, effects given to the last label will override any other
-effects. This includes the special "none" effect, which nullifies
-other effects.
-
-Labels are normally invisible. In order to see these labels and their
-position in the text, use the global --color=debug option. The same
-anchor text may be associated to multiple labels, e.g.
-
-  [log.changeset changeset.secret|changeset: 22611:6f0a53c8f587]
-
-The following are the default effects for some default labels. Default
-effects may be overridden from your configuration file::
-
-  [color]
-  status.modified = blue bold underline red_background
-  status.added = green bold
-  status.removed = red bold blue_background
-  status.deleted = cyan bold underline
-  status.unknown = magenta bold underline
-  status.ignored = black bold
-
-  # 'none' turns off all effects
-  status.clean = none
-  status.copied = none
-
-  qseries.applied = blue bold underline
-  qseries.unapplied = black bold
-  qseries.missing = red bold
+This extension enables Mercurial color mode. The feature is now directly
+available in Mercurial core.
You can access it using:: - diff.diffline = bold - diff.extended = cyan bold - diff.file_a = red bold - diff.file_b = green bold - diff.hunk = magenta - diff.deleted = red - diff.inserted = green - diff.changed = white - diff.tab = - diff.trailingwhitespace = bold red_background - - # Blank so it inherits the style of the surrounding label - changeset.public = - changeset.draft = - changeset.secret = - - resolve.unresolved = red bold - resolve.resolved = green bold - - bookmarks.active = green - - branches.active = none - branches.closed = black bold - branches.current = green - branches.inactive = none - - tags.normal = green - tags.local = black bold - - rebase.rebased = blue - rebase.remaining = red bold - - shelve.age = cyan - shelve.newest = green bold - shelve.name = blue bold - - histedit.remaining = red bold - -Custom colors -------------- + [ui] + color = auto -Because there are only eight standard colors, this module allows you -to define color names for other color slots which might be available -for your terminal type, assuming terminfo mode. For instance:: - - color.brightblue = 12 - color.pink = 207 - color.orange = 202 - -to set 'brightblue' to color slot 12 (useful for 16 color terminals -that have brighter colors defined in the upper eight) and, 'pink' and -'orange' to colors in 256-color xterm's default color cube. These -defined colors may then be used as any of the pre-defined eight, -including appending '_background' to set the background to that color. - -Modes ------ - -By default, the color extension will use ANSI mode (or win32 mode on -Windows) if it detects a terminal. To override auto mode (to enable -terminfo mode, for example), set the following configuration option:: - - [color] - mode = terminfo - -Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will -disable color. - -Note that on some systems, terminfo mode may cause problems when using -color with the pager extension and less -R. less with the -R option -will only display ECMA-48 color codes, and terminfo mode may sometimes -emit codes that less doesn't understand. You can work around this by -either using ansi mode (or auto mode), or by using less -r (which will -pass through all terminal control codes, not just color control -codes). - -On some systems (such as MSYS in Windows), the terminal may support -a different color mode than the pager (activated via the "pager" -extension). It is possible to define separate modes depending on whether -the pager is active:: - - [color] - mode = auto - pagermode = ansi - -If ``pagermode`` is not defined, the ``mode`` will be used. +See :hg:`help color` for details. ''' from __future__ import absolute_import -from mercurial.i18n import _ -from mercurial import ( - cmdutil, - color, - commands, - dispatch, - encoding, - extensions, - pycompat, - subrepo, - ui as uimod, - util, -) +from mercurial import color -cmdtable = {} -command = cmdutil.command(cmdtable) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should # be specifying the version(s) of Mercurial they are tested with, or # leave the attribute unspecified. 
testedwith = 'ships-with-hg-core' -# start and stop parameters for effects -_effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33, - 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1, - 'italic': 3, 'underline': 4, 'inverse': 7, 'dim': 2, - 'black_background': 40, 'red_background': 41, - 'green_background': 42, 'yellow_background': 43, - 'blue_background': 44, 'purple_background': 45, - 'cyan_background': 46, 'white_background': 47} - -def _terminfosetup(ui, mode): - '''Initialize terminfo data and the terminal if we're in terminfo mode.''' - - # If we failed to load curses, we go ahead and return. - if not _terminfo_params: - return - # Otherwise, see what the config file says. - if mode not in ('auto', 'terminfo'): - return - - _terminfo_params.update((key[6:], (False, int(val), '')) - for key, val in ui.configitems('color') - if key.startswith('color.')) - _terminfo_params.update((key[9:], (True, '', val.replace('\\E', '\x1b'))) - for key, val in ui.configitems('color') - if key.startswith('terminfo.')) - - try: - curses.setupterm() - except curses.error as e: - _terminfo_params.clear() - return - - for key, (b, e, c) in _terminfo_params.items(): - if not b: - continue - if not c and not curses.tigetstr(e): - # Most terminals don't support dim, invis, etc, so don't be - # noisy and use ui.debug(). - ui.debug("no terminfo entry for %s\n" % e) - del _terminfo_params[key] - if not curses.tigetstr('setaf') or not curses.tigetstr('setab'): - # Only warn about missing terminfo entries if we explicitly asked for - # terminfo mode. - if mode == "terminfo": - ui.warn(_("no terminfo entry for setab/setaf: reverting to " - "ECMA-48 color\n")) - _terminfo_params.clear() - -def _modesetup(ui, coloropt): - if coloropt == 'debug': - return 'debug' - - auto = (coloropt == 'auto') - always = not auto and util.parsebool(coloropt) - if not always and not auto: - return None - - formatted = (always or (encoding.environ.get('TERM') != 'dumb' - and ui.formatted())) - - mode = ui.config('color', 'mode', 'auto') - - # If pager is active, color.pagermode overrides color.mode. - if getattr(ui, 'pageractive', False): - mode = ui.config('color', 'pagermode', mode) - - realmode = mode - if mode == 'auto': - if pycompat.osname == 'nt': - term = encoding.environ.get('TERM') - # TERM won't be defined in a vanilla cmd.exe environment. - - # UNIX-like environments on Windows such as Cygwin and MSYS will - # set TERM. They appear to make a best effort attempt at setting it - # to something appropriate. However, not all environments with TERM - # defined support ANSI. Since "ansi" could result in terminal - # gibberish, we error on the side of selecting "win32". However, if - # w32effects is not defined, we almost certainly don't support - # "win32", so don't even try. - if (term and 'xterm' in term) or not w32effects: - realmode = 'ansi' - else: - realmode = 'win32' - else: - realmode = 'ansi' - - def modewarn(): - # only warn if color.mode was explicitly set and we're in - # a formatted terminal - if mode == realmode and ui.formatted(): - ui.warn(_('warning: failed to set color mode to %s\n') % mode) - - if realmode == 'win32': - _terminfo_params.clear() - if not w32effects: - modewarn() - return None - _effects.update(w32effects) - elif realmode == 'ansi': - _terminfo_params.clear() - elif realmode == 'terminfo': - _terminfosetup(ui, mode) - if not _terminfo_params: - ## FIXME Shouldn't we return None in this case too? 
- modewarn() - realmode = 'ansi' - else: - return None - - if always or (auto and formatted): - return realmode - return None - -try: - import curses - # Mapping from effect name to terminfo attribute name (or raw code) or - # color number. This will also force-load the curses module. - _terminfo_params = {'none': (True, 'sgr0', ''), - 'standout': (True, 'smso', ''), - 'underline': (True, 'smul', ''), - 'reverse': (True, 'rev', ''), - 'inverse': (True, 'rev', ''), - 'blink': (True, 'blink', ''), - 'dim': (True, 'dim', ''), - 'bold': (True, 'bold', ''), - 'invisible': (True, 'invis', ''), - 'italic': (True, 'sitm', ''), - 'black': (False, curses.COLOR_BLACK, ''), - 'red': (False, curses.COLOR_RED, ''), - 'green': (False, curses.COLOR_GREEN, ''), - 'yellow': (False, curses.COLOR_YELLOW, ''), - 'blue': (False, curses.COLOR_BLUE, ''), - 'magenta': (False, curses.COLOR_MAGENTA, ''), - 'cyan': (False, curses.COLOR_CYAN, ''), - 'white': (False, curses.COLOR_WHITE, '')} -except ImportError: - _terminfo_params = {} - -def _effect_str(effect): - '''Helper function for render_effects().''' - - bg = False - if effect.endswith('_background'): - bg = True - effect = effect[:-11] - try: - attr, val, termcode = _terminfo_params[effect] - except KeyError: - return '' - if attr: - if termcode: - return termcode - else: - return curses.tigetstr(val) - elif bg: - return curses.tparm(curses.tigetstr('setab'), val) - else: - return curses.tparm(curses.tigetstr('setaf'), val) - -def render_effects(text, effects): - 'Wrap text in commands to turn on each effect.' - if not text: - return text - if not _terminfo_params: - start = [str(_effects[e]) for e in ['none'] + effects.split()] - start = '\033[' + ';'.join(start) + 'm' - stop = '\033[' + str(_effects['none']) + 'm' - else: - start = ''.join(_effect_str(effect) - for effect in ['none'] + effects.split()) - stop = _effect_str('none') - return ''.join([start, text, stop]) - -def valideffect(effect): - 'Determine if the effect is valid or not.' - good = False - if not _terminfo_params and effect in _effects: - good = True - elif effect in _terminfo_params or effect[:-11] in _terminfo_params: - good = True - return good - -def configstyles(ui): - for status, cfgeffects in ui.configitems('color'): - if '.' 
not in status or status.startswith(('color.', 'terminfo.')): - continue - cfgeffects = ui.configlist('color', status) - if cfgeffects: - good = [] - for e in cfgeffects: - if valideffect(e): - good.append(e) - else: - ui.warn(_("ignoring unknown color/effect %r " - "(configured in color.%s)\n") - % (e, status)) - color._styles[status] = ' '.join(good) - -class colorui(uimod.ui): - _colormode = 'ansi' - def write(self, *args, **opts): - if self._colormode is None: - return super(colorui, self).write(*args, **opts) - - label = opts.get('label', '') - if self._buffers and not opts.get('prompt', False): - if self._bufferapplylabels: - self._buffers[-1].extend(self.label(a, label) for a in args) - else: - self._buffers[-1].extend(args) - elif self._colormode == 'win32': - for a in args: - win32print(a, super(colorui, self).write, **opts) - else: - return super(colorui, self).write( - *[self.label(a, label) for a in args], **opts) - - def write_err(self, *args, **opts): - if self._colormode is None: - return super(colorui, self).write_err(*args, **opts) - - label = opts.get('label', '') - if self._bufferstates and self._bufferstates[-1][0]: - return self.write(*args, **opts) - if self._colormode == 'win32': - for a in args: - win32print(a, super(colorui, self).write_err, **opts) - else: - return super(colorui, self).write_err( - *[self.label(a, label) for a in args], **opts) - - def showlabel(self, msg, label): - if label and msg: - if msg[-1] == '\n': - return "[%s|%s]\n" % (label, msg[:-1]) - else: - return "[%s|%s]" % (label, msg) - else: - return msg - - def label(self, msg, label): - if self._colormode is None: - return super(colorui, self).label(msg, label) - - if self._colormode == 'debug': - return self.showlabel(msg, label) - - effects = [] - for l in label.split(): - s = color._styles.get(l, '') - if s: - effects.append(s) - elif valideffect(l): - effects.append(l) - effects = ' '.join(effects) - if effects: - return '\n'.join([render_effects(line, effects) - for line in msg.split('\n')]) - return msg - -def uisetup(ui): - if ui.plain(): - return - if not isinstance(ui, colorui): - colorui.__bases__ = (ui.__class__,) - ui.__class__ = colorui - def colorcmd(orig, ui_, opts, cmd, cmdfunc): - mode = _modesetup(ui_, opts['color']) - colorui._colormode = mode - if mode and mode != 'debug': - configstyles(ui_) - return orig(ui_, opts, cmd, cmdfunc) - def colorgit(orig, gitsub, commands, env=None, stream=False, cwd=None): - if gitsub.ui._colormode and len(commands) and commands[0] == "diff": - # insert the argument in the front, - # the end of git diff arguments is used for paths - commands.insert(1, '--color') - return orig(gitsub, commands, env, stream, cwd) - extensions.wrapfunction(dispatch, '_runcommand', colorcmd) - extensions.wrapfunction(subrepo.gitsubrepo, '_gitnodir', colorgit) - def extsetup(ui): - commands.globalopts.append( - ('', 'color', 'auto', - # i18n: 'always', 'auto', 'never', and 'debug' are keywords - # and should not be translated - _("when to colorize (boolean, always, auto, never, or debug)"), - _('TYPE'))) - -@command('debugcolor', - [('', 'style', None, _('show all configured styles'))], - 'hg debugcolor') -def debugcolor(ui, repo, **opts): - """show available color, effects or style""" - ui.write(('color mode: %s\n') % ui._colormode) - if opts.get('style'): - return _debugdisplaystyle(ui) - else: - return _debugdisplaycolor(ui) - -def _debugdisplaycolor(ui): - oldstyle = color._styles.copy() - try: - color._styles.clear() - for effect in _effects.keys(): - 
color._styles[effect] = effect - if _terminfo_params: - for k, v in ui.configitems('color'): - if k.startswith('color.'): - color._styles[k] = k[6:] - elif k.startswith('terminfo.'): - color._styles[k] = k[9:] - ui.write(_('available colors:\n')) - # sort label with a '_' after the other to group '_background' entry. - items = sorted(color._styles.items(), - key=lambda i: ('_' in i[0], i[0], i[1])) - for colorname, label in items: - ui.write(('%s\n') % colorname, label=label) - finally: - color._styles.clear() - color._styles.update(oldstyle) - -def _debugdisplaystyle(ui): - ui.write(_('available style:\n')) - width = max(len(s) for s in color._styles) - for label, effects in sorted(color._styles.items()): - ui.write('%s' % label, label=label) - if effects: - # 50 - ui.write(': ') - ui.write(' ' * (max(0, width - len(label)))) - ui.write(', '.join(ui.label(e, e) for e in effects.split())) - ui.write('\n') - -if pycompat.osname != 'nt': - w32effects = None -else: - import ctypes - import re - - _kernel32 = ctypes.windll.kernel32 - - _WORD = ctypes.c_ushort - - _INVALID_HANDLE_VALUE = -1 - - class _COORD(ctypes.Structure): - _fields_ = [('X', ctypes.c_short), - ('Y', ctypes.c_short)] - - class _SMALL_RECT(ctypes.Structure): - _fields_ = [('Left', ctypes.c_short), - ('Top', ctypes.c_short), - ('Right', ctypes.c_short), - ('Bottom', ctypes.c_short)] - - class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): - _fields_ = [('dwSize', _COORD), - ('dwCursorPosition', _COORD), - ('wAttributes', _WORD), - ('srWindow', _SMALL_RECT), - ('dwMaximumWindowSize', _COORD)] - - _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11 - _STD_ERROR_HANDLE = 0xfffffff4 # (DWORD)-12 - - _FOREGROUND_BLUE = 0x0001 - _FOREGROUND_GREEN = 0x0002 - _FOREGROUND_RED = 0x0004 - _FOREGROUND_INTENSITY = 0x0008 - - _BACKGROUND_BLUE = 0x0010 - _BACKGROUND_GREEN = 0x0020 - _BACKGROUND_RED = 0x0040 - _BACKGROUND_INTENSITY = 0x0080 - - _COMMON_LVB_REVERSE_VIDEO = 0x4000 - _COMMON_LVB_UNDERSCORE = 0x8000 - - # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx - w32effects = { - 'none': -1, - 'black': 0, - 'red': _FOREGROUND_RED, - 'green': _FOREGROUND_GREEN, - 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN, - 'blue': _FOREGROUND_BLUE, - 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED, - 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN, - 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE, - 'bold': _FOREGROUND_INTENSITY, - 'black_background': 0x100, # unused value > 0x0f - 'red_background': _BACKGROUND_RED, - 'green_background': _BACKGROUND_GREEN, - 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN, - 'blue_background': _BACKGROUND_BLUE, - 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED, - 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN, - 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN | - _BACKGROUND_BLUE), - 'bold_background': _BACKGROUND_INTENSITY, - 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only - 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only - } - - passthrough = set([_FOREGROUND_INTENSITY, - _BACKGROUND_INTENSITY, - _COMMON_LVB_UNDERSCORE, - _COMMON_LVB_REVERSE_VIDEO]) - - stdout = _kernel32.GetStdHandle( - _STD_OUTPUT_HANDLE) # don't close the handle returned - if stdout is None or stdout == _INVALID_HANDLE_VALUE: - w32effects = None - else: - csbi = _CONSOLE_SCREEN_BUFFER_INFO() - if not _kernel32.GetConsoleScreenBufferInfo( - stdout, ctypes.byref(csbi)): - # stdout may not support GetConsoleScreenBufferInfo() - # when called from subprocess 
or redirected - w32effects = None - else: - origattr = csbi.wAttributes - ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)', - re.MULTILINE | re.DOTALL) - - def win32print(text, orig, **opts): - label = opts.get('label', '') - attr = origattr - - def mapcolor(val, attr): - if val == -1: - return origattr - elif val in passthrough: - return attr | val - elif val > 0x0f: - return (val & 0x70) | (attr & 0x8f) - else: - return (val & 0x07) | (attr & 0xf8) - - # determine console attributes based on labels - for l in label.split(): - style = color._styles.get(l, '') - for effect in style.split(): - try: - attr = mapcolor(w32effects[effect], attr) - except KeyError: - # w32effects could not have certain attributes so we skip - # them if not found - pass - # hack to ensure regexp finds data - if not text.startswith('\033['): - text = '\033[m' + text - - # Look for ANSI-like codes embedded in text - m = re.match(ansire, text) - - try: - while m: - for sattr in m.group(1).split(';'): - if sattr: - attr = mapcolor(int(sattr), attr) - _kernel32.SetConsoleTextAttribute(stdout, attr) - orig(m.group(2), **opts) - m = re.match(ansire, m.group(3)) - finally: - # Explicitly reset original attributes - _kernel32.SetConsoleTextAttribute(stdout, origattr) + # change default color config + color._enabledbydefault = True
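The ECMA-48 branch of the removed render_effects() lives on in mercurial/color.py; its core is just wrapping text in SGR start/stop sequences. A standalone sketch with a trimmed effects table::

    _effects = {'none': 0, 'red': 31, 'green': 32, 'bold': 1,
                'red_background': 41}

    def render_effects(text, effects):
        # '\033[<n>;<n>;...m' enables effects; the leading 'none' (0)
        # resets state first, and the stop sequence resets it again.
        start = '\033[' + ';'.join(str(_effects[e])
                                   for e in ['none'] + effects.split()) + 'm'
        stop = '\033[' + str(_effects['none']) + 'm'
        return start + text + stop

    print(render_effects('+added line', 'green bold'))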
--- a/hgext/convert/subversion.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/convert/subversion.py Sat Mar 11 13:53:14 2017 -0500 @@ -13,8 +13,8 @@ encoding, error, pycompat, - scmutil, util, + vfs as vfsmod, ) from . import common @@ -1146,8 +1146,8 @@ self.run0('checkout', path, wcpath) self.wc = wcpath - self.opener = scmutil.opener(self.wc) - self.wopener = scmutil.opener(self.wc) + self.opener = vfsmod.vfs(self.wc) + self.wopener = vfsmod.vfs(self.wc) self.childmap = mapfile(ui, self.join('hg-childmap')) if util.checkexec(self.wc): self.is_exec = util.isexec @@ -1306,7 +1306,7 @@ self.setexec = [] fd, messagefile = tempfile.mkstemp(prefix='hg-convert-') - fp = os.fdopen(fd, 'w') + fp = os.fdopen(fd, pycompat.sysstr('w')) fp.write(commit.desc) fp.close() try:
--- a/hgext/extdiff.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/extdiff.py Sat Mar 11 13:53:14 2017 -0500 @@ -273,7 +273,7 @@ cmdline = re.sub(regex, quote, cmdline) ui.debug('running %r in %s\n' % (cmdline, tmproot)) - ui.system(cmdline, cwd=tmproot) + ui.system(cmdline, cwd=tmproot, blockedtag='extdiff') for copy_fn, working_fn, mtime in fns_and_mtime: if os.lstat(copy_fn).st_mtime != mtime:
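The new blockedtag argument to ui.system() (the hgk and histedit hunks below grow the same annotation) feeds Mercurial's accounting of time spent blocked on external programs. A rough illustration of the idea, not Mercurial's implementation::

    import contextlib
    import time

    blocked = {}

    @contextlib.contextmanager
    def timeblockedsection(tag):
        # attribute wall-clock time spent waiting to a named tag
        start = time.time()
        try:
            yield
        finally:
            blocked[tag] = blocked.get(tag, 0.0) + time.time() - start

    with timeblockedsection('extdiff'):
        time.sleep(0.01)  # stands in for running the external diff tool

    print(blocked)  # e.g. {'extdiff': 0.0101...}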
--- a/hgext/fsmonitor/state.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/fsmonitor/state.py Sat Mar 11 13:53:14 2017 -0500 @@ -20,7 +20,7 @@ class state(object): def __init__(self, repo): - self._opener = repo.opener + self._vfs = repo.vfs self._ui = repo.ui self._rootdir = pathutil.normasprefix(repo.root) self._lastclock = None @@ -33,7 +33,7 @@ def get(self): try: - file = self._opener('fsmonitor.state', 'rb') + file = self._vfs('fsmonitor.state', 'rb') except IOError as inst: if inst.errno != errno.ENOENT: raise @@ -91,7 +91,7 @@ return try: - file = self._opener('fsmonitor.state', 'wb', atomictemp=True) + file = self._vfs('fsmonitor.state', 'wb', atomictemp=True) except (IOError, OSError): self._ui.warn(_("warning: unable to write out fsmonitor state\n")) return
--- a/hgext/gpg.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/gpg.py Sat Mar 11 13:53:14 2017 -0500 @@ -18,6 +18,7 @@ error, match, node as hgnode, + pycompat, util, ) @@ -44,11 +45,11 @@ try: # create temporary files fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig") - fp = os.fdopen(fd, 'wb') + fp = os.fdopen(fd, pycompat.sysstr('wb')) fp.write(sig) fp.close() fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt") - fp = os.fdopen(fd, 'wb') + fp = os.fdopen(fd, pycompat.sysstr('wb')) fp.write(data) fp.close() gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
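This fdopen() change (and the matching one in the convert hunk above) is Python 3 porting: Mercurial string literals are bytes there, but os.fdopen() wants a native str mode, which pycompat.sysstr() provides. A self-contained sketch with a simplified stand-in for sysstr()::

    import os
    import sys
    import tempfile

    def sysstr(s):
        # simplified stand-in for mercurial.pycompat.sysstr()
        if sys.version_info[0] >= 3 and isinstance(s, bytes):
            return s.decode('latin-1')
        return s

    fd, path = tempfile.mkstemp(prefix='hg-gpg-', suffix='.sig')
    fp = os.fdopen(fd, sysstr(b'wb'))  # b'wb' alone raises ValueError on Python 3
    fp.write(b'signature bytes')
    fp.close()
    os.unlink(path)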
--- a/hgext/hgk.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/hgk.py Sat Mar 11 13:53:14 2017 -0500 @@ -345,4 +345,4 @@ cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc)) ui.debug("running %s\n" % cmd) - ui.system(cmd) + ui.system(cmd, blockedtag='hgk_view')
--- a/hgext/histedit.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/histedit.py Sat Mar 11 13:53:14 2017 -0500 @@ -36,7 +36,7 @@ # p, pick = use commit # e, edit = use commit, but stop for amending # f, fold = use commit, but combine it with the one above - # r, roll = like fold, but discard this commit's description + # r, roll = like fold, but discard this commit's description and date # d, drop = remove commit from history # m, mess = edit commit message without changing commit content # @@ -58,7 +58,7 @@ # p, pick = use commit # e, edit = use commit, but stop for amending # f, fold = use commit, but combine it with the one above - # r, roll = like fold, but discard this commit's description + # r, roll = like fold, but discard this commit's description and date # d, drop = remove commit from history # m, mess = edit commit message without changing commit content # @@ -71,11 +71,11 @@ *** Add delta -Edit the commit message to your liking, then close the editor. For -this example, let's assume that the commit message was changed to -``Add beta and delta.`` After histedit has run and had a chance to -remove any old or temporary revisions it needed, the history looks -like this:: +Edit the commit message to your liking, then close the editor. The date used +for the commit will be the later of the two commits' dates. For this example, +let's assume that the commit message was changed to ``Add beta and delta.`` +After histedit has run and had a chance to remove any old or temporary +revisions it needed, the history looks like this:: @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42 | Add beta and delta. @@ -97,9 +97,10 @@ allowing you to edit files freely, or even use ``hg record`` to commit some changes as a separate commit. When you're done, any remaining uncommitted changes will be committed as well. When done, run ``hg -histedit --continue`` to finish this step. You'll be prompted for a -new commit message, but the default commit message will be the -original message for the ``edit`` ed revision. +histedit --continue`` to finish this step. If there are uncommitted +changes, you'll be prompted for a new commit message, but the default +commit message will be the original message for the ``edit`` ed +revision, and the date of the original commit will be preserved. The ``message`` operation will give you a chance to revise a commit message without changing the contents. It's a shortcut for doing @@ -724,6 +725,15 @@ """ return True + def firstdate(self): + """Returns true if the rule should preserve the date of the first + change. + + This exists mainly so that 'rollup' rules can be a subclass of + 'fold'. 
+ """ + return False + def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges): parent = ctx.parents()[0].node() repo.ui.pushbuffer() @@ -742,7 +752,10 @@ [oldctx.description()]) + '\n' commitopts['message'] = newmessage # date - commitopts['date'] = max(ctx.date(), oldctx.date()) + if self.firstdate(): + commitopts['date'] = ctx.date() + else: + commitopts['date'] = max(ctx.date(), oldctx.date()) extra = ctx.extra().copy() # histedit_source # note: ctx is likely a temporary commit but that the best we can do @@ -809,7 +822,7 @@ return True @action(["roll", "r"], - _("like fold, but discard this commit's description")) + _("like fold, but discard this commit's description and date")) class rollup(fold): def mergedescs(self): return False @@ -817,6 +830,9 @@ def skipprompt(self): return True + def firstdate(self): + return True + @action(["drop", "d"], _('remove commit from history')) class drop(histeditaction): @@ -884,11 +900,11 @@ - `mess` to reword the changeset commit message - - `fold` to combine it with the preceding changeset + - `fold` to combine it with the preceding changeset (using the later date) - - `roll` like fold, but discarding this commit's description + - `roll` like fold, but discarding this commit's description and date - - `edit` to edit this changeset + - `edit` to edit this changeset (preserving date) There are a number of ways to select the root changeset: @@ -992,7 +1008,8 @@ def _readfile(ui, path): if path == '-': - return ui.fin.read() + with ui.timeblockedsection('histedit'): + return ui.fin.read() else: with open(path, 'rb') as f: return f.read()
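The date rule added above is small but easy to misread: a plain fold keeps the newer of the two commit dates, while roll, whose firstdate() returns True, keeps the first commit's date. Restated standalone (Mercurial dates are (unixtime, offset) pairs)::

    def foldeddate(firstdate, ctxdate, oldctxdate):
        # mirrors fold.finishfold(); rollup overrides firstdate() -> True
        if firstdate:
            return ctxdate
        return max(ctxdate, oldctxdate)

    # fold: the later date wins; roll: the first commit's date survives
    assert foldeddate(False, (100, 0), (200, 0)) == (200, 0)
    assert foldeddate(True, (100, 0), (200, 0)) == (100, 0)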
--- a/hgext/largefiles/lfutil.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/largefiles/lfutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -27,6 +27,7 @@ pycompat, scmutil, util, + vfs as vfsmod, ) shortname = '.hglf' @@ -144,7 +145,7 @@ ''' vfs = repo.vfs lfstoredir = longname - opener = scmutil.opener(vfs.join(lfstoredir)) + opener = vfsmod.vfs(vfs.join(lfstoredir)) lfdirstate = largefilesdirstate(opener, ui, repo.root, repo.dirstate._validate)
--- a/hgext/largefiles/overrides.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/largefiles/overrides.py Sat Mar 11 13:53:14 2017 -0500 @@ -22,8 +22,8 @@ match as matchmod, pathutil, registrar, - revset, scmutil, + smartset, util, ) @@ -855,7 +855,7 @@ firstpulled = repo.firstpulled except AttributeError: raise error.Abort(_("pulled() only available in --lfrev")) - return revset.baseset([r for r in subset if r >= firstpulled]) + return smartset.baseset([r for r in subset if r >= firstpulled]) def overrideclone(orig, ui, source, dest=None, **opts): d = dest @@ -993,9 +993,9 @@ archiver.done() -def hgsubrepoarchive(orig, repo, archiver, prefix, match=None): +def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True): if not repo._repo.lfstatus: - return orig(repo, archiver, prefix, match) + return orig(repo, archiver, prefix, match, decode) repo._get(repo._state + ('hg',)) rev = repo._state[1] @@ -1010,6 +1010,8 @@ if match and not match(f): return data = getdata() + if decode: + data = repo._repo.wwritedata(name, data) archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data) @@ -1037,7 +1039,7 @@ sub = ctx.workingsub(subpath) submatch = matchmod.subdirmatcher(subpath, match) sub._repo.lfstatus = True - sub.archive(archiver, prefix + repo._path + '/', submatch) + sub.archive(archiver, prefix + repo._path + '/', submatch, decode) # If a largefile is modified, the change is not reflected in its # standin until a commit. cmdutil.bailifchanged() raises an exception
--- a/hgext/mq.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/mq.py Sat Mar 11 13:53:14 2017 -0500 @@ -14,7 +14,7 @@ Known patches are represented as patch files in the .hg/patches directory. Applied patches are both patch files and changesets. -Common tasks (use :hg:`help command` for more details):: +Common tasks (use :hg:`help COMMAND` for more details):: create new patch qnew import existing patch qimport @@ -89,10 +89,12 @@ phases, pycompat, registrar, - revset, + revsetlang, scmutil, + smartset, subrepo, util, + vfs as vfsmod, ) release = lockmod.release @@ -433,7 +435,7 @@ except IOError: curpath = os.path.join(path, 'patches') self.path = patchdir or curpath - self.opener = scmutil.opener(self.path) + self.opener = vfsmod.vfs(self.path) self.ui = ui self.baseui = baseui self.applieddirty = False @@ -2675,6 +2677,7 @@ Returns 0 on success. """ + ui.pager('qdiff') repo.mq.diff(repo, pats, opts) return 0 @@ -3567,9 +3570,9 @@ def revsetmq(repo, subset, x): """Changesets managed by MQ. """ - revset.getargs(x, 0, 0, _("mq takes no arguments")) + revsetlang.getargs(x, 0, 0, _("mq takes no arguments")) applied = set([repo[r.node].rev() for r in repo.mq.applied]) - return revset.baseset([r for r in subset if r in applied]) + return smartset.baseset([r for r in subset if r in applied]) # tell hggettext to extract docstrings from these functions: i18nfunctions = [revsetmq]
--- a/hgext/pager.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/pager.py Sat Mar 11 13:53:14 2017 -0500 @@ -12,68 +12,22 @@ # # Run 'hg help pager' to get info on configuration. -'''browse command output with an external pager - -To set the pager that should be used, set the application variable:: - - [pager] - pager = less -FRX - -If no pager is set, the pager extensions uses the environment variable -$PAGER. If neither pager.pager, nor $PAGER is set, no pager is used. - -You can disable the pager for certain commands by adding them to the -pager.ignore list:: +'''browse command output with an external pager (DEPRECATED) - [pager] - ignore = version, help, update - -You can also enable the pager only for certain commands using -pager.attend. Below is the default list of commands to be paged:: - - [pager] - attend = annotate, cat, diff, export, glog, log, qdiff - -Setting pager.attend to an empty value will cause all commands to be -paged. - -If pager.attend is present, pager.ignore will be ignored. - -Lastly, you can enable and disable paging for individual commands with -the attend-<command> option. This setting takes precedence over -existing attend and ignore options and defaults:: +Forcibly enable paging for individual commands that don't typically +request pagination with the attend-<command> option. This setting +takes precedence over ignore options and defaults:: [pager] attend-cat = false - -To ignore global commands like :hg:`version` or :hg:`help`, you have -to specify them in your user configuration file. - -To control whether the pager is used at all for an individual command, -you can use --pager=<value>:: - - - use as needed: `auto`. - - require the pager: `yes` or `on`. - - suppress the pager: `no` or `off` (any unrecognized value - will also work). - ''' from __future__ import absolute_import -import atexit -import os -import signal -import subprocess -import sys - -from mercurial.i18n import _ from mercurial import ( cmdutil, commands, dispatch, - encoding, extensions, - util, ) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for @@ -82,58 +36,12 @@ # leave the attribute unspecified. 
testedwith = 'ships-with-hg-core' -def _runpager(ui, p): - pager = subprocess.Popen(p, shell=True, bufsize=-1, - close_fds=util.closefds, stdin=subprocess.PIPE, - stdout=util.stdout, stderr=util.stderr) - - # back up original file objects and descriptors - olduifout = ui.fout - oldstdout = util.stdout - stdoutfd = os.dup(util.stdout.fileno()) - stderrfd = os.dup(util.stderr.fileno()) - - # create new line-buffered stdout so that output can show up immediately - ui.fout = util.stdout = newstdout = os.fdopen(util.stdout.fileno(), 'wb', 1) - os.dup2(pager.stdin.fileno(), util.stdout.fileno()) - if ui._isatty(util.stderr): - os.dup2(pager.stdin.fileno(), util.stderr.fileno()) - - @atexit.register - def killpager(): - if util.safehasattr(signal, "SIGINT"): - signal.signal(signal.SIGINT, signal.SIG_IGN) - pager.stdin.close() - ui.fout = olduifout - util.stdout = oldstdout - # close new stdout while it's associated with pager; otherwise stdout - # fd would be closed when newstdout is deleted - newstdout.close() - # restore original fds: stdout is open again - os.dup2(stdoutfd, util.stdout.fileno()) - os.dup2(stderrfd, util.stderr.fileno()) - pager.wait() - def uisetup(ui): - class pagerui(ui.__class__): - def _runpager(self, pagercmd): - _runpager(self, pagercmd) - - ui.__class__ = pagerui def pagecmd(orig, ui, options, cmd, cmdfunc): - p = ui.config("pager", "pager", encoding.environ.get("PAGER")) - usepager = False - always = util.parsebool(options['pager']) auto = options['pager'] == 'auto' - - if not p or '--debugger' in sys.argv or not ui.formatted(): - pass - elif always: - usepager = True - elif not auto: + if auto and not ui.pageractive: usepager = False - else: attend = ui.configlist('pager', 'attend', attended) ignore = ui.configlist('pager', 'ignore') cmds, _ = cmdutil.findcmd(cmd, commands.table) @@ -148,27 +56,18 @@ usepager = True break - setattr(ui, 'pageractive', usepager) - - if usepager: - ui.setconfig('ui', 'formatted', ui.formatted(), 'pager') - ui.setconfig('ui', 'interactive', False, 'pager') - if util.safehasattr(signal, "SIGPIPE"): - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - ui._runpager(p) + if usepager: + # Slight hack: the attend list is supposed to override + # the ignore list for the pager extension, but the + # core code doesn't know about attend, so we have to + # lobotomize the ignore list so that the extension's + # behavior is preserved. + ui.setconfig('pager', 'ignore', '', 'pager') + ui.pager('extension-via-attend-' + cmd) return orig(ui, options, cmd, cmdfunc) - # Wrap dispatch._runcommand after color is loaded so color can see - # ui.pageractive. Otherwise, if we loaded first, color's wrapped - # dispatch._runcommand would run without having access to ui.pageractive. - def afterloaded(loaded): - extensions.wrapfunction(dispatch, '_runcommand', pagecmd) - extensions.afterloaded('color', afterloaded) + extensions.wrapfunction(dispatch, '_runcommand', pagecmd) -def extsetup(ui): - commands.globalopts.append( - ('', 'pager', 'auto', - _("when to paginate (boolean, always, auto, or never)"), - _('TYPE'))) - -attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff'] +attended = [ + 'the-default-attend-list-is-now-empty-but-that-breaks-the-extension', +]
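After this rewrite the extension only keeps the attend machinery alive; core ui.pager() does the actual paging. The precedence it preserves can be sketched as follows (the function and its arguments are hypothetical simplifications, not the extension's API)::

    def usepager(cmd, percmd, attend, ignore):
        # attend-<command> beats everything; otherwise a non-empty
        # attend list wins over the ignore list
        if cmd in percmd:
            return percmd[cmd]       # [pager] attend-<cmd> = yes/no
        if attend:
            return cmd in attend     # [pager] attend = cmd, ...
        return cmd not in ignore     # [pager] ignore = cmd, ...

    assert usepager('cat', {'cat': False}, ['cat'], []) is False
    assert usepager('log', {}, ['log'], []) is True
    assert usepager('version', {}, [], ['version']) is False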
--- a/hgext/patchbomb.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/patchbomb.py Sat Mar 11 13:53:14 2017 -0500 @@ -60,6 +60,14 @@ intro=never # never include an introduction message intro=always # always include an introduction message +You can specify a template for flags to be added in subject prefixes. Flags
+specified by the --flag option are exported as the ``{flags}`` keyword::
+
+ [patchbomb]
+ flagtemplate = "{separate(' ',
+ ifeq(branch, 'default', '', branch|upper),
+ flags)}"
+
 You can set patchbomb to always ask for confirmation by setting ``patchbomb.confirm`` to true. ''' @@ -77,11 +85,13 @@ commands, encoding, error, + formatter, hg, mail, node as nodemod, patch, scmutil, + templater, util, ) stringio = util.stringio @@ -135,7 +145,32 @@ intro = 1 < number return intro -def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered, +def _formatflags(ui, repo, rev, flags): + """build the flag string, optionally via a template""" + tmpl = ui.config('patchbomb', 'flagtemplate') + if not tmpl: + return ' '.join(flags) + out = util.stringio() + opts = {'template': templater.unquotestring(tmpl)} + with formatter.templateformatter(ui, out, 'patchbombflag', opts) as fm: + fm.startitem() + fm.context(ctx=repo[rev]) + fm.write('flags', '%s', fm.formatlist(flags, name='flag')) + return out.getvalue() + +def _formatprefix(ui, repo, rev, flags, idx, total, numbered): + """build the prefix for the patch subject""" + flag = _formatflags(ui, repo, rev, flags) + if flag: + flag = ' ' + flag + + if not numbered: + return '[PATCH%s]' % flag + else: + tlen = len(str(total)) + return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag) + +def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered, patchname=None): desc = [] @@ -202,16 +237,13 @@ else: msg = mail.mimetextpatch(body, display=opts.get('test')) - flag = ' '.join(opts.get('flag')) - if flag: - flag = ' ' + flag - + prefix = _formatprefix(ui, repo, rev, opts.get('flag'), idx, total, + numbered) subj = desc[0].strip().rstrip('. 
') if not numbered: - subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj) + subj = ' '.join([prefix, opts.get('subject') or subj]) else: - tlen = len(str(total)) - subj = '[PATCH %0*d of %d%s] %s' % (tlen, idx, total, flag, subj) + subj = ' '.join([prefix, subj]) msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test')) msg['X-Mercurial-Node'] = node msg['X-Mercurial-Series-Index'] = '%i' % idx @@ -303,19 +335,16 @@ msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test')) return [(msg, subj, None)] -def _makeintro(repo, sender, patches, **opts): +def _makeintro(repo, sender, revs, patches, **opts): """make an introduction email, asking the user for content if needed email is returned as (subject, body, cumulative-diffstat)""" ui = repo.ui _charsets = mail._charsets(ui) - tlen = len(str(len(patches))) - flag = opts.get('flag') or '' - if flag: - flag = ' ' + ' '.join(flag) - prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag) - + # use the last revision which is likely to be a bookmarked head + prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'), + 0, len(patches), numbered=True) subj = (opts.get('subject') or prompt(ui, '(optional) Subject: ', rest=prefix, default='')) if not subj: @@ -337,7 +366,7 @@ opts.get('test')) return (msg, subj, diffstat) -def _getpatchmsgs(repo, sender, patches, patchnames=None, **opts): +def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts): """return a list of emails from a list of patches This involves introduction message creation if necessary. @@ -346,6 +375,7 @@ """ ui = repo.ui _charsets = mail._charsets(ui) + patches = list(_getpatches(repo, revs, **opts)) msgs = [] ui.write(_('this patch series consists of %d patches.\n\n') @@ -353,7 +383,7 @@ # build the intro message, or skip it if the user declines if introwanted(ui, opts, len(patches)): - msg = _makeintro(repo, sender, patches, **opts) + msg = _makeintro(repo, sender, revs, patches, **opts) if msg: msgs.append(msg) @@ -362,10 +392,11 @@ # now generate the actual patch messages name = None - for i, p in enumerate(patches): + assert len(revs) == len(patches) + for i, (r, p) in enumerate(zip(revs, patches)): if patchnames: name = patchnames[i] - msg = makepatch(ui, repo, p, opts, _charsets, i + 1, + msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1, len(patches), numbered, name) msgs.append(msg) @@ -511,14 +542,12 @@ mbox = opts.get('mbox') outgoing = opts.get('outgoing') rev = opts.get('rev') - # internal option used by pbranches - patches = opts.get('patches') if not (opts.get('test') or mbox): # really sending mail.validateconfig(ui) - if not (revs or rev or outgoing or bundle or patches): + if not (revs or rev or outgoing or bundle): raise error.Abort(_('specify at least one changeset with -r or -o')) if outgoing and bundle: @@ -590,17 +619,13 @@ ui.config('patchbomb', 'from') or prompt(ui, 'From', ui.username())) - if patches: - msgs = _getpatchmsgs(repo, sender, patches, opts.get('patchnames'), - **opts) - elif bundle: + if bundle: bundledata = _getbundle(repo, dest, **opts) bundleopts = opts.copy() bundleopts.pop('bundle', None) # already processed msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts) else: - _patches = list(_getpatches(repo, revs, **opts)) - msgs = _getpatchmsgs(repo, sender, _patches, **opts) + msgs = _getpatchmsgs(repo, sender, revs, **opts) showaddrs = []
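The subject-prefix construction in _formatprefix() above is easy to exercise standalone; in this sketch the flags have already been rendered to a single string, and %0*d zero-pads the patch index to the width of the series total:

    def formatprefix(flags, idx, total, numbered):
        flag = ' ' + flags if flags else ''
        if not numbered:
            return '[PATCH%s]' % flag
        tlen = len(str(total))
        return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)

    assert formatprefix('RFC', 0, 10, numbered=True) == '[PATCH 00 of 10 RFC]'
    assert formatprefix('', 3, 10, numbered=True) == '[PATCH 03 of 10]'
    assert formatprefix('V2', 1, 1, numbered=False) == '[PATCH V2]'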
--- a/hgext/rebase.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/rebase.py Sat Mar 11 13:53:14 2017 -0500 @@ -47,6 +47,7 @@ repoview, revset, scmutil, + smartset, util, ) @@ -118,8 +119,8 @@ # i18n: "_rebasedefaultdest" is a keyword sourceset = None if x is not None: - sourceset = revset.getset(repo, revset.fullreposet(repo), x) - return subset & revset.baseset([_destrebase(repo, sourceset)]) + sourceset = revset.getset(repo, smartset.fullreposet(repo), x) + return subset & smartset.baseset([_destrebase(repo, sourceset)]) class rebaseruntime(object): """This class is a container for rebase runtime state""" @@ -158,6 +159,37 @@ self.keepopen = opts.get('keepopen', False) self.obsoletenotrebased = {} + def storestatus(self, tr=None): + """Store the current status to allow recovery""" + if tr: + tr.addfilegenerator('rebasestate', ('rebasestate',), + self._writestatus, location='plain') + else: + with self.repo.vfs("rebasestate", "w") as f: + self._writestatus(f) + + def _writestatus(self, f): + repo = self.repo + f.write(repo[self.originalwd].hex() + '\n') + f.write(repo[self.target].hex() + '\n') + f.write(repo[self.external].hex() + '\n') + f.write('%d\n' % int(self.collapsef)) + f.write('%d\n' % int(self.keepf)) + f.write('%d\n' % int(self.keepbranchesf)) + f.write('%s\n' % (self.activebookmark or '')) + for d, v in self.state.iteritems(): + oldrev = repo[d].hex() + if v >= 0: + newrev = repo[v].hex() + elif v == revtodo: + # To maintain format compatibility, we have to use nullid. + # Please do remove this special case when upgrading the format. + newrev = hex(nullid) + else: + newrev = v + f.write("%s:%s\n" % (oldrev, newrev)) + repo.ui.debug('rebase status stored\n') + def restorestatus(self): """Restore a previously stored status""" repo = self.repo @@ -218,7 +250,7 @@ repo.ui.debug('computed skipped revs: %s\n' % (' '.join(str(r) for r in sorted(skipped)) or None)) repo.ui.debug('rebase status resumed\n') - _setrebasesetvisibility(repo, state.keys()) + _setrebasesetvisibility(repo, set(state.keys()) | set([originalwd])) self.originalwd = originalwd self.target = target @@ -251,7 +283,7 @@ def _prepareabortorcontinue(self, isabort): try: self.restorestatus() - self.collapsemsg = restorecollapsemsg(self.repo) + self.collapsemsg = restorecollapsemsg(self.repo, isabort) except error.RepoLookupError: if isabort: clearstatus(self.repo) @@ -311,7 +343,7 @@ if dest.closesbranch() and not self.keepbranchesf: self.ui.status(_('reopening closed branch head %s\n') % dest) - def _performrebase(self): + def _performrebase(self, tr): repo, ui, opts = self.repo, self.ui, self.opts if self.keepbranchesf: # insert _savebranch at the start of extrafns so if @@ -337,6 +369,10 @@ if self.activebookmark: bookmarks.deactivate(repo) + # Store the state before we begin so users can run 'hg rebase --abort' + # if we fail before the transaction closes. 
+ self.storestatus() + sortedrevs = repo.revs('sort(%ld, -topo)', self.state) cands = [k for k, v in self.state.iteritems() if v == revtodo] total = len(cands) @@ -357,10 +393,7 @@ self.state, self.targetancestors, self.obsoletenotrebased) - storestatus(repo, self.originalwd, self.target, - self.state, self.collapsef, self.keepf, - self.keepbranchesf, self.external, - self.activebookmark) + self.storestatus(tr=tr) storecollapsemsg(repo, self.collapsemsg) if len(repo[None].parents()) == 2: repo.ui.debug('resuming interrupted rebase\n') @@ -678,7 +711,12 @@ if retcode is not None: return retcode - rbsrt._performrebase() + with repo.transaction('rebase') as tr: + try: + rbsrt._performrebase(tr) + except error.InterventionRequired: + tr.close() + raise rbsrt._finishrebase() finally: release(lock, wlock) @@ -1063,7 +1101,7 @@ 'Remove collapse message file' util.unlinkpath(repo.join("last-message.txt"), ignoremissing=True) -def restorecollapsemsg(repo): +def restorecollapsemsg(repo, isabort): 'Restore previously stored collapse message' try: f = repo.vfs("last-message.txt") @@ -1072,34 +1110,13 @@ except IOError as err: if err.errno != errno.ENOENT: raise - raise error.Abort(_('no rebase in progress')) + if isabort: + # Oh well, just abort like normal + collapsemsg = '' + else: + raise error.Abort(_('missing .hg/last-message.txt for rebase')) return collapsemsg -def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches, - external, activebookmark): - 'Store the current status to allow recovery' - f = repo.vfs("rebasestate", "w") - f.write(repo[originalwd].hex() + '\n') - f.write(repo[target].hex() + '\n') - f.write(repo[external].hex() + '\n') - f.write('%d\n' % int(collapse)) - f.write('%d\n' % int(keep)) - f.write('%d\n' % int(keepbranches)) - f.write('%s\n' % (activebookmark or '')) - for d, v in state.iteritems(): - oldrev = repo[d].hex() - if v >= 0: - newrev = repo[v].hex() - elif v == revtodo: - # To maintain format compatibility, we have to use nullid. - # Please do remove this special case when upgrading the format. - newrev = hex(nullid) - else: - newrev = v - f.write("%s:%s\n" % (oldrev, newrev)) - f.close() - repo.ui.debug('rebase status stored\n') - def clearstatus(repo): 'Remove the status files' _clearrebasesetvisibiliy(repo) @@ -1155,8 +1172,11 @@ if rebased: strippoints = [ c.node() for c in repo.set('roots(%ld)', rebased)] - shouldupdate = len([ - c.node() for c in repo.set('. & (%ld)', rebased)]) > 0 + + updateifonnodes = set(rebased) + updateifonnodes.add(target) + updateifonnodes.add(originalwd) + shouldupdate = repo['.'].rev() in updateifonnodes # Update away from the rebase if necessary if shouldupdate or needupdate(repo, state): @@ -1183,7 +1203,8 @@ dest: context rebaseset: set of rev ''' - _setrebasesetvisibility(repo, rebaseset) + originalwd = repo['.'].rev() + _setrebasesetvisibility(repo, set(rebaseset) | set([originalwd])) # This check isn't strictly necessary, since mq detects commits over an # applied patch. 
But it prevents messing up the working directory when @@ -1268,7 +1289,7 @@ state[r] = revpruned else: state[r] = revprecursor - return repo['.'].rev(), dest.rev(), state + return originalwd, dest.rev(), state @@ -1367,9 +1388,8 @@ """store the currently rebased set on the repo object This is used by another function to prevent rebased revisions from becoming - hidden (see issue4505)""" + hidden (see issue4504)""" repo = repo.unfiltered() - revs = set(revs) repo._rebaseset = revs # invalidate cache if visibility changes hiddens = repo.filteredrevcache.get('visible', set()) @@ -1383,7 +1403,7 @@ del repo._rebaseset def _rebasedvisible(orig, repo): - """ensure rebased revs stay visible (see issue4505)""" + """ensure rebased revs stay visible (see issue4504)""" blockers = orig(repo) blockers.update(getattr(repo, '_rebaseset', ())) return blockers
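The new storestatus() paths above are what make 'hg rebase --abort' safe even if the process dies before the transaction closes: the state file is written immediately before the rebase begins, and re-registered as a transaction file generator while it runs. A rough sketch of the dual pattern, assuming a vfs-like opener and a Mercurial-style transaction object:

    def storestate(opener, writer, tr=None):
        if tr is not None:
            # deferred: the transaction calls writer(fp) when it closes,
            # so the state lands together with the rest of the txn data
            tr.addfilegenerator('rebasestate', ('rebasestate',),
                                writer, location='plain')
        else:
            # immediate write, used when no transaction is running
            with opener('rebasestate', 'w') as f:
                writer(f)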
--- a/hgext/schemes.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/schemes.py Sat Mar 11 13:53:14 2017 -0500 @@ -63,6 +63,7 @@ # leave the attribute unspecified. testedwith = 'ships-with-hg-core' +_partre = re.compile(br'\{(\d+)\}') class ShortRepository(object): def __init__(self, url, scheme, templater): @@ -70,7 +71,7 @@ self.templater = templater self.url = url try: - self.parts = max(map(int, re.findall(r'\{(\d+)\}', self.url))) + self.parts = max(map(int, _partre.findall(self.url))) except ValueError: self.parts = 0
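Precompiling the {N} pattern is the only change here; for illustration, a standalone str-based sketch of the part counting (the extension itself matches bytes):

    import re

    partre = re.compile(r'\{(\d+)\}')

    def countparts(url):
        try:
            return max(int(m) for m in partre.findall(url))
        except ValueError:   # max() of an empty sequence: no placeholders
            return 0

    assert countparts('https://hg.example.org/{1}/{2}') == 2
    assert countparts('https://hg.example.org/static') == 0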
--- a/hgext/share.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/share.py Sat Mar 11 13:53:14 2017 -0500 @@ -48,6 +48,7 @@ error, extensions, hg, + txnutil, util, ) @@ -64,10 +65,14 @@ @command('share', [('U', 'noupdate', None, _('do not create a working directory')), - ('B', 'bookmarks', None, _('also share bookmarks'))], + ('B', 'bookmarks', None, _('also share bookmarks')), + ('', 'relative', None, _('point to source using a relative path ' + '(EXPERIMENTAL)')), + ], _('[-U] [-B] SOURCE [DEST]'), norepo=True) -def share(ui, source, dest=None, noupdate=False, bookmarks=False): +def share(ui, source, dest=None, noupdate=False, bookmarks=False, + relative=False): """create a new shared repository Initialize a new repository and working directory that shares its @@ -86,7 +91,7 @@ """ return hg.share(ui, source, dest=dest, update=not noupdate, - bookmarks=bookmarks) + bookmarks=bookmarks, relative=relative) @command('unshare', [], '') def unshare(ui, repo): @@ -111,7 +116,8 @@ sharefile = repo.join('sharedpath') util.rename(sharefile, sharefile + '.old') - repo.requirements.discard('sharedpath') + repo.requirements.discard('shared') + repo.requirements.discard('relshared') repo._writerequirements() finally: destlock and destlock.release() @@ -171,7 +177,28 @@ if _hassharedbookmarks(repo): srcrepo = _getsrcrepo(repo) if srcrepo is not None: + # just orig(srcrepo) doesn't work as expected, because + # HG_PENDING refers to repo.root. + try: + fp, pending = txnutil.trypending(repo.root, repo.vfs, + 'bookmarks') + if pending: + # only in this case, the bookmark information in repo + # is up-to-date. + return fp + fp.close() + except IOError as inst: + if inst.errno != errno.ENOENT: + raise + + # otherwise, we should read bookmarks from srcrepo, + # because .hg/bookmarks in srcrepo might already have + # been changed via another sharing repo repo = srcrepo + + # TODO: Pending changes in repo are still invisible in + # srcrepo, because bookmarks.pending is written only into repo. + # See also https://www.mercurial-scm.org/wiki/SharedRepository return orig(repo) def recordchange(orig, self, tr):
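The new --relative flag (experimental) records the path to the share source relative to the new repository, so the pair can keep working if both directories are moved together; for example:

    $ hg share --relative ../main work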
--- a/hgext/shelve.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/shelve.py Sat Mar 11 13:53:14 2017 -0500 @@ -46,6 +46,7 @@ scmutil, templatefilters, util, + vfs as vfsmod, ) from . import ( @@ -78,8 +79,8 @@ def __init__(self, repo, name, filetype=None): self.repo = repo self.name = name - self.vfs = scmutil.vfs(repo.join(shelvedir)) - self.backupvfs = scmutil.vfs(repo.join(backupdir)) + self.vfs = vfsmod.vfs(repo.join(shelvedir)) + self.backupvfs = vfsmod.vfs(repo.join(backupdir)) self.ui = self.repo.ui if filetype: self.fname = name + '.' + filetype @@ -220,7 +221,7 @@ util.unlinkpath(repo.join(cls._filename), ignoremissing=True) def cleanupoldbackups(repo): - vfs = scmutil.vfs(repo.join(backupdir)) + vfs = vfsmod.vfs(repo.join(backupdir)) maxbackups = repo.ui.configint('shelve', 'maxbackups', 10) hgfiles = [f for f in vfs.listdir() if f.endswith('.' + patchextension)] @@ -485,6 +486,7 @@ if not ui.plain(): width = ui.termwidth() namelabel = 'shelve.newest' + ui.pager('shelve') for mtime, name in listshelves(repo): sname = util.split(name)[1] if pats and sname not in pats: @@ -747,10 +749,12 @@ _('continue an incomplete unshelve operation')), ('k', 'keep', None, _('keep shelve after unshelving')), + ('n', 'name', '', + _('restore shelved change with given name'), _('NAME')), ('t', 'tool', '', _('specify merge tool')), ('', 'date', '', _('set date for temporary commits (DEPRECATED)'), _('DATE'))], - _('hg unshelve [SHELVED]')) + _('hg unshelve [[-n] SHELVED]')) def unshelve(ui, repo, *shelved, **opts): """restore a shelved change to the working directory @@ -795,6 +799,9 @@ continuef = opts.get('continue') if not abortf and not continuef: cmdutil.checkunfinished(repo) + shelved = list(shelved) + if opts.get("name"): + shelved.append(opts["name"]) if abortf or continuef: if abortf and continuef:
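With the new unshelve option the shelf name can be given either positionally or by flag, so for example these two invocations are equivalent:

    $ hg unshelve mychange
    $ hg unshelve -n mychange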
--- a/hgext/transplant.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/transplant.py Sat Mar 11 13:53:14 2017 -0500 @@ -28,11 +28,14 @@ merge, node as nodemod, patch, + pycompat, registrar, revlog, revset, scmutil, + smartset, util, + vfs as vfsmod, ) class TransplantError(error.Abort): @@ -58,7 +61,7 @@ self.opener = opener if not opener: - self.opener = scmutil.opener(self.path) + self.opener = vfsmod.vfs(self.path) self.transplants = {} self.dirty = False self.read() @@ -101,7 +104,7 @@ def __init__(self, ui, repo, opts): self.ui = ui self.path = repo.join('transplant') - self.opener = scmutil.opener(self.path) + self.opener = vfsmod.vfs(self.path) self.transplants = transplants(self.path, 'transplants', opener=self.opener) def getcommiteditor(): @@ -197,7 +200,7 @@ patchfile = None else: fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-') - fp = os.fdopen(fd, 'w') + fp = os.fdopen(fd, pycompat.sysstr('w')) gen = patch.diff(source, parent, node, opts=diffopts) for chunk in gen: fp.write(chunk) @@ -245,7 +248,7 @@ self.ui.status(_('filtering %s\n') % patchfile) user, date, msg = (changelog[1], changelog[2], changelog[4]) fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-') - fp = os.fdopen(fd, 'w') + fp = os.fdopen(fd, pycompat.sysstr('w')) fp.write("# HG changeset patch\n") fp.write("# User %s\n" % user) fp.write("# Date %d %d\n" % date) @@ -258,7 +261,8 @@ environ={'HGUSER': changelog[1], 'HGREVISION': nodemod.hex(node), }, - onerr=error.Abort, errprefix=_('filter failed')) + onerr=error.Abort, errprefix=_('filter failed'), + blockedtag='transplant_filter') user, date, msg = self.parselog(file(headerfile))[1:4] finally: os.unlink(headerfile) @@ -722,7 +726,7 @@ s = revset.getset(repo, subset, x) else: s = subset - return revset.baseset([r for r in s if + return smartset.baseset([r for r in s if repo[r].extra().get('transplant_source')]) templatekeyword = registrar.templatekeyword()
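The pycompat.sysstr() wrapping above exists because Mercurial's Python 3 source transformer turns plain string literals into bytes, while os.fdopen() requires a native str mode. A rough standalone equivalent of the helper (assumed behavior, not the exact implementation):

    import os
    import tempfile

    def sysstr(s):
        # bytes -> native str on Python 3; str passes through unchanged
        return s.decode('latin-1') if isinstance(s, bytes) else s

    fd, path = tempfile.mkstemp(prefix='hg-transplant-')
    fp = os.fdopen(fd, sysstr(b'w'))   # mode is a native str either way
    fp.close()
    os.unlink(path)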
--- a/hgext/zeroconf/__init__.py Tue Mar 07 13:24:24 2017 -0500 +++ b/hgext/zeroconf/__init__.py Sat Mar 11 13:53:14 2017 -0500 @@ -64,7 +64,9 @@ # Generic method, sometimes gives useless results try: dumbip = socket.gethostbyaddr(socket.gethostname())[2][0] - if not dumbip.startswith('127.') and ':' not in dumbip: + if ':' in dumbip: + dumbip = '127.0.0.1' + if not dumbip.startswith('127.'): return dumbip except (socket.gaierror, socket.herror): dumbip = '127.0.0.1'
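The change above treats an IPv6 answer from gethostbyaddr() like a loopback answer: discard it and keep probing. The predicate, as a tiny standalone sketch:

    def usable_ipv4(addr):
        # reject IPv6 (':' in the address) and loopback answers
        return ':' not in addr and not addr.startswith('127.')

    assert not usable_ipv4('::1')
    assert not usable_ipv4('127.0.0.1')
    assert usable_ipv4('192.168.1.5')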
--- a/mercurial/__init__.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/__init__.py Sat Mar 11 13:53:14 2017 -0500 @@ -280,7 +280,7 @@ continue r, c = t.start l = (b'; from mercurial.pycompat import ' - b'delattr, getattr, hasattr, setattr, xrange\n') + b'delattr, getattr, hasattr, setattr, xrange, open\n') for u in tokenize.tokenize(io.BytesIO(l).readline): if u.type in (tokenize.ENCODING, token.ENDMARKER): continue @@ -307,13 +307,6 @@ if argidx is not None: _ensureunicode(argidx) - # Bare open call (not an attribute on something else), the - # second argument (mode) must be a string, not bytes - elif fn == 'open' and not _isop(i - 1, '.'): - arg1idx = _findargnofcall(1) - if arg1idx is not None: - _ensureunicode(arg1idx) - # It changes iteritems to items as iteritems is not # present in Python 3 world. elif fn == 'iteritems': @@ -327,7 +320,7 @@ # ``replacetoken`` or any mechanism that changes semantics of module # loading is changed. Otherwise cached bytecode may get loaded without # the new transformation mechanisms applied. - BYTECODEHEADER = b'HG\x00\x06' + BYTECODEHEADER = b'HG\x00\x08' class hgloader(importlib.machinery.SourceFileLoader): """Custom module loader that transforms source code.
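Bumping BYTECODEHEADER is what invalidates bytecode cached under the previous transformer: the loader only reuses a cache entry that starts with the current magic. A sketch of that pattern (not the loader's exact code):

    HEADER = b'HG\x00\x08'   # must change whenever the source transform does

    def cached_bytecode(data):
        # bytecode written under an older transform is a cache miss
        if not data.startswith(HEADER):
            return None
        return data[len(HEADER):]

    assert cached_bytecode(b'HG\x00\x06old') is None
    assert cached_bytecode(b'HG\x00\x08new') == b'new'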
--- a/mercurial/archival.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/archival.py Sat Mar 11 13:53:14 2017 -0500 @@ -22,8 +22,8 @@ encoding, error, match as matchmod, - scmutil, util, + vfs as vfsmod, ) stringio = util.stringio @@ -249,7 +249,7 @@ def __init__(self, name, mtime): self.basedir = name - self.opener = scmutil.opener(self.basedir) + self.opener = vfsmod.vfs(self.basedir) def addfile(self, name, mode, islink, data): if islink: @@ -331,7 +331,7 @@ for subpath in sorted(ctx.substate): sub = ctx.workingsub(subpath) submatch = matchmod.subdirmatcher(subpath, matchfn) - total += sub.archive(archiver, prefix, submatch) + total += sub.archive(archiver, prefix, submatch, decode) if total == 0: raise error.Abort(_('no files match the archive pattern'))
--- a/mercurial/bookmarks.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/bookmarks.py Sat Mar 11 13:53:14 2017 -0500 @@ -19,6 +19,7 @@ error, lock as lockmod, obsolete, + txnutil, util, ) @@ -29,17 +30,8 @@ bookmarks or the committed ones. Other extensions (like share) may need to tweak this behavior further. """ - bkfile = None - if 'HG_PENDING' in encoding.environ: - try: - bkfile = repo.vfs('bookmarks.pending') - except IOError as inst: - if inst.errno != errno.ENOENT: - raise - if bkfile is None: - bkfile = repo.vfs('bookmarks') - return bkfile - + fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks') + return fp class bmstore(dict): """Storage for bookmarks.
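The deleted branch is exactly what txnutil.trypending() now factors out: read 'bookmarks.pending' when a pending transaction for this repo is advertised via $HG_PENDING, otherwise fall back to 'bookmarks'. A rough sketch of that assumed contract:

    import errno
    import os

    def trypending(root, opener, name):
        """Return (fileobj, is_pending)."""
        if os.environ.get('HG_PENDING') == root:
            try:
                return opener('%s.pending' % name), True
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
        return opener(name), False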
--- a/mercurial/branchmap.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/branchmap.py Sat Mar 11 13:53:14 2017 -0500 @@ -9,7 +9,6 @@ import array import struct -import time from .node import ( bin, @@ -21,6 +20,7 @@ encoding, error, scmutil, + util, ) array = array.array @@ -261,7 +261,7 @@ missing heads, and a generator of nodes that are strictly a superset of heads missing, this function updates self to be correct. """ - starttime = time.time() + starttime = util.timer() cl = repo.changelog # collect new branch entries newbranches = {} @@ -314,7 +314,7 @@ self.tiprev = tiprev self.filteredhash = scmutil.filteredhash(repo, self.tiprev) - duration = time.time() - starttime + duration = util.timer() - starttime repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n', repo.filtername, duration)
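util.timer is preferred to time.time() for the duration measurement above because it can resolve to a higher-resolution clock; a rough standalone equivalent of the assumed fallback chain:

    import time

    timer = time.time                    # portable fallback: wall clock
    if hasattr(time, 'perf_counter'):
        timer = time.perf_counter        # monotonic, high resolution

    start = timer()
    sum(range(100000))                   # stand-in for the cache update
    print('updated in %.4f seconds' % (timer() - start))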
--- a/mercurial/bundle2.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/bundle2.py Sat Mar 11 13:53:14 2017 -0500 @@ -320,9 +320,6 @@ It iterates over each part then searches for and uses the proper handling code to process the part. Parts are processed in order. - This is very early version of this function that will be strongly reworked - before final usage. - Unknown Mandatory part will abort the process. It is temporarily possible to provide a prebuilt bundleoperation to the @@ -865,6 +862,11 @@ self._generated = None self.mandatory = mandatory + def __repr__(self): + cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) + return ('<%s object at %x; id: %s; type: %s; mandatory: %s>' + % (cls, id(self), self.id, self.type, self.mandatory)) + def copy(self): """return a copy of the part
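With the new __repr__, a mandatory changegroup part would print along these lines (the address is illustrative):

    <mercurial.bundle2.bundlepart object at 7f3a21c4e990; id: None; type: changegroup; mandatory: True>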
--- a/mercurial/bundlerepo.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/bundlerepo.py Sat Mar 11 13:53:14 2017 -0500 @@ -37,8 +37,8 @@ phases, pycompat, revlog, - scmutil, util, + vfs as vfsmod, ) class bundlerevlog(revlog.revlog): @@ -50,7 +50,7 @@ # # To differentiate a rev in the bundle from a rev in the revlog, we # check revision against repotiprev. - opener = scmutil.readonlyvfs(opener) + opener = vfsmod.readonlyvfs(opener) revlog.revlog.__init__(self, opener, indexfile) self.bundle = bundle n = len(self) @@ -239,7 +239,7 @@ def __init__(self, *args, **kwargs): super(bundlephasecache, self).__init__(*args, **kwargs) if util.safehasattr(self, 'opener'): - self.opener = scmutil.readonlyvfs(self.opener) + self.opener = vfsmod.readonlyvfs(self.opener) def write(self): raise NotImplementedError @@ -272,7 +272,7 @@ suffix=".hg10un") self.tempfile = temp - with os.fdopen(fdtemp, 'wb') as fptemp: + with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp: fptemp.write(header) while True: chunk = read(2**18)
--- a/mercurial/changegroup.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/changegroup.py Sat Mar 11 13:53:14 2017 -0500 @@ -26,6 +26,7 @@ error, mdiff, phases, + pycompat, util, ) @@ -98,7 +99,7 @@ fh = open(filename, "wb", 131072) else: fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") - fh = os.fdopen(fd, "wb") + fh = os.fdopen(fd, pycompat.sysstr("wb")) cleanup = filename for c in chunks: fh.write(c)
--- a/mercurial/chgserver.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/chgserver.py Sat Mar 11 13:53:14 2017 -0500 @@ -31,8 +31,11 @@ :: [chgserver] - idletimeout = 3600 # seconds, after which an idle server will exit - skiphash = False # whether to skip config or env change checks + # how long (in seconds) an idle chg server waits before exiting + idletimeout = 3600 + + # whether to skip config or env change checks + skiphash = False """ from __future__ import absolute_import @@ -176,26 +179,17 @@ else: self._csystem = csystem - def system(self, cmd, environ=None, cwd=None, onerr=None, - errprefix=None): + def _runsystem(self, cmd, environ, cwd, out): # fall back to the original system method if the output needs to be # captured (to self._buffers), or the output stream is not stdout # (e.g. stderr, cStringIO), because the chg client is not aware of # these situations and will behave differently (write to stdout). - if (any(s[1] for s in self._bufferstates) + if (out is not self.fout or not util.safehasattr(self.fout, 'fileno') or self.fout.fileno() != util.stdout.fileno()): - return super(chgui, self).system(cmd, environ, cwd, onerr, - errprefix) + return util.system(cmd, environ=environ, cwd=cwd, out=out) self.flush() - rc = self._csystem(cmd, util.shellenviron(environ), cwd) - if rc and onerr: - errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]), - util.explainexit(rc)[0]) - if errprefix: - errmsg = '%s: %s' % (errprefix, errmsg) - raise onerr(errmsg) - return rc + return self._csystem(cmd, util.shellenviron(environ), cwd) def _runpager(self, cmd): self._csystem(cmd, util.shellenviron(), type='pager', @@ -287,9 +281,9 @@ _iochannels = [ # server.ch, ui.fp, mode - ('cin', 'fin', 'rb'), - ('cout', 'fout', 'wb'), - ('cerr', 'ferr', 'wb'), + ('cin', 'fin', pycompat.sysstr('rb')), + ('cout', 'fout', pycompat.sysstr('wb')), + ('cerr', 'ferr', pycompat.sysstr('wb')), ] class chgcmdserver(commandserver.server):
--- a/mercurial/cmdutil.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/cmdutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -26,14 +26,12 @@ changelog, copies, crecord as crecordmod, - dirstateguard as dirstateguardmod, encoding, error, formatter, graphmod, lock as lockmod, match as matchmod, - mergeutil, obsolete, patch, pathutil, @@ -43,9 +41,11 @@ revlog, revset, scmutil, + smartset, templatekw, templater, util, + vfs as vfsmod, ) stringio = util.stringio @@ -584,7 +584,7 @@ raise error.CommandError(cmd, _('invalid arguments')) if not os.path.isfile(file_): raise error.Abort(_("revlog '%s' not found") % file_) - r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False), + r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), file_[:-2] + ".i") return r @@ -1443,24 +1443,13 @@ def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered): changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered) - formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12]) - filters = {'formatnode': formatnode} - defaulttempl = { - 'parent': '{rev}:{node|formatnode} ', - 'manifest': '{rev}:{node|formatnode}', - 'file_copy': '{name} ({source})', - 'envvar': '{key}={value}', - 'extra': '{key}={value|stringescape}' - } - # filecopy is preserved for compatibility reasons - defaulttempl['filecopy'] = defaulttempl['file_copy'] assert not (tmpl and mapfile) + defaulttempl = templatekw.defaulttempl if mapfile: - self.t = templater.templater.frommapfile(mapfile, filters=filters, + self.t = templater.templater.frommapfile(mapfile, cache=defaulttempl) else: self.t = formatter.maketemplater(ui, 'changeset', tmpl, - filters=filters, cache=defaulttempl) self.cache = {} @@ -2092,11 +2081,11 @@ if opts.get('rev'): revs = scmutil.revrange(repo, opts['rev']) elif follow and repo.dirstate.p1() == nullid: - revs = revset.baseset() + revs = smartset.baseset() elif follow: revs = repo.revs('reverse(:.)') else: - revs = revset.spanset(repo) + revs = smartset.spanset(repo) revs.reverse() return revs @@ -2111,7 +2100,7 @@ limit = loglimit(opts) revs = _logrevs(repo, opts) if not revs: - return revset.baseset(), None, None + return smartset.baseset(), None, None expr, filematcher = _makelogrevset(repo, pats, opts, revs) if opts.get('rev'): # User-specified revs might be unsorted, but don't sort before @@ -2127,7 +2116,7 @@ if idx >= limit: break limitedrevs.append(rev) - revs = revset.baseset(limitedrevs) + revs = smartset.baseset(limitedrevs) return revs, expr, filematcher @@ -2142,7 +2131,7 @@ limit = loglimit(opts) revs = _logrevs(repo, opts) if not revs: - return revset.baseset([]), None, None + return smartset.baseset([]), None, None expr, filematcher = _makelogrevset(repo, pats, opts, revs) if expr: matcher = revset.match(repo.ui, expr, order=revset.followorder) @@ -2153,7 +2142,7 @@ if limit <= idx: break limitedrevs.append(r) - revs = revset.baseset(limitedrevs) + revs = smartset.baseset(limitedrevs) return revs, expr, filematcher @@ -2236,6 +2225,8 @@ if opts.get('rev'): endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) + + ui.pager('log') displayer = show_changeset(ui, repo, opts, buffered=True) displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed, filematcher) @@ -2975,13 +2966,6 @@ clean = set(changes.clean) modadded = set() - # split between files known in target manifest and the others - smf = set(mf) - - # determine the exact nature of the deleted changesets - deladded = _deleted - smf - 
deleted = _deleted - deladded - # We need to account for the state of the file in the dirstate, # even when we revert against something else than parent. This will # slightly alter the behavior of revert (doing back up or not, delete @@ -3023,7 +3007,10 @@ # in case of merge, files that are actually added can be reported as # modified, we need to post process the result if p2 != nullid: - mergeadd = dsmodified - smf + mergeadd = set(dsmodified) + for path in dsmodified: + if path in mf: + mergeadd.remove(path) dsadded |= mergeadd dsmodified -= mergeadd @@ -3036,6 +3023,13 @@ dsremoved.add(src) names[src] = (repo.pathto(src, cwd), True) + # determine the exact nature of the deleted changesets + deladded = set(_deleted) + for path in _deleted: + if path in mf: + deladded.remove(path) + deleted = _deleted - deladded + # distinguish between file to forget and the other added = set() for abs in dsadded: @@ -3254,15 +3248,18 @@ diffopts = patch.difffeatureopts(repo.ui, whitespace=True) diffopts.nodates = True diffopts.git = True - reversehunks = repo.ui.configbool('experimental', - 'revertalternateinteractivemode', - True) + operation = 'discard' + reversehunks = True + if node != parent: + operation = 'revert' + reversehunks = repo.ui.configbool('experimental', + 'revertalternateinteractivemode', + True) if reversehunks: diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts) else: diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts) originalchunks = patch.parsepatch(diff) - operation = 'discard' if node == parent else 'revert' try: @@ -3366,11 +3363,6 @@ return cmd -def checkunresolved(ms): - ms._repo.ui.deprecwarn('checkunresolved moved from cmdutil to mergeutil', - '4.1') - return mergeutil.checkunresolved(ms) - # a list of (ui, repo, otherpeer, opts, missing) functions called by # commands.outgoing. "missing" is "missing" of the result of # "findcommonoutgoing()" @@ -3477,10 +3469,3 @@ if after[1]: hint = after[0] raise error.Abort(_('no %s in progress') % task, hint=hint) - -class dirstateguard(dirstateguardmod.dirstateguard): - def __init__(self, repo, name): - dirstateguardmod.dirstateguard.__init__(self, repo, name) - repo.ui.deprecwarn( - 'dirstateguard has moved from cmdutil to dirstateguard', - '4.1')
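Both rewritten hunks in the revert path above replace subtraction against set(mf) with per-path membership tests, so the whole manifest never has to be copied into a set; the result is still "the paths the target manifest does not know about":

    def not_in_manifest(paths, mf):
        # mf can be any mapping-like manifest; only __contains__ is used
        return set(p for p in paths if p not in mf)

    assert not_in_manifest({'a', 'b'}, {'b': object()}) == {'a'}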
--- a/mercurial/color.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/color.py Sat Mar 11 13:53:14 2017 -0500 @@ -7,59 +7,465 @@ from __future__ import absolute_import -_styles = {'grep.match': 'red bold', - 'grep.linenumber': 'green', - 'grep.rev': 'green', - 'grep.change': 'green', - 'grep.sep': 'cyan', - 'grep.filename': 'magenta', - 'grep.user': 'magenta', - 'grep.date': 'magenta', - 'bookmarks.active': 'green', - 'branches.active': 'none', - 'branches.closed': 'black bold', - 'branches.current': 'green', - 'branches.inactive': 'none', - 'diff.changed': 'white', - 'diff.deleted': 'red', - 'diff.diffline': 'bold', - 'diff.extended': 'cyan bold', - 'diff.file_a': 'red bold', - 'diff.file_b': 'green bold', - 'diff.hunk': 'magenta', - 'diff.inserted': 'green', - 'diff.tab': '', - 'diff.trailingwhitespace': 'bold red_background', - 'changeset.public' : '', - 'changeset.draft' : '', - 'changeset.secret' : '', - 'diffstat.deleted': 'red', - 'diffstat.inserted': 'green', - 'histedit.remaining': 'red bold', - 'ui.prompt': 'yellow', - 'log.changeset': 'yellow', - 'patchbomb.finalsummary': '', - 'patchbomb.from': 'magenta', - 'patchbomb.to': 'cyan', - 'patchbomb.subject': 'green', - 'patchbomb.diffstats': '', - 'rebase.rebased': 'blue', - 'rebase.remaining': 'red bold', - 'resolve.resolved': 'green bold', - 'resolve.unresolved': 'red bold', - 'shelve.age': 'cyan', - 'shelve.newest': 'green bold', - 'shelve.name': 'blue bold', - 'status.added': 'green bold', - 'status.clean': 'none', - 'status.copied': 'none', - 'status.deleted': 'cyan bold underline', - 'status.ignored': 'black bold', - 'status.modified': 'blue bold', - 'status.removed': 'red bold', - 'status.unknown': 'magenta bold underline', - 'tags.normal': 'green', - 'tags.local': 'black bold'} +from .i18n import _ + +from . import ( + encoding, + pycompat, + util +) + +try: + import curses + # Mapping from effect name to terminfo attribute name (or raw code) or + # color number. This will also force-load the curses module. 
+ _baseterminfoparams = { + 'none': (True, 'sgr0', ''), + 'standout': (True, 'smso', ''), + 'underline': (True, 'smul', ''), + 'reverse': (True, 'rev', ''), + 'inverse': (True, 'rev', ''), + 'blink': (True, 'blink', ''), + 'dim': (True, 'dim', ''), + 'bold': (True, 'bold', ''), + 'invisible': (True, 'invis', ''), + 'italic': (True, 'sitm', ''), + 'black': (False, curses.COLOR_BLACK, ''), + 'red': (False, curses.COLOR_RED, ''), + 'green': (False, curses.COLOR_GREEN, ''), + 'yellow': (False, curses.COLOR_YELLOW, ''), + 'blue': (False, curses.COLOR_BLUE, ''), + 'magenta': (False, curses.COLOR_MAGENTA, ''), + 'cyan': (False, curses.COLOR_CYAN, ''), + 'white': (False, curses.COLOR_WHITE, ''), + } +except ImportError: + curses = None + _baseterminfoparams = {} + +# allow the extensions to change the default +_enabledbydefault = False + +# start and stop parameters for effects +_effects = { + 'none': 0, + 'black': 30, + 'red': 31, + 'green': 32, + 'yellow': 33, + 'blue': 34, + 'magenta': 35, + 'cyan': 36, + 'white': 37, + 'bold': 1, + 'italic': 3, + 'underline': 4, + 'inverse': 7, + 'dim': 2, + 'black_background': 40, + 'red_background': 41, + 'green_background': 42, + 'yellow_background': 43, + 'blue_background': 44, + 'purple_background': 45, + 'cyan_background': 46, + 'white_background': 47, + } + +_defaultstyles = { + 'grep.match': 'red bold', + 'grep.linenumber': 'green', + 'grep.rev': 'green', + 'grep.change': 'green', + 'grep.sep': 'cyan', + 'grep.filename': 'magenta', + 'grep.user': 'magenta', + 'grep.date': 'magenta', + 'bookmarks.active': 'green', + 'branches.active': 'none', + 'branches.closed': 'black bold', + 'branches.current': 'green', + 'branches.inactive': 'none', + 'diff.changed': 'white', + 'diff.deleted': 'red', + 'diff.diffline': 'bold', + 'diff.extended': 'cyan bold', + 'diff.file_a': 'red bold', + 'diff.file_b': 'green bold', + 'diff.hunk': 'magenta', + 'diff.inserted': 'green', + 'diff.tab': '', + 'diff.trailingwhitespace': 'bold red_background', + 'changeset.public' : '', + 'changeset.draft' : '', + 'changeset.secret' : '', + 'diffstat.deleted': 'red', + 'diffstat.inserted': 'green', + 'histedit.remaining': 'red bold', + 'ui.prompt': 'yellow', + 'log.changeset': 'yellow', + 'patchbomb.finalsummary': '', + 'patchbomb.from': 'magenta', + 'patchbomb.to': 'cyan', + 'patchbomb.subject': 'green', + 'patchbomb.diffstats': '', + 'rebase.rebased': 'blue', + 'rebase.remaining': 'red bold', + 'resolve.resolved': 'green bold', + 'resolve.unresolved': 'red bold', + 'shelve.age': 'cyan', + 'shelve.newest': 'green bold', + 'shelve.name': 'blue bold', + 'status.added': 'green bold', + 'status.clean': 'none', + 'status.copied': 'none', + 'status.deleted': 'cyan bold underline', + 'status.ignored': 'black bold', + 'status.modified': 'blue bold', + 'status.removed': 'red bold', + 'status.unknown': 'magenta bold underline', + 'tags.normal': 'green', + 'tags.local': 'black bold', +} def loadcolortable(ui, extname, colortable): - _styles.update(colortable) + _defaultstyles.update(colortable) + +def _terminfosetup(ui, mode): + '''Initialize terminfo data and the terminal if we're in terminfo mode.''' + + # If we failed to load curses, we go ahead and return. + if curses is None: + return + # Otherwise, see what the config file says. 
+    if mode not in ('auto', 'terminfo'):
+        return
+    ui._terminfoparams.update(_baseterminfoparams)
+
+    for key, val in ui.configitems('color'):
+        if key.startswith('color.'):
+            newval = (False, int(val), '')
+            ui._terminfoparams[key[6:]] = newval
+        elif key.startswith('terminfo.'):
+            newval = (True, '', val.replace('\\E', '\x1b'))
+            ui._terminfoparams[key[9:]] = newval
+    try:
+        curses.setupterm()
+    except curses.error as e:
+        ui._terminfoparams.clear()
+        return
+
+    for key, (b, e, c) in ui._terminfoparams.items():
+        if not b:
+            continue
+        if not c and not curses.tigetstr(e):
+            # Most terminals don't support dim, invis, etc, so don't be
+            # noisy and use ui.debug().
+            ui.debug("no terminfo entry for %s\n" % e)
+            del ui._terminfoparams[key]
+    if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
+        # Only warn about missing terminfo entries if we explicitly asked for
+        # terminfo mode.
+        if mode == "terminfo":
+            ui.warn(_("no terminfo entry for setab/setaf: reverting to "
+                      "ECMA-48 color\n"))
+        ui._terminfoparams.clear()
+
+def setup(ui):
+    """configure color on a ui
+
+    This function both sets the colormode on the ui object and reads
+    the configuration, looking for custom colors and effect definitions."""
+    mode = _modesetup(ui)
+    ui._colormode = mode
+    if mode and mode != 'debug':
+        configstyles(ui)
+
+def _modesetup(ui):
+    if ui.plain():
+        return None
+    default = 'never'
+    if _enabledbydefault:
+        default = 'auto'
+    config = ui.config('ui', 'color', default)
+    if config == 'debug':
+        return 'debug'
+
+    auto = (config == 'auto')
+    always = not auto and util.parsebool(config)
+    if not always and not auto:
+        return None
+
+    formatted = (always or (encoding.environ.get('TERM') != 'dumb'
+                 and ui.formatted()))
+
+    mode = ui.config('color', 'mode', 'auto')
+
+    # If pager is active, color.pagermode overrides color.mode.
+    if getattr(ui, 'pageractive', False):
+        mode = ui.config('color', 'pagermode', mode)
+
+    realmode = mode
+    if mode == 'auto':
+        if pycompat.osname == 'nt':
+            term = encoding.environ.get('TERM')
+            # TERM won't be defined in a vanilla cmd.exe environment.
+
+            # UNIX-like environments on Windows such as Cygwin and MSYS will
+            # set TERM. They appear to make a best effort attempt at setting it
+            # to something appropriate. However, not all environments with TERM
+            # defined support ANSI. Since "ansi" could result in terminal
+            # gibberish, we err on the side of selecting "win32". However, if
+            # w32effects is not defined, we almost certainly don't support
+            # "win32", so don't even try.
+            if (term and 'xterm' in term) or not w32effects:
+                realmode = 'ansi'
+            else:
+                realmode = 'win32'
+        else:
+            realmode = 'ansi'
+
+    def modewarn():
+        # only warn if color.mode was explicitly set and we're in
+        # a formatted terminal
+        if mode == realmode and ui.formatted():
+            ui.warn(_('warning: failed to set color mode to %s\n') % mode)
+
+    if realmode == 'win32':
+        ui._terminfoparams.clear()
+        if not w32effects:
+            modewarn()
+            return None
+        _effects.update(w32effects)
+    elif realmode == 'ansi':
+        ui._terminfoparams.clear()
+    elif realmode == 'terminfo':
+        _terminfosetup(ui, mode)
+        if not ui._terminfoparams:
+            ## FIXME Shouldn't we return None in this case too?
+            modewarn()
+            realmode = 'ansi'
+    else:
+        return None
+
+    if always or (auto and formatted):
+        return realmode
+    return None
+
+def configstyles(ui):
+    ui._styles.update(_defaultstyles)
+    for status, cfgeffects in ui.configitems('color'):
+        if '.' 
not in status or status.startswith(('color.', 'terminfo.')): + continue + cfgeffects = ui.configlist('color', status) + if cfgeffects: + good = [] + for e in cfgeffects: + if valideffect(ui, e): + good.append(e) + else: + ui.warn(_("ignoring unknown color/effect %r " + "(configured in color.%s)\n") + % (e, status)) + ui._styles[status] = ' '.join(good) + +def valideffect(ui, effect): + 'Determine if the effect is valid or not.' + return ((not ui._terminfoparams and effect in _effects) + or (effect in ui._terminfoparams + or effect[:-11] in ui._terminfoparams)) + +def _effect_str(ui, effect): + '''Helper function for render_effects().''' + + bg = False + if effect.endswith('_background'): + bg = True + effect = effect[:-11] + try: + attr, val, termcode = ui._terminfoparams[effect] + except KeyError: + return '' + if attr: + if termcode: + return termcode + else: + return curses.tigetstr(val) + elif bg: + return curses.tparm(curses.tigetstr('setab'), val) + else: + return curses.tparm(curses.tigetstr('setaf'), val) + +def _render_effects(ui, text, effects): + 'Wrap text in commands to turn on each effect.' + if not text: + return text + if ui._terminfoparams: + start = ''.join(_effect_str(ui, effect) + for effect in ['none'] + effects.split()) + stop = _effect_str(ui, 'none') + else: + start = [str(_effects[e]) for e in ['none'] + effects.split()] + start = '\033[' + ';'.join(start) + 'm' + stop = '\033[' + str(_effects['none']) + 'm' + return ''.join([start, text, stop]) + +def colorlabel(ui, msg, label): + """add color control code according to the mode""" + if ui._colormode == 'debug': + if label and msg: + if msg[-1] == '\n': + msg = "[%s|%s]\n" % (label, msg[:-1]) + else: + msg = "[%s|%s]" % (label, msg) + elif ui._colormode is not None: + effects = [] + for l in label.split(): + s = ui._styles.get(l, '') + if s: + effects.append(s) + elif valideffect(ui, l): + effects.append(l) + effects = ' '.join(effects) + if effects: + msg = '\n'.join([_render_effects(ui, line, effects) + for line in msg.split('\n')]) + return msg + +w32effects = None +if pycompat.osname == 'nt': + import ctypes + import re + + _kernel32 = ctypes.windll.kernel32 + + _WORD = ctypes.c_ushort + + _INVALID_HANDLE_VALUE = -1 + + class _COORD(ctypes.Structure): + _fields_ = [('X', ctypes.c_short), + ('Y', ctypes.c_short)] + + class _SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', ctypes.c_short), + ('Top', ctypes.c_short), + ('Right', ctypes.c_short), + ('Bottom', ctypes.c_short)] + + class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', _COORD), + ('dwCursorPosition', _COORD), + ('wAttributes', _WORD), + ('srWindow', _SMALL_RECT), + ('dwMaximumWindowSize', _COORD)] + + _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11 + _STD_ERROR_HANDLE = 0xfffffff4 # (DWORD)-12 + + _FOREGROUND_BLUE = 0x0001 + _FOREGROUND_GREEN = 0x0002 + _FOREGROUND_RED = 0x0004 + _FOREGROUND_INTENSITY = 0x0008 + + _BACKGROUND_BLUE = 0x0010 + _BACKGROUND_GREEN = 0x0020 + _BACKGROUND_RED = 0x0040 + _BACKGROUND_INTENSITY = 0x0080 + + _COMMON_LVB_REVERSE_VIDEO = 0x4000 + _COMMON_LVB_UNDERSCORE = 0x8000 + + # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx + w32effects = { + 'none': -1, + 'black': 0, + 'red': _FOREGROUND_RED, + 'green': _FOREGROUND_GREEN, + 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN, + 'blue': _FOREGROUND_BLUE, + 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED, + 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN, + 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE, + 'bold': 
_FOREGROUND_INTENSITY, + 'black_background': 0x100, # unused value > 0x0f + 'red_background': _BACKGROUND_RED, + 'green_background': _BACKGROUND_GREEN, + 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN, + 'blue_background': _BACKGROUND_BLUE, + 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED, + 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN, + 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN | + _BACKGROUND_BLUE), + 'bold_background': _BACKGROUND_INTENSITY, + 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only + 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only + } + + passthrough = set([_FOREGROUND_INTENSITY, + _BACKGROUND_INTENSITY, + _COMMON_LVB_UNDERSCORE, + _COMMON_LVB_REVERSE_VIDEO]) + + stdout = _kernel32.GetStdHandle( + _STD_OUTPUT_HANDLE) # don't close the handle returned + if stdout is None or stdout == _INVALID_HANDLE_VALUE: + w32effects = None + else: + csbi = _CONSOLE_SCREEN_BUFFER_INFO() + if not _kernel32.GetConsoleScreenBufferInfo( + stdout, ctypes.byref(csbi)): + # stdout may not support GetConsoleScreenBufferInfo() + # when called from subprocess or redirected + w32effects = None + else: + origattr = csbi.wAttributes + ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)', + re.MULTILINE | re.DOTALL) + + def win32print(ui, writefunc, *msgs, **opts): + for text in msgs: + _win32print(ui, text, writefunc, **opts) + + def _win32print(ui, text, writefunc, **opts): + label = opts.get('label', '') + attr = origattr + + def mapcolor(val, attr): + if val == -1: + return origattr + elif val in passthrough: + return attr | val + elif val > 0x0f: + return (val & 0x70) | (attr & 0x8f) + else: + return (val & 0x07) | (attr & 0xf8) + + # determine console attributes based on labels + for l in label.split(): + style = ui._styles.get(l, '') + for effect in style.split(): + try: + attr = mapcolor(w32effects[effect], attr) + except KeyError: + # w32effects could not have certain attributes so we skip + # them if not found + pass + # hack to ensure regexp finds data + if not text.startswith('\033['): + text = '\033[m' + text + + # Look for ANSI-like codes embedded in text + m = re.match(ansire, text) + + try: + while m: + for sattr in m.group(1).split(';'): + if sattr: + attr = mapcolor(int(sattr), attr) + _kernel32.SetConsoleTextAttribute(stdout, attr) + writefunc(m.group(2), **opts) + m = re.match(ansire, m.group(3)) + finally: + # Explicitly reset original attributes + _kernel32.SetConsoleTextAttribute(stdout, origattr)
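For the ANSI branch of _render_effects() above, each effect maps to an SGR parameter and the list is joined into one escape sequence, led by 'none' so stale attributes are reset first. A standalone sketch with a trimmed effect table:

    effects = {'none': 0, 'bold': 1, 'red': 31, 'green': 32}

    def render(text, names):
        start = '\033[' + ';'.join(str(effects[n])
                                   for n in ['none'] + names) + 'm'
        stop = '\033[' + str(effects['none']) + 'm'
        return start + text + stop

    assert render('hi', ['red', 'bold']) == '\x1b[0;31;1mhi\x1b[0m'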
--- a/mercurial/commands.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/commands.py Sat Mar 11 13:53:14 2017 -0500 @@ -11,17 +11,10 @@ import errno import os import re -import socket -import string -import sys -import tempfile -import time from .i18n import _ from .node import ( - bin, hex, - nullhex, nullid, nullrev, short, @@ -40,30 +33,22 @@ error, exchange, extensions, - formatter, graphmod, hbisect, help, hg, lock as lockmod, merge as mergemod, - minirst, obsolete, patch, phases, - policy, - pvec, pycompat, - repair, - revlog, - revset, + revsetlang, scmutil, server, sshserver, - sslutil, streamclone, templatekw, - templater, ui as uimod, util, ) @@ -92,6 +77,11 @@ _('do not prompt, automatically pick the first choice for all prompts')), ('q', 'quiet', None, _('suppress output')), ('v', 'verbose', None, _('enable additional output')), + ('', 'color', '', + # i18n: 'always', 'auto', 'never', and 'debug' are keywords + # and should not be translated + _("when to colorize (boolean, always, auto, never, or debug)"), + _('TYPE')), ('', 'config', [], _('set/override config option (use \'section.name=value\')'), _('CONFIG')), @@ -107,6 +97,8 @@ ('', 'version', None, _('output version information and exit')), ('h', 'help', None, _('display help and exit')), ('', 'hidden', False, _('consider hidden changesets')), + ('', 'pager', 'auto', + _("when to paginate (boolean, always, auto, or never)"), _('TYPE')), ] dryrunopts = [('n', 'dry-run', None, @@ -433,6 +425,8 @@ if linenumber and (not opts.get('changeset')) and (not opts.get('number')): raise error.Abort(_('at least one of -n/-c is required for -l')) + ui.pager('annotate') + if fm.isplain(): def makefunc(get, fmt): return lambda x: fmt(get(x)) @@ -892,7 +886,8 @@ # update state state['current'] = [node] hbisect.save_state(repo, state) - status = ui.system(command, environ={'HG_NODE': hex(node)}) + status = ui.system(command, environ={'HG_NODE': hex(node)}, + blockedtag='bisect_check') if status == 125: transition = "skip" elif status == 0: @@ -1264,6 +1259,7 @@ fmt = ' ' * padsize + ' %d:%s' fm.condwrite(not ui.quiet, 'rev node', fmt, rev, hexfunc(ctx.node()), label='log.changeset changeset.%s' % ctx.phasestr()) + fm.context(ctx=ctx) fm.data(active=isactive, closed=not isopen, current=current) if not ui.quiet: fm.plain(notice) @@ -1427,6 +1423,7 @@ ctx = scmutil.revsingle(repo, opts.get('rev')) m = scmutil.match(ctx, (file1,) + pats, opts) + ui.pager('cat') return cmdutil.cat(ui, repo, ctx, m, '', **opts) @command('^clone', @@ -1799,9 +1796,10 @@ editor = ui.geteditor() ui.system("%s \"%s\"" % (editor, f), - onerr=error.Abort, errprefix=_("edit failed")) + onerr=error.Abort, errprefix=_("edit failed"), + blockedtag='config_edit') return - + ui.pager('config') fm = ui.formatter('config', opts) for f in scmutil.rcpath(): ui.debug('read config from: %s\n' % f) @@ -1866,1176 +1864,6 @@ with repo.wlock(False): return cmdutil.copy(ui, repo, pats, opts) -@command('debuginstall', [] + formatteropts, '', norepo=True) -def debuginstall(ui, **opts): - '''test Mercurial installation - - Returns 0 on success. 
- ''' - - def writetemp(contents): - (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-") - f = os.fdopen(fd, "wb") - f.write(contents) - f.close() - return name - - problems = 0 - - fm = ui.formatter('debuginstall', opts) - fm.startitem() - - # encoding - fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding) - err = None - try: - encoding.fromlocal("test") - except error.Abort as inst: - err = inst - problems += 1 - fm.condwrite(err, 'encodingerror', _(" %s\n" - " (check that your locale is properly set)\n"), err) - - # Python - fm.write('pythonexe', _("checking Python executable (%s)\n"), - pycompat.sysexecutable) - fm.write('pythonver', _("checking Python version (%s)\n"), - ("%d.%d.%d" % sys.version_info[:3])) - fm.write('pythonlib', _("checking Python lib (%s)...\n"), - os.path.dirname(os.__file__)) - - security = set(sslutil.supportedprotocols) - if sslutil.hassni: - security.add('sni') - - fm.write('pythonsecurity', _("checking Python security support (%s)\n"), - fm.formatlist(sorted(security), name='protocol', - fmt='%s', sep=',')) - - # These are warnings, not errors. So don't increment problem count. This - # may change in the future. - if 'tls1.2' not in security: - fm.plain(_(' TLS 1.2 not supported by Python install; ' - 'network connections lack modern security\n')) - if 'sni' not in security: - fm.plain(_(' SNI not supported by Python install; may have ' - 'connectivity issues with some servers\n')) - - # TODO print CA cert info - - # hg version - hgver = util.version() - fm.write('hgver', _("checking Mercurial version (%s)\n"), - hgver.split('+')[0]) - fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"), - '+'.join(hgver.split('+')[1:])) - - # compiled modules - fm.write('hgmodulepolicy', _("checking module policy (%s)\n"), - policy.policy) - fm.write('hgmodules', _("checking installed modules (%s)...\n"), - os.path.dirname(__file__)) - - err = None - try: - from . 
import ( - base85, - bdiff, - mpatch, - osutil, - ) - dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes - except Exception as inst: - err = inst - problems += 1 - fm.condwrite(err, 'extensionserror', " %s\n", err) - - compengines = util.compengines._engines.values() - fm.write('compengines', _('checking registered compression engines (%s)\n'), - fm.formatlist(sorted(e.name() for e in compengines), - name='compengine', fmt='%s', sep=', ')) - fm.write('compenginesavail', _('checking available compression engines ' - '(%s)\n'), - fm.formatlist(sorted(e.name() for e in compengines - if e.available()), - name='compengine', fmt='%s', sep=', ')) - wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE) - fm.write('compenginesserver', _('checking available compression engines ' - 'for wire protocol (%s)\n'), - fm.formatlist([e.name() for e in wirecompengines - if e.wireprotosupport()], - name='compengine', fmt='%s', sep=', ')) - - # templates - p = templater.templatepaths() - fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p)) - fm.condwrite(not p, '', _(" no template directories found\n")) - if p: - m = templater.templatepath("map-cmdline.default") - if m: - # template found, check if it is working - err = None - try: - templater.templater.frommapfile(m) - except Exception as inst: - err = inst - p = None - fm.condwrite(err, 'defaulttemplateerror', " %s\n", err) - else: - p = None - fm.condwrite(p, 'defaulttemplate', - _("checking default template (%s)\n"), m) - fm.condwrite(not m, 'defaulttemplatenotfound', - _(" template '%s' not found\n"), "default") - if not p: - problems += 1 - fm.condwrite(not p, '', - _(" (templates seem to have been installed incorrectly)\n")) - - # editor - editor = ui.geteditor() - editor = util.expandpath(editor) - fm.write('editor', _("checking commit editor... (%s)\n"), editor) - cmdpath = util.findexe(pycompat.shlexsplit(editor)[0]) - fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound', - _(" No commit editor set and can't find %s in PATH\n" - " (specify a commit editor in your configuration" - " file)\n"), not cmdpath and editor == 'vi' and editor) - fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound', - _(" Can't find editor '%s' in PATH\n" - " (specify a commit editor in your configuration" - " file)\n"), not cmdpath and editor) - if not cmdpath and editor != 'vi': - problems += 1 - - # check username - username = None - err = None - try: - username = ui.username() - except error.Abort as e: - err = e - problems += 1 - - fm.condwrite(username, 'username', _("checking username (%s)\n"), username) - fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n" - " (specify a username in your configuration file)\n"), err) - - fm.condwrite(not problems, '', - _("no problems detected\n")) - if not problems: - fm.data(problems=problems) - fm.condwrite(problems, 'problems', - _("%d problems detected," - " please check your install!\n"), problems) - fm.end() - - return problems - -@command('debugknown', [], _('REPO ID...'), norepo=True) -def debugknown(ui, repopath, *ids, **opts): - """test whether node ids are known to a repo - - Every ID must be a full-length hex node id string. Returns a list of 0s - and 1s indicating unknown/known. 
- """ - repo = hg.peer(ui, opts, repopath) - if not repo.capable('known'): - raise error.Abort("known() not supported by target repository") - flags = repo.known([bin(s) for s in ids]) - ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags]))) - -@command('debuglabelcomplete', [], _('LABEL...')) -def debuglabelcomplete(ui, repo, *args): - '''backwards compatibility with old bash completion scripts (DEPRECATED)''' - debugnamecomplete(ui, repo, *args) - -@command('debugmergestate', [], '') -def debugmergestate(ui, repo, *args): - """print merge state - - Use --verbose to print out information about whether v1 or v2 merge state - was chosen.""" - def _hashornull(h): - if h == nullhex: - return 'null' - else: - return h - - def printrecords(version): - ui.write(('* version %s records\n') % version) - if version == 1: - records = v1records - else: - records = v2records - - for rtype, record in records: - # pretty print some record types - if rtype == 'L': - ui.write(('local: %s\n') % record) - elif rtype == 'O': - ui.write(('other: %s\n') % record) - elif rtype == 'm': - driver, mdstate = record.split('\0', 1) - ui.write(('merge driver: %s (state "%s")\n') - % (driver, mdstate)) - elif rtype in 'FDC': - r = record.split('\0') - f, state, hash, lfile, afile, anode, ofile = r[0:7] - if version == 1: - onode = 'not stored in v1 format' - flags = r[7] - else: - onode, flags = r[7:9] - ui.write(('file: %s (record type "%s", state "%s", hash %s)\n') - % (f, rtype, state, _hashornull(hash))) - ui.write((' local path: %s (flags "%s")\n') % (lfile, flags)) - ui.write((' ancestor path: %s (node %s)\n') - % (afile, _hashornull(anode))) - ui.write((' other path: %s (node %s)\n') - % (ofile, _hashornull(onode))) - elif rtype == 'f': - filename, rawextras = record.split('\0', 1) - extras = rawextras.split('\0') - i = 0 - extrastrings = [] - while i < len(extras): - extrastrings.append('%s = %s' % (extras[i], extras[i + 1])) - i += 2 - - ui.write(('file extras: %s (%s)\n') - % (filename, ', '.join(extrastrings))) - elif rtype == 'l': - labels = record.split('\0', 2) - labels = [l for l in labels if len(l) > 0] - ui.write(('labels:\n')) - ui.write((' local: %s\n' % labels[0])) - ui.write((' other: %s\n' % labels[1])) - if len(labels) > 2: - ui.write((' base: %s\n' % labels[2])) - else: - ui.write(('unrecognized entry: %s\t%s\n') - % (rtype, record.replace('\0', '\t'))) - - # Avoid mergestate.read() since it may raise an exception for unsupported - # merge state records. We shouldn't be doing this, but this is OK since this - # command is pretty low-level. 
- ms = mergemod.mergestate(repo) - - # sort so that reasonable information is on top - v1records = ms._readrecordsv1() - v2records = ms._readrecordsv2() - order = 'LOml' - def key(r): - idx = order.find(r[0]) - if idx == -1: - return (1, r[1]) - else: - return (0, idx) - v1records.sort(key=key) - v2records.sort(key=key) - - if not v1records and not v2records: - ui.write(('no merge state found\n')) - elif not v2records: - ui.note(('no version 2 merge state\n')) - printrecords(1) - elif ms._v1v2match(v1records, v2records): - ui.note(('v1 and v2 states match: using v2\n')) - printrecords(2) - else: - ui.note(('v1 and v2 states mismatch: using v1\n')) - printrecords(1) - if ui.verbose: - printrecords(2) - -@command('debugnamecomplete', [], _('NAME...')) -def debugnamecomplete(ui, repo, *args): - '''complete "names" - tags, open branch names, bookmark names''' - - names = set() - # since we previously only listed open branches, we will handle that - # specially (after this for loop) - for name, ns in repo.names.iteritems(): - if name != 'branches': - names.update(ns.listnames(repo)) - names.update(tag for (tag, heads, tip, closed) - in repo.branchmap().iterbranches() if not closed) - completions = set() - if not args: - args = [''] - for a in args: - completions.update(n for n in names if n.startswith(a)) - ui.write('\n'.join(sorted(completions))) - ui.write('\n') - -@command('debuglocks', - [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')), - ('W', 'force-wlock', None, - _('free the working state lock (DANGEROUS)'))], - _('[OPTION]...')) -def debuglocks(ui, repo, **opts): - """show or modify state of locks - - By default, this command will show which locks are held. This - includes the user and process holding the lock, the amount of time - the lock has been held, and the machine name where the process is - running if it's not local. - - Locks protect the integrity of Mercurial's data, so should be - treated with care. System crashes or other interruptions may cause - locks to not be properly released, though Mercurial will usually - detect and remove such stale locks automatically. - - However, detecting stale locks may not always be possible (for - instance, on a shared filesystem). Removing locks may also be - blocked by filesystem permissions. - - Returns 0 if no locks are held. 
- - """ - - if opts.get('force_lock'): - repo.svfs.unlink('lock') - if opts.get('force_wlock'): - repo.vfs.unlink('wlock') - if opts.get('force_lock') or opts.get('force_lock'): - return 0 - - now = time.time() - held = 0 - - def report(vfs, name, method): - # this causes stale locks to get reaped for more accurate reporting - try: - l = method(False) - except error.LockHeld: - l = None - - if l: - l.release() - else: - try: - stat = vfs.lstat(name) - age = now - stat.st_mtime - user = util.username(stat.st_uid) - locker = vfs.readlock(name) - if ":" in locker: - host, pid = locker.split(':') - if host == socket.gethostname(): - locker = 'user %s, process %s' % (user, pid) - else: - locker = 'user %s, process %s, host %s' \ - % (user, pid, host) - ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age)) - return 1 - except OSError as e: - if e.errno != errno.ENOENT: - raise - - ui.write(("%-6s free\n") % (name + ":")) - return 0 - - held += report(repo.svfs, "lock", repo.lock) - held += report(repo.vfs, "wlock", repo.wlock) - - return held - -@command('debugobsolete', - [('', 'flags', 0, _('markers flag')), - ('', 'record-parents', False, - _('record parent information for the precursor')), - ('r', 'rev', [], _('display markers relevant to REV')), - ('', 'index', False, _('display index of the marker')), - ('', 'delete', [], _('delete markers specified by indices')), - ] + commitopts2 + formatteropts, - _('[OBSOLETED [REPLACEMENT ...]]')) -def debugobsolete(ui, repo, precursor=None, *successors, **opts): - """create arbitrary obsolete marker - - With no arguments, displays the list of obsolescence markers.""" - - def parsenodeid(s): - try: - # We do not use revsingle/revrange functions here to accept - # arbitrary node identifiers, possibly not present in the - # local repository. 
- n = bin(s) - if len(n) != len(nullid): - raise TypeError() - return n - except TypeError: - raise error.Abort('changeset references must be full hexadecimal ' - 'node identifiers') - - if opts.get('delete'): - indices = [] - for v in opts.get('delete'): - try: - indices.append(int(v)) - except ValueError: - raise error.Abort(_('invalid index value: %r') % v, - hint=_('use integers for indices')) - - if repo.currenttransaction(): - raise error.Abort(_('cannot delete obsmarkers in the middle ' - 'of a transaction.')) - - with repo.lock(): - n = repair.deleteobsmarkers(repo.obsstore, indices) - ui.write(_('deleted %i obsolescence markers\n') % n) - - return - - if precursor is not None: - if opts['rev']: - raise error.Abort('cannot select revision when creating marker') - metadata = {} - metadata['user'] = opts['user'] or ui.username() - succs = tuple(parsenodeid(succ) for succ in successors) - l = repo.lock() - try: - tr = repo.transaction('debugobsolete') - try: - date = opts.get('date') - if date: - date = util.parsedate(date) - else: - date = None - prec = parsenodeid(precursor) - parents = None - if opts['record_parents']: - if prec not in repo.unfiltered(): - raise error.Abort('cannot use --record-parents on ' - 'unknown changesets') - parents = repo.unfiltered()[prec].parents() - parents = tuple(p.node() for p in parents) - repo.obsstore.create(tr, prec, succs, opts['flags'], - parents=parents, date=date, - metadata=metadata) - tr.close() - except ValueError as exc: - raise error.Abort(_('bad obsmarker input: %s') % exc) - finally: - tr.release() - finally: - l.release() - else: - if opts['rev']: - revs = scmutil.revrange(repo, opts['rev']) - nodes = [repo[r].node() for r in revs] - markers = list(obsolete.getmarkers(repo, nodes=nodes)) - markers.sort(key=lambda x: x._data) - else: - markers = obsolete.getmarkers(repo) - - markerstoiter = markers - isrelevant = lambda m: True - if opts.get('rev') and opts.get('index'): - markerstoiter = obsolete.getmarkers(repo) - markerset = set(markers) - isrelevant = lambda m: m in markerset - - fm = ui.formatter('debugobsolete', opts) - for i, m in enumerate(markerstoiter): - if not isrelevant(m): - # marker can be irrelevant when we're iterating over a set - # of markers (markerstoiter) which is bigger than the set - # of markers we want to display (markers) - # this can happen if both --index and --rev options are - # provided and thus we need to iterate over all of the markers - # to get the correct indices, but only display the ones that - # are relevant to --rev value - continue - fm.startitem() - ind = i if opts.get('index') else None - cmdutil.showmarker(fm, m, index=ind) - fm.end() - -@command('debugpathcomplete', - [('f', 'full', None, _('complete an entire path')), - ('n', 'normal', None, _('show only normal files')), - ('a', 'added', None, _('show only added files')), - ('r', 'removed', None, _('show only removed files'))], - _('FILESPEC...')) -def debugpathcomplete(ui, repo, *specs, **opts): - '''complete part or all of a tracked path - - This command supports shells that offer path name completion. It - currently completes only files already known to the dirstate. 
- - Completion extends only to the next path segment unless - --full is specified, in which case entire paths are used.''' - - def complete(path, acceptable): - dirstate = repo.dirstate - spec = os.path.normpath(os.path.join(pycompat.getcwd(), path)) - rootdir = repo.root + pycompat.ossep - if spec != repo.root and not spec.startswith(rootdir): - return [], [] - if os.path.isdir(spec): - spec += '/' - spec = spec[len(rootdir):] - fixpaths = pycompat.ossep != '/' - if fixpaths: - spec = spec.replace(pycompat.ossep, '/') - speclen = len(spec) - fullpaths = opts['full'] - files, dirs = set(), set() - adddir, addfile = dirs.add, files.add - for f, st in dirstate.iteritems(): - if f.startswith(spec) and st[0] in acceptable: - if fixpaths: - f = f.replace('/', pycompat.ossep) - if fullpaths: - addfile(f) - continue - s = f.find(pycompat.ossep, speclen) - if s >= 0: - adddir(f[:s]) - else: - addfile(f) - return files, dirs - - acceptable = '' - if opts['normal']: - acceptable += 'nm' - if opts['added']: - acceptable += 'a' - if opts['removed']: - acceptable += 'r' - cwd = repo.getcwd() - if not specs: - specs = ['.'] - - files, dirs = set(), set() - for spec in specs: - f, d = complete(spec, acceptable or 'nmar') - files.update(f) - dirs.update(d) - files.update(dirs) - ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files))) - ui.write('\n') - -@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True) -def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): - '''access the pushkey key/value protocol - - With two args, list the keys in the given namespace. - - With five args, set a key to new if it currently is set to old. - Reports success or failure. - ''' - - target = hg.peer(ui, {}, repopath) - if keyinfo: - key, old, new = keyinfo - r = target.pushkey(namespace, key, old, new) - ui.status(str(r) + '\n') - return not r - else: - for k, v in sorted(target.listkeys(namespace).iteritems()): - ui.write("%s\t%s\n" % (k.encode('string-escape'), - v.encode('string-escape'))) - -@command('debugpvec', [], _('A B')) -def debugpvec(ui, repo, a, b=None): - ca = scmutil.revsingle(repo, a) - cb = scmutil.revsingle(repo, b) - pa = pvec.ctxpvec(ca) - pb = pvec.ctxpvec(cb) - if pa == pb: - rel = "=" - elif pa > pb: - rel = ">" - elif pa < pb: - rel = "<" - elif pa | pb: - rel = "|" - ui.write(_("a: %s\n") % pa) - ui.write(_("b: %s\n") % pb) - ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth)) - ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") % - (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec), - pa.distance(pb), rel)) - -@command('debugrebuilddirstate|debugrebuildstate', - [('r', 'rev', '', _('revision to rebuild to'), _('REV')), - ('', 'minimal', None, _('only rebuild files that are inconsistent with ' - 'the working copy parent')), - ], - _('[-r REV]')) -def debugrebuilddirstate(ui, repo, rev, **opts): - """rebuild the dirstate as it would look like for the given revision - - If no revision is specified the first current parent will be used. - - The dirstate will be set to the files of the given revision. - The actual working directory content or existing dirstate - information such as adds or removes is not considered. - - ``minimal`` will only rebuild the dirstate status for files that claim to be - tracked but are not in the parent manifest, or that exist in the parent - manifest but are not in the dirstate. It will not change adds, removes, or - modified files that are in the working copy parent. 
- - One use of this command is to make the next :hg:`status` invocation - check the actual file content. - """ - ctx = scmutil.revsingle(repo, rev) - with repo.wlock(): - dirstate = repo.dirstate - changedfiles = None - # See command doc for what minimal does. - if opts.get('minimal'): - manifestfiles = set(ctx.manifest().keys()) - dirstatefiles = set(dirstate) - manifestonly = manifestfiles - dirstatefiles - dsonly = dirstatefiles - manifestfiles - dsnotadded = set(f for f in dsonly if dirstate[f] != 'a') - changedfiles = manifestonly | dsnotadded - - dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) - -@command('debugrebuildfncache', [], '') -def debugrebuildfncache(ui, repo): - """rebuild the fncache file""" - repair.rebuildfncache(ui, repo) - -@command('debugrename', - [('r', 'rev', '', _('revision to debug'), _('REV'))], - _('[-r REV] FILE')) -def debugrename(ui, repo, file1, *pats, **opts): - """dump rename information""" - - ctx = scmutil.revsingle(repo, opts.get('rev')) - m = scmutil.match(ctx, (file1,) + pats, opts) - for abs in ctx.walk(m): - fctx = ctx[abs] - o = fctx.filelog().renamed(fctx.filenode()) - rel = m.rel(abs) - if o: - ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) - else: - ui.write(_("%s not renamed\n") % rel) - -@command('debugrevlog', debugrevlogopts + - [('d', 'dump', False, _('dump index data'))], - _('-c|-m|FILE'), - optionalrepo=True) -def debugrevlog(ui, repo, file_=None, **opts): - """show data and statistics about a revlog""" - r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts) - - if opts.get("dump"): - numrevs = len(r) - ui.write(("# rev p1rev p2rev start end deltastart base p1 p2" - " rawsize totalsize compression heads chainlen\n")) - ts = 0 - heads = set() - - for rev in xrange(numrevs): - dbase = r.deltaparent(rev) - if dbase == -1: - dbase = rev - cbase = r.chainbase(rev) - clen = r.chainlen(rev) - p1, p2 = r.parentrevs(rev) - rs = r.rawsize(rev) - ts = ts + rs - heads -= set(r.parentrevs(rev)) - heads.add(rev) - try: - compression = ts / r.end(rev) - except ZeroDivisionError: - compression = 0 - ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " - "%11d %5d %8d\n" % - (rev, p1, p2, r.start(rev), r.end(rev), - r.start(dbase), r.start(cbase), - r.start(p1), r.start(p2), - rs, ts, compression, len(heads), clen)) - return 0 - - v = r.version - format = v & 0xFFFF - flags = [] - gdelta = False - if v & revlog.REVLOGNGINLINEDATA: - flags.append('inline') - if v & revlog.REVLOGGENERALDELTA: - gdelta = True - flags.append('generaldelta') - if not flags: - flags = ['(none)'] - - nummerges = 0 - numfull = 0 - numprev = 0 - nump1 = 0 - nump2 = 0 - numother = 0 - nump1prev = 0 - nump2prev = 0 - chainlengths = [] - - datasize = [None, 0, 0] - fullsize = [None, 0, 0] - deltasize = [None, 0, 0] - chunktypecounts = {} - chunktypesizes = {} - - def addsize(size, l): - if l[0] is None or size < l[0]: - l[0] = size - if size > l[1]: - l[1] = size - l[2] += size - - numrevs = len(r) - for rev in xrange(numrevs): - p1, p2 = r.parentrevs(rev) - delta = r.deltaparent(rev) - if format > 0: - addsize(r.rawsize(rev), datasize) - if p2 != nullrev: - nummerges += 1 - size = r.length(rev) - if delta == nullrev: - chainlengths.append(0) - numfull += 1 - addsize(size, fullsize) - else: - chainlengths.append(chainlengths[delta] + 1) - addsize(size, deltasize) - if delta == rev - 1: - numprev += 1 - if delta == p1: - nump1prev += 1 - elif delta == p2: - nump2prev += 1 - elif delta == p1: - nump1 += 1 - elif delta == p2: - nump2 += 1 - elif 
delta != nullrev: - numother += 1 - - # Obtain data on the raw chunks in the revlog. - chunk = r._chunkraw(rev, rev)[1] - if chunk: - chunktype = chunk[0] - else: - chunktype = 'empty' - - if chunktype not in chunktypecounts: - chunktypecounts[chunktype] = 0 - chunktypesizes[chunktype] = 0 - - chunktypecounts[chunktype] += 1 - chunktypesizes[chunktype] += size - - # Adjust size min value for empty cases - for size in (datasize, fullsize, deltasize): - if size[0] is None: - size[0] = 0 - - numdeltas = numrevs - numfull - numoprev = numprev - nump1prev - nump2prev - totalrawsize = datasize[2] - datasize[2] /= numrevs - fulltotal = fullsize[2] - fullsize[2] /= numfull - deltatotal = deltasize[2] - if numrevs - numfull > 0: - deltasize[2] /= numrevs - numfull - totalsize = fulltotal + deltatotal - avgchainlen = sum(chainlengths) / numrevs - maxchainlen = max(chainlengths) - compratio = 1 - if totalsize: - compratio = totalrawsize / totalsize - - basedfmtstr = '%%%dd\n' - basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n' - - def dfmtstr(max): - return basedfmtstr % len(str(max)) - def pcfmtstr(max, padding=0): - return basepcfmtstr % (len(str(max)), ' ' * padding) - - def pcfmt(value, total): - if total: - return (value, 100 * float(value) / total) - else: - return value, 100.0 - - ui.write(('format : %d\n') % format) - ui.write(('flags : %s\n') % ', '.join(flags)) - - ui.write('\n') - fmt = pcfmtstr(totalsize) - fmt2 = dfmtstr(totalsize) - ui.write(('revisions : ') + fmt2 % numrevs) - ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs)) - ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs)) - ui.write(('revisions : ') + fmt2 % numrevs) - ui.write((' full : ') + fmt % pcfmt(numfull, numrevs)) - ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs)) - ui.write(('revision size : ') + fmt2 % totalsize) - ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize)) - ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize)) - - def fmtchunktype(chunktype): - if chunktype == 'empty': - return ' %s : ' % chunktype - elif chunktype in string.ascii_letters: - return ' 0x%s (%s) : ' % (hex(chunktype), chunktype) - else: - return ' 0x%s : ' % hex(chunktype) - - ui.write('\n') - ui.write(('chunks : ') + fmt2 % numrevs) - for chunktype in sorted(chunktypecounts): - ui.write(fmtchunktype(chunktype)) - ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) - ui.write(('chunks size : ') + fmt2 % totalsize) - for chunktype in sorted(chunktypecounts): - ui.write(fmtchunktype(chunktype)) - ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) - - ui.write('\n') - fmt = dfmtstr(max(avgchainlen, compratio)) - ui.write(('avg chain length : ') + fmt % avgchainlen) - ui.write(('max chain length : ') + fmt % maxchainlen) - ui.write(('compression ratio : ') + fmt % compratio) - - if format > 0: - ui.write('\n') - ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n') - % tuple(datasize)) - ui.write(('full revision size (min/max/avg) : %d / %d / %d\n') - % tuple(fullsize)) - ui.write(('delta size (min/max/avg) : %d / %d / %d\n') - % tuple(deltasize)) - - if numdeltas > 0: - ui.write('\n') - fmt = pcfmtstr(numdeltas) - fmt2 = pcfmtstr(numdeltas, 4) - ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas)) - if numprev > 0: - ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev, - numprev)) - ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev, - numprev)) - ui.write((' other : ') + fmt2 % pcfmt(numoprev, - numprev)) - if gdelta: - ui.write(('deltas against 
p1 : ') - + fmt % pcfmt(nump1, numdeltas)) - ui.write(('deltas against p2 : ') - + fmt % pcfmt(nump2, numdeltas)) - ui.write(('deltas against other : ') + fmt % pcfmt(numother, - numdeltas)) - -@command('debugrevspec', - [('', 'optimize', None, - _('print parsed tree after optimizing (DEPRECATED)')), - ('p', 'show-stage', [], - _('print parsed tree at the given stage'), _('NAME')), - ('', 'no-optimized', False, _('evaluate tree without optimization')), - ('', 'verify-optimized', False, _('verify optimized result')), - ], - ('REVSPEC')) -def debugrevspec(ui, repo, expr, **opts): - """parse and apply a revision specification - - Use -p/--show-stage option to print the parsed tree at the given stages. - Use -p all to print tree at every stage. - - Use --verify-optimized to compare the optimized result with the unoptimized - one. Returns 1 if the optimized result differs. - """ - stages = [ - ('parsed', lambda tree: tree), - ('expanded', lambda tree: revset.expandaliases(ui, tree)), - ('concatenated', revset.foldconcat), - ('analyzed', revset.analyze), - ('optimized', revset.optimize), - ] - if opts['no_optimized']: - stages = stages[:-1] - if opts['verify_optimized'] and opts['no_optimized']: - raise error.Abort(_('cannot use --verify-optimized with ' - '--no-optimized')) - stagenames = set(n for n, f in stages) - - showalways = set() - showchanged = set() - if ui.verbose and not opts['show_stage']: - # show parsed tree by --verbose (deprecated) - showalways.add('parsed') - showchanged.update(['expanded', 'concatenated']) - if opts['optimize']: - showalways.add('optimized') - if opts['show_stage'] and opts['optimize']: - raise error.Abort(_('cannot use --optimize with --show-stage')) - if opts['show_stage'] == ['all']: - showalways.update(stagenames) - else: - for n in opts['show_stage']: - if n not in stagenames: - raise error.Abort(_('invalid stage name: %s') % n) - showalways.update(opts['show_stage']) - - treebystage = {} - printedtree = None - tree = revset.parse(expr, lookup=repo.__contains__) - for n, f in stages: - treebystage[n] = tree = f(tree) - if n in showalways or (n in showchanged and tree != printedtree): - if opts['show_stage'] or n != 'parsed': - ui.write(("* %s:\n") % n) - ui.write(revset.prettyformat(tree), "\n") - printedtree = tree - - if opts['verify_optimized']: - arevs = revset.makematcher(treebystage['analyzed'])(repo) - brevs = revset.makematcher(treebystage['optimized'])(repo) - if ui.verbose: - ui.note(("* analyzed set:\n"), revset.prettyformatset(arevs), "\n") - ui.note(("* optimized set:\n"), revset.prettyformatset(brevs), "\n") - arevs = list(arevs) - brevs = list(brevs) - if arevs == brevs: - return 0 - ui.write(('--- analyzed\n'), label='diff.file_a') - ui.write(('+++ optimized\n'), label='diff.file_b') - sm = difflib.SequenceMatcher(None, arevs, brevs) - for tag, alo, ahi, blo, bhi in sm.get_opcodes(): - if tag in ('delete', 'replace'): - for c in arevs[alo:ahi]: - ui.write('-%s\n' % c, label='diff.deleted') - if tag in ('insert', 'replace'): - for c in brevs[blo:bhi]: - ui.write('+%s\n' % c, label='diff.inserted') - if tag == 'equal': - for c in arevs[alo:ahi]: - ui.write(' %s\n' % c) - return 1 - - func = revset.makematcher(tree) - revs = func(repo) - if ui.verbose: - ui.note(("* set:\n"), revset.prettyformatset(revs), "\n") - for c in revs: - ui.write("%s\n" % c) - -@command('debugsetparents', [], _('REV1 [REV2]')) -def debugsetparents(ui, repo, rev1, rev2=None): - """manually set the parents of the current working directory - - This is useful for writing 
repository conversion tools, but should - be used with care. For example, neither the working directory nor the - dirstate is updated, so file status may be incorrect after running this - command. - - Returns 0 on success. - """ - - r1 = scmutil.revsingle(repo, rev1).node() - r2 = scmutil.revsingle(repo, rev2, 'null').node() - - with repo.wlock(): - repo.setparents(r1, r2) - -@command('debugdirstate|debugstate', - [('', 'nodates', None, _('do not display the saved mtime')), - ('', 'datesort', None, _('sort by saved mtime'))], - _('[OPTION]...')) -def debugstate(ui, repo, **opts): - """show the contents of the current dirstate""" - - nodates = opts.get('nodates') - datesort = opts.get('datesort') - - timestr = "" - if datesort: - keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename - else: - keyfunc = None # sort by filename - for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc): - if ent[3] == -1: - timestr = 'unset ' - elif nodates: - timestr = 'set ' - else: - timestr = time.strftime("%Y-%m-%d %H:%M:%S ", - time.localtime(ent[3])) - if ent[1] & 0o20000: - mode = 'lnk' - else: - mode = '%3o' % (ent[1] & 0o777 & ~util.umask) - ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_)) - for f in repo.dirstate.copies(): - ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) - -@command('debugsub', - [('r', 'rev', '', - _('revision to check'), _('REV'))], - _('[-r REV] [REV]')) -def debugsub(ui, repo, rev=None): - ctx = scmutil.revsingle(repo, rev, None) - for k, v in sorted(ctx.substate.items()): - ui.write(('path %s\n') % k) - ui.write((' source %s\n') % v[0]) - ui.write((' revision %s\n') % v[1]) - -@command('debugsuccessorssets', - [], - _('[REV]')) -def debugsuccessorssets(ui, repo, *revs): - """show set of successors for revision - - A successors set of changeset A is a consistent group of revisions that - succeed A. It contains non-obsolete changesets only. - - In most cases a changeset A has a single successors set containing a single - successor (changeset A replaced by A'). - - A changeset that is made obsolete with no successors is called "pruned". - Such changesets have no successors sets at all. - - A changeset that has been "split" will have a successors set containing - more than one successor. - - A changeset that has been rewritten in multiple different ways is called - "divergent". Such changesets have multiple successor sets (each of which - may also be split, i.e. have multiple successors). - - Results are displayed as follows:: - - <rev1> - <successors-1A> - <rev2> - <successors-2A> - <successors-2B1> <successors-2B2> <successors-2B3> - - Here rev2 has two possible (i.e. divergent) successors sets. The first - holds one element, whereas the second holds three (i.e. the changeset has - been split). - """ - # passed to successorssets caching computation from one call to another - cache = {} - ctx2str = str - node2str = short - if ui.debug(): - def ctx2str(ctx): - return ctx.hex() - node2str = hex - for rev in scmutil.revrange(repo, revs): - ctx = repo[rev] - ui.write('%s\n'% ctx2str(ctx)) - for succsset in obsolete.successorssets(repo, ctx.node(), cache): - if succsset: - ui.write(' ') - ui.write(node2str(succsset[0])) - for node in succsset[1:]: - ui.write(' ') - ui.write(node2str(node)) - ui.write('\n') - -@command('debugtemplate', - [('r', 'rev', [], _('apply template on changesets'), _('REV')), - ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))], - _('[-r REV]... [-D KEY=VALUE]... 
TEMPLATE'), - optionalrepo=True) -def debugtemplate(ui, repo, tmpl, **opts): - """parse and apply a template - - If -r/--rev is given, the template is processed as a log template and - applied to the given changesets. Otherwise, it is processed as a generic - template. - - Use --verbose to print the parsed tree. - """ - revs = None - if opts['rev']: - if repo is None: - raise error.RepoError(_('there is no Mercurial repository here ' - '(.hg not found)')) - revs = scmutil.revrange(repo, opts['rev']) - - props = {} - for d in opts['define']: - try: - k, v = (e.strip() for e in d.split('=', 1)) - if not k: - raise ValueError - props[k] = v - except ValueError: - raise error.Abort(_('malformed keyword definition: %s') % d) - - if ui.verbose: - aliases = ui.configitems('templatealias') - tree = templater.parse(tmpl) - ui.note(templater.prettyformat(tree), '\n') - newtree = templater.expandaliases(tree, aliases) - if newtree != tree: - ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n') - - mapfile = None - if revs is None: - k = 'debugtemplate' - t = formatter.maketemplater(ui, k, tmpl) - ui.write(templater.stringify(t(k, **props))) - else: - displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl, - mapfile, buffered=False) - for r in revs: - displayer.show(repo[r], **props) - displayer.close() - -@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'), inferrepo=True) -def debugwalk(ui, repo, *pats, **opts): - """show how files match on given patterns""" - m = scmutil.match(repo[None], pats, opts) - items = list(repo.walk(m)) - if not items: - return - f = lambda fn: fn - if ui.configbool('ui', 'slash') and pycompat.ossep != '/': - f = lambda fn: util.normpath(fn) - fmt = 'f %%-%ds %%-%ds %%s' % ( - max([len(abs) for abs in items]), - max([len(m.rel(abs)) for abs in items])) - for abs in items: - line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '') - ui.write("%s\n" % line.rstrip()) - -@command('debugwireargs', - [('', 'three', '', 'three'), - ('', 'four', '', 'four'), - ('', 'five', '', 'five'), - ] + remoteopts, - _('REPO [OPTIONS]... 
[ONE [TWO]]'), - norepo=True) -def debugwireargs(ui, repopath, *vals, **opts): - repo = hg.peer(ui, opts, repopath) - for opt in remoteopts: - del opts[opt[1]] - args = {} - for k, v in opts.iteritems(): - if v: - args[k] = v - # run twice to check that we don't mess up the stream for the next command - res1 = repo.debugwireargs(*vals, **args) - res2 = repo.debugwireargs(*vals, **args) - ui.write("%s\n" % res1) - if res1 != res2: - ui.warn("%s\n" % res2) - @command('^diff', [('r', 'rev', [], _('revision'), _('REV')), ('c', 'change', '', _('change made by revision'), _('REV')) @@ -3119,6 +1947,7 @@ diffopts = patch.diffallopts(ui, opts) m = scmutil.match(repo[node2], pats, opts) + ui.pager('diff') cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat, listsubrepos=opts.get('subrepos'), root=opts.get('root')) @@ -3200,6 +2029,7 @@ ui.note(_('exporting patches:\n')) else: ui.note(_('exporting patch:\n')) + ui.pager('export') cmdutil.export(repo, revs, template=opts.get('output'), switch_parent=opts.get('switch_parent'), opts=patch.diffallopts(ui, opts)) @@ -3261,6 +2091,7 @@ fmt = '%s' + end m = scmutil.match(ctx, pats, opts) + ui.pager('files') with ui.formatter('files', opts) as fm: return cmdutil.files(ui, ctx, m, fm, fmt, opts.get('subrepos')) @@ -3782,6 +2613,7 @@ except error.LookupError: pass + ui.pager('grep') fm = ui.formatter('grep', opts) for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep): rev = ctx.rev() @@ -3897,11 +2729,6 @@ Returns 0 if successful. """ - textwidth = ui.configint('ui', 'textwidth', 78) - termwidth = ui.termwidth() - 2 - if textwidth <= 0 or termwidth < textwidth: - textwidth = termwidth - keep = opts.get('system') or [] if len(keep) == 0: if pycompat.sysplatform.startswith('win'): @@ -3916,36 +2743,8 @@ if ui.verbose: keep.append('verbose') - section = None - subtopic = None - if name and '.' in name: - name, remaining = name.split('.', 1) - remaining = encoding.lower(remaining) - if '.' in remaining: - subtopic, section = remaining.split('.', 1) - else: - if name in help.subtopics: - subtopic = remaining - else: - section = remaining - - text = help.help_(ui, name, subtopic=subtopic, **opts) - - formatted, pruned = minirst.format(text, textwidth, keep=keep, - section=section) - - # We could have been given a weird ".foo" section without a name - # to look for, or we could have simply failed to found "foo.bar" - # because bar isn't a section of foo - if section and not (formatted and name): - raise error.Abort(_("help section not found")) - - if 'verbose' in pruned: - keep.append('omitted') - else: - keep.append('notomitted') - formatted, pruned = minirst.format(text, textwidth, keep=keep, - section=section) + formatted = help.formattedhelp(ui, name, keep=keep, **opts) + ui.pager('help') ui.write(formatted) @@ -4127,8 +2926,9 @@ Import a list of patches and commit them individually (unless --no-commit is specified). - To read a patch from standard input, use "-" as the patch name. If - a URL is specified, the patch will be downloaded from there. + To read a patch from standard input (stdin), use "-" as the patch + name. If a URL is specified, the patch will be downloaded from + there. 
Import first applies changes to the working directory (unless --bypass is specified), import will abort if there are outstanding @@ -4198,6 +2998,10 @@ hg import incoming-patches.mbox + - import patches from stdin:: + + hg import - + - attempt to exactly restore an exported changeset (not always possible):: @@ -4392,6 +3196,7 @@ if 'bookmarks' not in other.listkeys('namespaces'): ui.warn(_("remote doesn't support bookmarks\n")) return 0 + ui.pager('incoming') ui.status(_('comparing with %s\n') % util.hidepassword(source)) return bookmarks.incoming(ui, repo, other) @@ -4458,6 +3263,7 @@ m = scmutil.match(ctx, pats, opts, default='relglob', badfn=lambda x, y: False) + ui.pager('locate') for abs in ctx.matches(m): if opts.get('fullpath'): ui.write(repo.wjoin(abs), end) @@ -4589,7 +3395,7 @@ """ if opts.get('follow') and opts.get('rev'): - opts['rev'] = [revset.formatspec('reverse(::%lr)', opts.get('rev'))] + opts['rev'] = [revsetlang.formatspec('reverse(::%lr)', opts.get('rev'))] del opts['follow'] if opts.get('graph'): @@ -4606,6 +3412,7 @@ endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) + ui.pager('log') displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True) for rev in revs: if count == limit: @@ -4648,7 +3455,6 @@ Returns 0 on success. """ - fm = ui.formatter('manifest', opts) if opts.get('all'): @@ -4664,6 +3470,7 @@ for fn, b, size in repo.store.datafiles(): if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix: res.append(fn[plen:-slen]) + ui.pager('manifest') for f in res: fm.startitem() fm.write("path", '%s\n', f) @@ -4680,6 +3487,7 @@ mode = {'l': '644', 'x': '755', '': '644'} ctx = scmutil.revsingle(repo, node) mf = ctx.manifest() + ui.pager('manifest') for f in ctx: fm.startitem() fl = ctx[f].flags() @@ -4812,6 +3620,7 @@ return revdag = cmdutil.graphrevs(repo, o, opts) + ui.pager('outgoing') displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True) cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges) cmdutil.outgoinghooks(ui, repo, other, opts, o) @@ -4825,6 +3634,7 @@ ui.warn(_("remote doesn't support bookmarks\n")) return 0 ui.status(_('comparing with %s\n') % util.hidepassword(dest)) + ui.pager('outgoing') return bookmarks.outgoing(ui, repo, other) repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default') @@ -4921,6 +3731,7 @@ Returns 0 on success. """ + ui.pager('paths') if search: pathitems = [(name, path) for name, path in ui.paths.iteritems() if name == search] @@ -5268,7 +4079,7 @@ elif path.pushrev: # It doesn't make any sense to specify ancestor revisions. So limit # to DAG heads to make discovery simpler. - expr = revset.formatspec('heads(%r)', path.pushrev) + expr = revsetlang.formatspec('heads(%r)', path.pushrev) revs = scmutil.revrange(repo, [expr]) revs = [repo[rev].node() for rev in revs] if not revs: @@ -5434,6 +4245,8 @@ - :hg:`resolve -l`: list files which had or still have conflicts. In the printed list, ``U`` = unresolved and ``R`` = resolved. + You can use ``set:unresolved()`` or ``set:resolved()`` to filter + the list. See :hg:`help filesets` for details. .. 
note:: @@ -5457,6 +4270,7 @@ hint=('use --all to re-merge all unresolved files')) if show: + ui.pager('resolve') fm = ui.formatter('resolve', opts) ms = mergemod.mergestate.read(repo) m = scmutil.match(repo[None], pats, opts) @@ -5780,8 +4594,8 @@ ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'), _('FILE')), ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')), - ('', 'stdio', None, _('for remote clients')), - ('', 'cmdserver', '', _('for remote clients'), _('MODE')), + ('', 'stdio', None, _('for remote clients (ADVANCED)')), + ('', 'cmdserver', '', _('for remote clients (ADVANCED)'), _('MODE')), ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')), ('', 'style', '', _('template style to use'), _('STYLE')), ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')), @@ -5946,6 +4760,7 @@ or ui.configbool('ui', 'statuscopies')) and not opts.get('no_status'): copy = copies.pathcopies(repo[node1], repo[node2], m) + ui.pager('status') fm = ui.formatter('status', opts) fmt = '%s' + end showchar = not opts.get('no_status') @@ -5976,6 +4791,7 @@ Returns 0 on success. """ + ui.pager('summary') ctx = repo[None] parents = ctx.parents() pnode = parents[0].node() @@ -6368,6 +5184,7 @@ Returns 0 on success. """ + ui.pager('tags') fm = ui.formatter('tags', opts) hexfunc = fm.hexfunc tagtype = "" @@ -6464,12 +5281,13 @@ @command('^update|up|checkout|co', [('C', 'clean', None, _('discard uncommitted changes (no backup)')), ('c', 'check', None, _('require clean working directory')), + ('m', 'merge', None, _('merge uncommitted changes')), ('d', 'date', '', _('tipmost revision matching date'), _('DATE')), ('r', 'rev', '', _('revision'), _('REV')) ] + mergetoolopts, - _('[-c] [-C] [-d DATE] [[-r] REV]')) + _('[-C|-c|-m] [-d DATE] [[-r] REV]')) def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False, - tool=None): + merge=None, tool=None): """update working directory (or switch revisions) Update the repository's working directory to the specified @@ -6488,10 +5306,11 @@ .. container:: verbose - The following rules apply when the working directory contains - uncommitted changes: - - 1. If neither -c/--check nor -C/--clean is specified, and if + The -C/--clean, -c/--check, and -m/--merge options control what + happens if the working directory contains uncommitted changes. + At most one of them can be specified. + + 1. If no option is specified, and if the requested changeset is an ancestor or descendant of the working directory's parent, the uncommitted changes are merged into the requested changeset and the merged @@ -6500,10 +5319,14 @@ branch), the update is aborted and the uncommitted changes are preserved. - 2. With the -c/--check option, the update is aborted and the + 2. With the -m/--merge option, the update is allowed even if the + requested changeset is not an ancestor or descendant of + the working directory's parent. + + 3. With the -c/--check option, the update is aborted and the uncommitted changes are preserved. - 3. With the -C/--clean option, uncommitted changes are discarded and + 4. With the -C/--clean option, uncommitted changes are discarded and the working directory is updated to the requested changeset. 
To cancel an uncommitted merge (and lose your changes), use @@ -6528,8 +5351,15 @@ if date and rev is not None: raise error.Abort(_("you can't specify a revision and a date")) - if check and clean: - raise error.Abort(_("cannot specify both -c/--check and -C/--clean")) + if len([x for x in (clean, check, merge) if x]) > 1: + raise error.Abort(_("can only specify one of -C/--clean, -c/--check, " + "or -m/--merge")) + + updatecheck = None + if check: + updatecheck = 'abort' + elif merge: + updatecheck = 'none' with repo.wlock(): cmdutil.clearunfinished(repo) @@ -6541,12 +5371,10 @@ brev = rev rev = scmutil.revsingle(repo, rev, rev).rev() - if check: - cmdutil.bailifchanged(repo, merge=False) - repo.ui.setconfig('ui', 'forcemerge', tool, 'update') - return hg.updatetotally(ui, repo, rev, brev, clean=clean, check=check) + return hg.updatetotally(ui, repo, rev, brev, clean=clean, + updatecheck=updatecheck) @command('verify', []) def verify(ui, repo): @@ -6570,6 +5398,8 @@ @command('version', [] + formatteropts, norepo=True) def version_(ui, **opts): """output version and copyright information""" + if ui.verbose: + ui.pager('version') fm = ui.formatter("version", opts) fm.startitem() fm.write("ver", _("Mercurial Distributed SCM (version %s)\n"),
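The update hunks above collapse the old check/clean special-casing into a single internal mode that is passed to hg.updatetotally() as updatecheck. A minimal sketch of that flag-to-mode mapping, with the helper name pickupdatecheck invented for illustration (this is not Mercurial's actual code):

    def pickupdatecheck(clean, check, merge):
        # Mirror the guard in the hunk: the three flags are mutually
        # exclusive, and check/merge map to 'abort'/'none' respectively.
        if len([x for x in (clean, check, merge) if x]) > 1:
            raise ValueError("can only specify one of "
                             "-C/--clean, -c/--check, or -m/--merge")
        if check:
            return 'abort'  # refuse to update a dirty working directory
        if merge:
            return 'none'   # allow the update and merge pending changes
        return None         # default: the linear-update rule applies

    assert pickupdatecheck(clean=False, check=True, merge=False) == 'abort'
    assert pickupdatecheck(clean=False, check=False, merge=True) == 'none'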
--- a/mercurial/commandserver.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/commandserver.py Sat Mar 11 13:53:14 2017 -0500 @@ -304,8 +304,8 @@ ui.flush() newfiles = [] nullfd = os.open(os.devnull, os.O_RDWR) - for f, sysf, mode in [(ui.fin, util.stdin, 'rb'), - (ui.fout, util.stdout, 'wb')]: + for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')), + (ui.fout, util.stdout, pycompat.sysstr('wb'))]: if f is sysf: newfd = os.dup(f.fileno()) os.dup2(nullfd, f.fileno()) @@ -447,6 +447,7 @@ self._sock = None self._oldsigchldhandler = None self._workerpids = set() # updated by signal handler; do not iterate + self._socketunlinked = None def init(self): self._sock = socket.socket(socket.AF_UNIX) @@ -455,11 +456,17 @@ o = signal.signal(signal.SIGCHLD, self._sigchldhandler) self._oldsigchldhandler = o self._servicehandler.printbanner(self.address) + self._socketunlinked = False + + def _unlinksocket(self): + if not self._socketunlinked: + self._servicehandler.unlinksocket(self.address) + self._socketunlinked = True def _cleanup(self): signal.signal(signal.SIGCHLD, self._oldsigchldhandler) self._sock.close() - self._servicehandler.unlinksocket(self.address) + self._unlinksocket() # don't kill child processes as they have active clients, just wait self._reapworkers(0) @@ -470,11 +477,23 @@ self._cleanup() def _mainloop(self): + exiting = False h = self._servicehandler - while not h.shouldexit(): + while True: + if not exiting and h.shouldexit(): + # clients can no longer connect() to the domain socket, so + # we stop queuing new requests. + # for requests that are queued (connect()-ed, but haven't been + # accept()-ed), handle them before exit. otherwise, clients + # waiting for recv() will receive ECONNRESET. + self._unlinksocket() + exiting = True try: ready = select.select([self._sock], [], [], h.pollinterval)[0] if not ready: + # only exit if we completed all queued requests + if exiting: + break continue conn, _addr = self._sock.accept() except (select.error, socket.error) as inst:
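The commandserver hunk above reworks the unix-socket service loop so that, once shouldexit() fires, the socket path is unlinked first (stopping new connect()s) and connections already queued by the kernel are still accept()-ed and served; exiting any earlier would hand those clients ECONNRESET. A self-contained sketch of that drain-before-exit pattern, using illustrative names rather than Mercurial's API:

    import os
    import select

    def serveloop(sock, sockpath, shouldexit, handle, pollinterval=1.0):
        exiting = False
        while True:
            if not exiting and shouldexit():
                os.unlink(sockpath)  # no new connect()s; queued ones survive
                exiting = True
            ready = select.select([sock], [], [], pollinterval)[0]
            if not ready:
                if exiting:          # backlog fully drained; safe to stop
                    break
                continue
            conn, _addr = sock.accept()
            handle(conn)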
--- a/mercurial/config.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/config.py Sat Mar 11 13:53:14 2017 -0500 @@ -169,5 +169,9 @@ def read(self, path, fp=None, sections=None, remap=None): if not fp: - fp = util.posixfile(path) - self.parse(path, fp.read(), sections, remap, self.read) + fp = util.posixfile(path, 'rb') + assert getattr(fp, 'mode', r'rb') == r'rb', ( + 'config files must be opened in binary mode, got fp=%r mode=%r' % ( + fp, fp.mode)) + self.parse(path, fp.read(), + sections=sections, remap=remap, include=self.read)
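The config.py hunk adds an assertion that any caller-supplied file object is in binary mode, since the parser works on bytes and a text-mode file would corrupt them (notably on Python 3 and on Windows). A minimal sketch of the same guard around a plain open()-based reader (an illustration, not the real mercurial.config API):

    def readconfig(path, fp=None):
        if fp is None:
            fp = open(path, 'rb')
        # Fail loudly and early instead of mis-parsing mangled bytes later.
        assert getattr(fp, 'mode', 'rb') == 'rb', (
            'config files must be opened in binary mode, got fp=%r mode=%r'
            % (fp, getattr(fp, 'mode', None)))
        return fp.read()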
--- a/mercurial/context.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/context.py Sat Mar 11 13:53:14 2017 -0500 @@ -18,11 +18,11 @@ bin, hex, modifiednodeid, - newnodeid, nullid, nullrev, short, wdirid, + wdirnodes, ) from . import ( encoding, @@ -90,14 +90,11 @@ def __iter__(self): return iter(self._manifest) - def _manifestmatches(self, match, s): - """generate a new manifest filtered by the match argument - - This method is for internal use only and mainly exists to provide an - object oriented way for other contexts to customize the manifest - generation. - """ - return self.manifest().matches(match) + def _buildstatusmanifest(self, status): + """Builds a manifest that includes the given status results, if this is + a working copy context. For non-working copy contexts, it just returns + the normal manifest.""" + return self.manifest() def _matchstatus(self, other, match): """return match.always if match is none @@ -116,17 +113,19 @@ # 1000 and cache it so that when you read 1001, we just need to apply a # delta to what's in the cache. So that's one full reconstruction + one # delta application. + mf2 = None if self.rev() is not None and self.rev() < other.rev(): - self.manifest() - mf1 = other._manifestmatches(match, s) - mf2 = self._manifestmatches(match, s) + mf2 = self._buildstatusmanifest(s) + mf1 = other._buildstatusmanifest(s) + if mf2 is None: + mf2 = self._buildstatusmanifest(s) modified, added = [], [] removed = [] clean = [] deleted, unknown, ignored = s.deleted, s.unknown, s.ignored deletedset = set(deleted) - d = mf1.diff(mf2, clean=listclean) + d = mf1.diff(mf2, match=match, clean=listclean) for fn, value in d.iteritems(): if fn in deletedset: continue @@ -140,7 +139,7 @@ removed.append(fn) elif flag1 != flag2: modified.append(fn) - elif node2 != newnodeid: + elif node2 not in wdirnodes: # When comparing files between two commits, we save time by # not comparing the file contents when the nodeids differ. # Note that this means we incorrectly report a reverted change @@ -153,8 +152,10 @@ if removed: # need to filter files if they are already reported as removed - unknown = [fn for fn in unknown if fn not in mf1] - ignored = [fn for fn in ignored if fn not in mf1] + unknown = [fn for fn in unknown if fn not in mf1 and + (not match or match(fn))] + ignored = [fn for fn in ignored if fn not in mf1 and + (not match or match(fn))] # if they're deleted, don't report them as removed removed = [fn for fn in removed if fn not in deletedset] @@ -1166,7 +1167,7 @@ diffinrange = any(stype == '!' for _, stype in filteredblocks) return diffinrange, linerange1 -def blockancestors(fctx, fromline, toline): +def blockancestors(fctx, fromline, toline, followfirst=False): """Yield ancestors of `fctx` with respect to the block of lines within `fromline`-`toline` range. """ @@ -1175,9 +1176,11 @@ while visit: c, linerange2 = visit.pop(max(visit)) pl = c.parents() + if followfirst: + pl = pl[:1] if not pl: # The block originates from the initial revision. 
- yield c + yield c, linerange2 continue inrange = False for p in pl: @@ -1190,7 +1193,7 @@ continue visit[p.linkrev(), p.filenode()] = p, linerange1 if inrange: - yield c + yield c, linerange2 class committablectx(basectx): """A committablectx object provides common functionality for a context that @@ -1263,35 +1266,6 @@ return self._repo.dirstate.flagfunc(self._buildflagfunc) @propertycache - def _manifest(self): - """generate a manifest corresponding to the values in self._status - - This reuses the file nodeid from the parent, but we append an extra letter - when modified. Modified files get an extra 'm' while added files get - an extra 'a'. This is used by manifests merge to see that files - are different and by update logic to avoid deleting newly added files. - """ - parents = self.parents() - - man = parents[0].manifest().copy() - - ff = self._flagfunc - for i, l in ((addednodeid, self._status.added), - (modifiednodeid, self._status.modified)): - for f in l: - man[f] = i - try: - man.setflag(f, ff(f)) - except OSError: - pass - - for f in self._status.deleted + self._status.removed: - if f in man: - del man[f] - - return man - - @propertycache def _status(self): return self._repo.status() @@ -1605,22 +1579,6 @@ pass return modified, fixup - def _manifestmatches(self, match, s): - """Slow path for workingctx - - The fast path is when we compare the working directory to its parent - which means this function is comparing with a non-parent; therefore we - need to build a manifest and return what matches. - """ - mf = self._repo['.']._manifestmatches(match, s) - for f in s.modified + s.added: - mf[f] = newnodeid - mf.setflag(f, self.flags(f)) - for f in s.removed: - if f in mf: - del mf[f] - return mf - - def _dirstatestatus(self, match=None, ignored=False, clean=False, unknown=False): '''Gets the status from the dirstate -- internal use only.''' @@ -1652,6 +1610,39 @@ return s + @propertycache + def _manifest(self): + """generate a manifest corresponding to the values in self._status + + This reuses the file nodeid from the parent, but we use special node + identifiers for added and modified files. This is used by manifests + merge to see that files are different and by update logic to avoid + deleting newly added files. + """ + return self._buildstatusmanifest(self._status) + + def _buildstatusmanifest(self, status): + """Builds a manifest that includes the given status results.""" + parents = self.parents() + + man = parents[0].manifest().copy() + + ff = self._flagfunc + for i, l in ((addednodeid, status.added), + (modifiednodeid, status.modified)): + for f in l: + man[f] = i + try: + man.setflag(f, ff(f)) + except OSError: + pass + + for f in status.deleted + status.removed: + if f in man: + del man[f] + + return man + + def _buildstatus(self, other, s, match, listignored, listclean, listunknown): """build a status with respect to another context
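Two changes in the context.py hunk are easy to miss: blockancestors() now yields (context, linerange) pairs instead of bare contexts, and followfirst=True truncates each parent list to pl[:1], so merges no longer fan the walk out to both sides. A toy first-parent walk over a plain {node: parents} dict shows the truncation idea (illustrative only; Mercurial's real traversal also tracks line ranges and link revisions):

    def firstparentline(node, parents):
        line = []
        while True:
            line.append(node)
            pl = parents.get(node, [])[:1]  # drop a merge's second parent
            if not pl:
                return line                 # reached the initial revision
            node = pl[0]

    graph = {'d': ['c', 'x'], 'c': ['b'], 'b': ['a'], 'a': [], 'x': ['a']}
    assert firstparentline('d', graph) == ['d', 'c', 'b', 'a']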
--- a/mercurial/copies.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/copies.py Sat Mar 11 13:53:14 2017 -0500 @@ -149,10 +149,7 @@ """ ma = a.manifest() mb = b.manifest() - if match: - ma = ma.matches(match) - mb = mb.matches(match) - return mb.filesnotin(ma) + return mb.filesnotin(ma, match=match) def _forwardcopies(a, b, match=None): '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
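The copies.py hunk stops materializing two match-filtered manifests and instead pushes the match down into manifest.filesnotin(). The set semantics being preserved can be sketched with plain dicts standing in for manifests (a sketch of the contract, not the real manifest type):

    def filesnotin(ma, mb, match=None):
        # files in b but not in a, optionally restricted by a match fn
        return set(f for f in mb if f not in ma
                   and (match is None or match(f)))

    ma = {'a': 1, 'b': 2}
    mb = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    assert filesnotin(ma, mb) == {'c', 'd'}
    assert filesnotin(ma, mb, match=lambda f: f != 'd') == {'c'}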
--- a/mercurial/crecord.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/crecord.py Sat Mar 11 13:53:14 2017 -0500 @@ -1375,7 +1375,8 @@ pass helpwin.refresh() try: - helpwin.getkey() + with self.ui.timeblockedsection('crecord'): + helpwin.getkey() except curses.error: pass @@ -1392,7 +1393,8 @@ self.stdscr.refresh() confirmwin.refresh() try: - response = chr(self.stdscr.getch()) + with self.ui.timeblockedsection('crecord'): + response = chr(self.stdscr.getch()) except ValueError: response = None @@ -1412,7 +1414,8 @@ are you sure you want to review/edit and confirm the selected changes [yn]? """) - response = self.confirmationwindow(confirmtext) + with self.ui.timeblockedsection('crecord'): + response = self.confirmationwindow(confirmtext) if response is None: response = "n" if response.lower().startswith("y"): @@ -1655,7 +1658,8 @@ while True: self.updatescreen() try: - keypressed = self.statuswin.getkey() + with self.ui.timeblockedsection('crecord'): + keypressed = self.statuswin.getkey() if self.errorstr is not None: self.errorstr = None continue
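Each crecord hunk wraps a blocking curses read (getkey()/getch()) in ui.timeblockedsection() so time spent waiting for a keypress is accounted separately from real work. A self-contained sketch of what such a context manager does, assuming a module-level dict in place of the real ui attribute:

    import contextlib
    import time

    blockedtimes = {}

    @contextlib.contextmanager
    def timeblockedsection(key):
        # accumulate wall-clock time spent blocked under the given key
        starttime = time.time()
        try:
            yield
        finally:
            blockedtimes[key] = (blockedtimes.get(key, 0.0)
                                 + time.time() - starttime)

    with timeblockedsection('crecord'):
        time.sleep(0.01)  # stands in for a blocking keypress read
    assert blockedtimes['crecord'] > 0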
--- a/mercurial/debugcommands.py	Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/debugcommands.py	Sat Mar 11 13:53:14 2017 -0500 @@ -7,41 +7,63 @@ from __future__ import absolute_import +import difflib +import errno import operator import os import random +import socket +import string +import sys +import tempfile +import time from .i18n import _ from .node import ( bin, hex, + nullhex, nullid, + nullrev, short, ) from . import ( bundle2, changegroup, cmdutil, + color, commands, context, dagparser, dagutil, + encoding, error, exchange, extensions, fileset, + formatter, hg, localrepo, lock as lockmod, + merge as mergemod, + obsolete, + policy, + pvec, pycompat, repair, revlog, + revset, + revsetlang, scmutil, setdiscovery, simplemerge, + smartset, + sslutil, streamclone, + templater, treediscovery, util, + vfs as vfsmod, ) release = lockmod.release @@ -55,7 +77,7 @@ """find the ancestor revision of two revisions in a given index""" if len(args) == 3: index, rev1, rev2 = args - r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False), index) + r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index) lookup = r.lookup elif len(args) == 2: if not repo: @@ -324,6 +346,47 @@ error = _(".hg/dirstate inconsistent with current parent's manifest") raise error.Abort(error) +@command('debugcolor', + [('', 'style', None, _('show all configured styles'))], + 'hg debugcolor') +def debugcolor(ui, repo, **opts): + """show available colors, effects or styles""" + ui.write(('color mode: %s\n') % ui._colormode) + if opts.get('style'): + return _debugdisplaystyle(ui) + else: + return _debugdisplaycolor(ui) + +def _debugdisplaycolor(ui): + ui = ui.copy() + ui._styles.clear() + for effect in color._effects.keys(): + ui._styles[effect] = effect + if ui._terminfoparams: + for k, v in ui.configitems('color'): + if k.startswith('color.'): + ui._styles[k] = k[6:] + elif k.startswith('terminfo.'): + ui._styles[k] = k[9:] + ui.write(_('available colors:\n')) + # sort labels with a '_' last so the '_background' entries group together. 
+ items = sorted(ui._styles.items(), + key=lambda i: ('_' in i[0], i[0], i[1])) + for colorname, label in items: + ui.write(('%s\n') % colorname, label=label) + +def _debugdisplaystyle(ui): + ui.write(_('available styles:\n')) + width = max(len(s) for s in ui._styles) + for label, effects in sorted(ui._styles.items()): + ui.write('%s' % label, label=label) + if effects: + # pad the label column so the effect lists line up + ui.write(': ') + ui.write(' ' * (max(0, width - len(label)))) + ui.write(', '.join(ui.label(e, e) for e in effects.split())) + ui.write('\n') + @command('debugcommands', [], _('[COMMAND]'), norepo=True) def debugcommands(ui, cmd='', *args): """list all available commands and options""" @@ -390,7 +453,7 @@ spaces = opts.get('spaces') dots = opts.get('dots') if file_: - rlog = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False), + rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), file_) revs = set((int(r) for r in revs)) def events(): @@ -567,6 +630,37 @@ fm.end() +@command('debugdirstate|debugstate', + [('', 'nodates', None, _('do not display the saved mtime')), + ('', 'datesort', None, _('sort by saved mtime'))], + _('[OPTION]...')) +def debugstate(ui, repo, **opts): + """show the contents of the current dirstate""" + + nodates = opts.get('nodates') + datesort = opts.get('datesort') + + timestr = "" + if datesort: + keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename + else: + keyfunc = None # sort by filename + for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc): + if ent[3] == -1: + timestr = 'unset ' + elif nodates: + timestr = 'set ' + else: + timestr = time.strftime("%Y-%m-%d %H:%M:%S ", + time.localtime(ent[3])) + if ent[1] & 0o20000: + mode = 'lnk' + else: + mode = '%3o' % (ent[1] & 0o777 & ~util.umask) + ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_)) + for f in repo.dirstate.copies(): + ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) + @command('debugdiscovery', [('', 'old', None, _('use old-style discovery')), ('', 'nonheads', None, @@ -641,7 +735,7 @@ fm = ui.formatter('debugextensions', opts) for extname, extmod in sorted(exts, key=operator.itemgetter(0)): isinternal = extensions.ismoduleinternal(extmod) - extsource = extmod.__file__ + extsource = pycompat.fsencode(extmod.__file__) if isinternal: exttestedwith = [] # never expose magic string to users else: @@ -851,6 +945,1106 @@ ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) ui.write("}\n") +@command('debuginstall', [] + commands.formatteropts, '', norepo=True) +def debuginstall(ui, **opts): + '''test Mercurial installation + + Returns 0 on success. 
+ ''' + + def writetemp(contents): + (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-") + f = os.fdopen(fd, pycompat.sysstr("wb")) + f.write(contents) + f.close() + return name + + problems = 0 + + fm = ui.formatter('debuginstall', opts) + fm.startitem() + + # encoding + fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding) + err = None + try: + encoding.fromlocal("test") + except error.Abort as inst: + err = inst + problems += 1 + fm.condwrite(err, 'encodingerror', _(" %s\n" + " (check that your locale is properly set)\n"), err) + + # Python + fm.write('pythonexe', _("checking Python executable (%s)\n"), + pycompat.sysexecutable) + fm.write('pythonver', _("checking Python version (%s)\n"), + ("%d.%d.%d" % sys.version_info[:3])) + fm.write('pythonlib', _("checking Python lib (%s)...\n"), + os.path.dirname(pycompat.fsencode(os.__file__))) + + security = set(sslutil.supportedprotocols) + if sslutil.hassni: + security.add('sni') + + fm.write('pythonsecurity', _("checking Python security support (%s)\n"), + fm.formatlist(sorted(security), name='protocol', + fmt='%s', sep=',')) + + # These are warnings, not errors. So don't increment problem count. This + # may change in the future. + if 'tls1.2' not in security: + fm.plain(_(' TLS 1.2 not supported by Python install; ' + 'network connections lack modern security\n')) + if 'sni' not in security: + fm.plain(_(' SNI not supported by Python install; may have ' + 'connectivity issues with some servers\n')) + + # TODO print CA cert info + + # hg version + hgver = util.version() + fm.write('hgver', _("checking Mercurial version (%s)\n"), + hgver.split('+')[0]) + fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"), + '+'.join(hgver.split('+')[1:])) + + # compiled modules + fm.write('hgmodulepolicy', _("checking module policy (%s)\n"), + policy.policy) + fm.write('hgmodules', _("checking installed modules (%s)...\n"), + os.path.dirname(pycompat.fsencode(__file__))) + + err = None + try: + from . 
import ( + base85, + bdiff, + mpatch, + osutil, + ) + dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes + except Exception as inst: + err = inst + problems += 1 + fm.condwrite(err, 'extensionserror', " %s\n", err) + + compengines = util.compengines._engines.values() + fm.write('compengines', _('checking registered compression engines (%s)\n'), + fm.formatlist(sorted(e.name() for e in compengines), + name='compengine', fmt='%s', sep=', ')) + fm.write('compenginesavail', _('checking available compression engines ' + '(%s)\n'), + fm.formatlist(sorted(e.name() for e in compengines + if e.available()), + name='compengine', fmt='%s', sep=', ')) + wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE) + fm.write('compenginesserver', _('checking available compression engines ' + 'for wire protocol (%s)\n'), + fm.formatlist([e.name() for e in wirecompengines + if e.wireprotosupport()], + name='compengine', fmt='%s', sep=', ')) + + # templates + p = templater.templatepaths() + fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p)) + fm.condwrite(not p, '', _(" no template directories found\n")) + if p: + m = templater.templatepath("map-cmdline.default") + if m: + # template found, check if it is working + err = None + try: + templater.templater.frommapfile(m) + except Exception as inst: + err = inst + p = None + fm.condwrite(err, 'defaulttemplateerror', " %s\n", err) + else: + p = None + fm.condwrite(p, 'defaulttemplate', + _("checking default template (%s)\n"), m) + fm.condwrite(not m, 'defaulttemplatenotfound', + _(" template '%s' not found\n"), "default") + if not p: + problems += 1 + fm.condwrite(not p, '', + _(" (templates seem to have been installed incorrectly)\n")) + + # editor + editor = ui.geteditor() + editor = util.expandpath(editor) + fm.write('editor', _("checking commit editor... (%s)\n"), editor) + cmdpath = util.findexe(pycompat.shlexsplit(editor)[0]) + fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound', + _(" No commit editor set and can't find %s in PATH\n" + " (specify a commit editor in your configuration" + " file)\n"), not cmdpath and editor == 'vi' and editor) + fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound', + _(" Can't find editor '%s' in PATH\n" + " (specify a commit editor in your configuration" + " file)\n"), not cmdpath and editor) + if not cmdpath and editor != 'vi': + problems += 1 + + # check username + username = None + err = None + try: + username = ui.username() + except error.Abort as e: + err = e + problems += 1 + + fm.condwrite(username, 'username', _("checking username (%s)\n"), username) + fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n" + " (specify a username in your configuration file)\n"), err) + + fm.condwrite(not problems, '', + _("no problems detected\n")) + if not problems: + fm.data(problems=problems) + fm.condwrite(problems, 'problems', + _("%d problems detected," + " please check your install!\n"), problems) + fm.end() + + return problems + +@command('debugknown', [], _('REPO ID...'), norepo=True) +def debugknown(ui, repopath, *ids, **opts): + """test whether node ids are known to a repo + + Every ID must be a full-length hex node id string. Returns a list of 0s + and 1s indicating unknown/known. 
+ """ + repo = hg.peer(ui, opts, repopath) + if not repo.capable('known'): + raise error.Abort("known() not supported by target repository") + flags = repo.known([bin(s) for s in ids]) + ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags]))) + +@command('debuglabelcomplete', [], _('LABEL...')) +def debuglabelcomplete(ui, repo, *args): + '''backwards compatibility with old bash completion scripts (DEPRECATED)''' + commands.debugnamecomplete(ui, repo, *args) + +@command('debuglocks', + [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')), + ('W', 'force-wlock', None, + _('free the working state lock (DANGEROUS)'))], + _('[OPTION]...')) +def debuglocks(ui, repo, **opts): + """show or modify state of locks + + By default, this command will show which locks are held. This + includes the user and process holding the lock, the amount of time + the lock has been held, and the machine name where the process is + running if it's not local. + + Locks protect the integrity of Mercurial's data, so should be + treated with care. System crashes or other interruptions may cause + locks to not be properly released, though Mercurial will usually + detect and remove such stale locks automatically. + + However, detecting stale locks may not always be possible (for + instance, on a shared filesystem). Removing locks may also be + blocked by filesystem permissions. + + Returns 0 if no locks are held. + + """ + + if opts.get('force_lock'): + repo.svfs.unlink('lock') + if opts.get('force_wlock'): + repo.vfs.unlink('wlock') + if opts.get('force_lock') or opts.get('force_lock'): + return 0 + + now = time.time() + held = 0 + + def report(vfs, name, method): + # this causes stale locks to get reaped for more accurate reporting + try: + l = method(False) + except error.LockHeld: + l = None + + if l: + l.release() + else: + try: + stat = vfs.lstat(name) + age = now - stat.st_mtime + user = util.username(stat.st_uid) + locker = vfs.readlock(name) + if ":" in locker: + host, pid = locker.split(':') + if host == socket.gethostname(): + locker = 'user %s, process %s' % (user, pid) + else: + locker = 'user %s, process %s, host %s' \ + % (user, pid, host) + ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age)) + return 1 + except OSError as e: + if e.errno != errno.ENOENT: + raise + + ui.write(("%-6s free\n") % (name + ":")) + return 0 + + held += report(repo.svfs, "lock", repo.lock) + held += report(repo.vfs, "wlock", repo.wlock) + + return held + +@command('debugmergestate', [], '') +def debugmergestate(ui, repo, *args): + """print merge state + + Use --verbose to print out information about whether v1 or v2 merge state + was chosen.""" + def _hashornull(h): + if h == nullhex: + return 'null' + else: + return h + + def printrecords(version): + ui.write(('* version %s records\n') % version) + if version == 1: + records = v1records + else: + records = v2records + + for rtype, record in records: + # pretty print some record types + if rtype == 'L': + ui.write(('local: %s\n') % record) + elif rtype == 'O': + ui.write(('other: %s\n') % record) + elif rtype == 'm': + driver, mdstate = record.split('\0', 1) + ui.write(('merge driver: %s (state "%s")\n') + % (driver, mdstate)) + elif rtype in 'FDC': + r = record.split('\0') + f, state, hash, lfile, afile, anode, ofile = r[0:7] + if version == 1: + onode = 'not stored in v1 format' + flags = r[7] + else: + onode, flags = r[7:9] + ui.write(('file: %s (record type "%s", state "%s", hash %s)\n') + % (f, rtype, state, _hashornull(hash))) + ui.write((' local 
path: %s (flags "%s")\n') % (lfile, flags)) + ui.write((' ancestor path: %s (node %s)\n') + % (afile, _hashornull(anode))) + ui.write((' other path: %s (node %s)\n') + % (ofile, _hashornull(onode))) + elif rtype == 'f': + filename, rawextras = record.split('\0', 1) + extras = rawextras.split('\0') + i = 0 + extrastrings = [] + while i < len(extras): + extrastrings.append('%s = %s' % (extras[i], extras[i + 1])) + i += 2 + + ui.write(('file extras: %s (%s)\n') + % (filename, ', '.join(extrastrings))) + elif rtype == 'l': + labels = record.split('\0', 2) + labels = [l for l in labels if len(l) > 0] + ui.write(('labels:\n')) + ui.write((' local: %s\n' % labels[0])) + ui.write((' other: %s\n' % labels[1])) + if len(labels) > 2: + ui.write((' base: %s\n' % labels[2])) + else: + ui.write(('unrecognized entry: %s\t%s\n') + % (rtype, record.replace('\0', '\t'))) + + # Avoid mergestate.read() since it may raise an exception for unsupported + # merge state records. We shouldn't be doing this, but this is OK since this + # command is pretty low-level. + ms = mergemod.mergestate(repo) + + # sort so that reasonable information is on top + v1records = ms._readrecordsv1() + v2records = ms._readrecordsv2() + order = 'LOml' + def key(r): + idx = order.find(r[0]) + if idx == -1: + return (1, r[1]) + else: + return (0, idx) + v1records.sort(key=key) + v2records.sort(key=key) + + if not v1records and not v2records: + ui.write(('no merge state found\n')) + elif not v2records: + ui.note(('no version 2 merge state\n')) + printrecords(1) + elif ms._v1v2match(v1records, v2records): + ui.note(('v1 and v2 states match: using v2\n')) + printrecords(2) + else: + ui.note(('v1 and v2 states mismatch: using v1\n')) + printrecords(1) + if ui.verbose: + printrecords(2) + +@command('debugnamecomplete', [], _('NAME...')) +def debugnamecomplete(ui, repo, *args): + '''complete "names" - tags, open branch names, bookmark names''' + + names = set() + # since we previously only listed open branches, we will handle that + # specially (after this for loop) + for name, ns in repo.names.iteritems(): + if name != 'branches': + names.update(ns.listnames(repo)) + names.update(tag for (tag, heads, tip, closed) + in repo.branchmap().iterbranches() if not closed) + completions = set() + if not args: + args = [''] + for a in args: + completions.update(n for n in names if n.startswith(a)) + ui.write('\n'.join(sorted(completions))) + ui.write('\n') + +@command('debugobsolete', + [('', 'flags', 0, _('markers flag')), + ('', 'record-parents', False, + _('record parent information for the precursor')), + ('r', 'rev', [], _('display markers relevant to REV')), + ('', 'index', False, _('display index of the marker')), + ('', 'delete', [], _('delete markers specified by indices')), + ] + commands.commitopts2 + commands.formatteropts, + _('[OBSOLETED [REPLACEMENT ...]]')) +def debugobsolete(ui, repo, precursor=None, *successors, **opts): + """create arbitrary obsolete marker + + With no arguments, displays the list of obsolescence markers.""" + + def parsenodeid(s): + try: + # We do not use revsingle/revrange functions here to accept + # arbitrary node identifiers, possibly not present in the + # local repository. 
+ n = bin(s) + if len(n) != len(nullid): + raise TypeError() + return n + except TypeError: + raise error.Abort('changeset references must be full hexadecimal ' + 'node identifiers') + + if opts.get('delete'): + indices = [] + for v in opts.get('delete'): + try: + indices.append(int(v)) + except ValueError: + raise error.Abort(_('invalid index value: %r') % v, + hint=_('use integers for indices')) + + if repo.currenttransaction(): + raise error.Abort(_('cannot delete obsmarkers in the middle ' + 'of transaction.')) + + with repo.lock(): + n = repair.deleteobsmarkers(repo.obsstore, indices) + ui.write(_('deleted %i obsolescence markers\n') % n) + + return + + if precursor is not None: + if opts['rev']: + raise error.Abort('cannot select revision when creating marker') + metadata = {} + metadata['user'] = opts['user'] or ui.username() + succs = tuple(parsenodeid(succ) for succ in successors) + l = repo.lock() + try: + tr = repo.transaction('debugobsolete') + try: + date = opts.get('date') + if date: + date = util.parsedate(date) + else: + date = None + prec = parsenodeid(precursor) + parents = None + if opts['record_parents']: + if prec not in repo.unfiltered(): + raise error.Abort('cannot use --record-parents on ' + 'unknown changesets') + parents = repo.unfiltered()[prec].parents() + parents = tuple(p.node() for p in parents) + repo.obsstore.create(tr, prec, succs, opts['flags'], + parents=parents, date=date, + metadata=metadata) + tr.close() + except ValueError as exc: + raise error.Abort(_('bad obsmarker input: %s') % exc) + finally: + tr.release() + finally: + l.release() + else: + if opts['rev']: + revs = scmutil.revrange(repo, opts['rev']) + nodes = [repo[r].node() for r in revs] + markers = list(obsolete.getmarkers(repo, nodes=nodes)) + markers.sort(key=lambda x: x._data) + else: + markers = obsolete.getmarkers(repo) + + markerstoiter = markers + isrelevant = lambda m: True + if opts.get('rev') and opts.get('index'): + markerstoiter = obsolete.getmarkers(repo) + markerset = set(markers) + isrelevant = lambda m: m in markerset + + fm = ui.formatter('debugobsolete', opts) + for i, m in enumerate(markerstoiter): + if not isrelevant(m): + # marker can be irrelevant when we're iterating over a set + # of markers (markerstoiter) which is bigger than the set + # of markers we want to display (markers) + # this can happen if both --index and --rev options are + # provided and thus we need to iterate over all of the markers + # to get the correct indices, but only display the ones that + # are relevant to --rev value + continue + fm.startitem() + ind = i if opts.get('index') else None + cmdutil.showmarker(fm, m, index=ind) + fm.end() + +@command('debugpathcomplete', + [('f', 'full', None, _('complete an entire path')), + ('n', 'normal', None, _('show only normal files')), + ('a', 'added', None, _('show only added files')), + ('r', 'removed', None, _('show only removed files'))], + _('FILESPEC...')) +def debugpathcomplete(ui, repo, *specs, **opts): + '''complete part or all of a tracked path + + This command supports shells that offer path name completion. It + currently completes only files already known to the dirstate. 
+ + Completion extends only to the next path segment unless + --full is specified, in which case entire paths are used.''' + + def complete(path, acceptable): + dirstate = repo.dirstate + spec = os.path.normpath(os.path.join(pycompat.getcwd(), path)) + rootdir = repo.root + pycompat.ossep + if spec != repo.root and not spec.startswith(rootdir): + return [], [] + if os.path.isdir(spec): + spec += '/' + spec = spec[len(rootdir):] + fixpaths = pycompat.ossep != '/' + if fixpaths: + spec = spec.replace(pycompat.ossep, '/') + speclen = len(spec) + fullpaths = opts['full'] + files, dirs = set(), set() + adddir, addfile = dirs.add, files.add + for f, st in dirstate.iteritems(): + if f.startswith(spec) and st[0] in acceptable: + if fixpaths: + f = f.replace('/', pycompat.ossep) + if fullpaths: + addfile(f) + continue + s = f.find(pycompat.ossep, speclen) + if s >= 0: + adddir(f[:s]) + else: + addfile(f) + return files, dirs + + acceptable = '' + if opts['normal']: + acceptable += 'nm' + if opts['added']: + acceptable += 'a' + if opts['removed']: + acceptable += 'r' + cwd = repo.getcwd() + if not specs: + specs = ['.'] + + files, dirs = set(), set() + for spec in specs: + f, d = complete(spec, acceptable or 'nmar') + files.update(f) + dirs.update(d) + files.update(dirs) + ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files))) + ui.write('\n') + +@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True) +def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): + '''access the pushkey key/value protocol + + With two args, list the keys in the given namespace. + + With five args, set a key to new if it currently is set to old. + Reports success or failure. + ''' + + target = hg.peer(ui, {}, repopath) + if keyinfo: + key, old, new = keyinfo + r = target.pushkey(namespace, key, old, new) + ui.status(str(r) + '\n') + return not r + else: + for k, v in sorted(target.listkeys(namespace).iteritems()): + ui.write("%s\t%s\n" % (k.encode('string-escape'), + v.encode('string-escape'))) + +@command('debugpvec', [], _('A B')) +def debugpvec(ui, repo, a, b=None): + ca = scmutil.revsingle(repo, a) + cb = scmutil.revsingle(repo, b) + pa = pvec.ctxpvec(ca) + pb = pvec.ctxpvec(cb) + if pa == pb: + rel = "=" + elif pa > pb: + rel = ">" + elif pa < pb: + rel = "<" + elif pa | pb: + rel = "|" + ui.write(_("a: %s\n") % pa) + ui.write(_("b: %s\n") % pb) + ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth)) + ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") % + (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec), + pa.distance(pb), rel)) + +@command('debugrebuilddirstate|debugrebuildstate', + [('r', 'rev', '', _('revision to rebuild to'), _('REV')), + ('', 'minimal', None, _('only rebuild files that are inconsistent with ' + 'the working copy parent')), + ], + _('[-r REV]')) +def debugrebuilddirstate(ui, repo, rev, **opts): + """rebuild the dirstate as it would look like for the given revision + + If no revision is specified, the first current parent will be used. + + The dirstate will be set to the files of the given revision. + The actual working directory content or existing dirstate + information such as adds or removes is not considered. + + ``minimal`` will only rebuild the dirstate status for files that claim to be + tracked but are not in the parent manifest, or that exist in the parent + manifest but are not in the dirstate. It will not change adds, removes, or + modified files that are in the working copy parent. 
+ + One use of this command is to make the next :hg:`status` invocation + check the actual file content. + """ + ctx = scmutil.revsingle(repo, rev) + with repo.wlock(): + dirstate = repo.dirstate + changedfiles = None + # See command doc for what minimal does. + if opts.get('minimal'): + manifestfiles = set(ctx.manifest().keys()) + dirstatefiles = set(dirstate) + manifestonly = manifestfiles - dirstatefiles + dsonly = dirstatefiles - manifestfiles + dsnotadded = set(f for f in dsonly if dirstate[f] != 'a') + changedfiles = manifestonly | dsnotadded + + dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) + +@command('debugrebuildfncache', [], '') +def debugrebuildfncache(ui, repo): + """rebuild the fncache file""" + repair.rebuildfncache(ui, repo) + +@command('debugrename', + [('r', 'rev', '', _('revision to debug'), _('REV'))], + _('[-r REV] FILE')) +def debugrename(ui, repo, file1, *pats, **opts): + """dump rename information""" + + ctx = scmutil.revsingle(repo, opts.get('rev')) + m = scmutil.match(ctx, (file1,) + pats, opts) + for abs in ctx.walk(m): + fctx = ctx[abs] + o = fctx.filelog().renamed(fctx.filenode()) + rel = m.rel(abs) + if o: + ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) + else: + ui.write(_("%s not renamed\n") % rel) + +@command('debugrevlog', commands.debugrevlogopts + + [('d', 'dump', False, _('dump index data'))], + _('-c|-m|FILE'), + optionalrepo=True) +def debugrevlog(ui, repo, file_=None, **opts): + """show data and statistics about a revlog""" + r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts) + + if opts.get("dump"): + numrevs = len(r) + ui.write(("# rev p1rev p2rev start end deltastart base p1 p2" + " rawsize totalsize compression heads chainlen\n")) + ts = 0 + heads = set() + + for rev in xrange(numrevs): + dbase = r.deltaparent(rev) + if dbase == -1: + dbase = rev + cbase = r.chainbase(rev) + clen = r.chainlen(rev) + p1, p2 = r.parentrevs(rev) + rs = r.rawsize(rev) + ts = ts + rs + heads -= set(r.parentrevs(rev)) + heads.add(rev) + try: + compression = ts / r.end(rev) + except ZeroDivisionError: + compression = 0 + ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " + "%11d %5d %8d\n" % + (rev, p1, p2, r.start(rev), r.end(rev), + r.start(dbase), r.start(cbase), + r.start(p1), r.start(p2), + rs, ts, compression, len(heads), clen)) + return 0 + + v = r.version + format = v & 0xFFFF + flags = [] + gdelta = False + if v & revlog.REVLOGNGINLINEDATA: + flags.append('inline') + if v & revlog.REVLOGGENERALDELTA: + gdelta = True + flags.append('generaldelta') + if not flags: + flags = ['(none)'] + + nummerges = 0 + numfull = 0 + numprev = 0 + nump1 = 0 + nump2 = 0 + numother = 0 + nump1prev = 0 + nump2prev = 0 + chainlengths = [] + + datasize = [None, 0, 0] + fullsize = [None, 0, 0] + deltasize = [None, 0, 0] + chunktypecounts = {} + chunktypesizes = {} + + def addsize(size, l): + if l[0] is None or size < l[0]: + l[0] = size + if size > l[1]: + l[1] = size + l[2] += size + + numrevs = len(r) + for rev in xrange(numrevs): + p1, p2 = r.parentrevs(rev) + delta = r.deltaparent(rev) + if format > 0: + addsize(r.rawsize(rev), datasize) + if p2 != nullrev: + nummerges += 1 + size = r.length(rev) + if delta == nullrev: + chainlengths.append(0) + numfull += 1 + addsize(size, fullsize) + else: + chainlengths.append(chainlengths[delta] + 1) + addsize(size, deltasize) + if delta == rev - 1: + numprev += 1 + if delta == p1: + nump1prev += 1 + elif delta == p2: + nump2prev += 1 + elif delta == p1: + nump1 += 1 + elif delta == p2: + nump2 += 1 + 
elif delta != nullrev: + numother += 1 + + # Obtain data on the raw chunks in the revlog. + chunk = r._chunkraw(rev, rev)[1] + if chunk: + chunktype = chunk[0] + else: + chunktype = 'empty' + + if chunktype not in chunktypecounts: + chunktypecounts[chunktype] = 0 + chunktypesizes[chunktype] = 0 + + chunktypecounts[chunktype] += 1 + chunktypesizes[chunktype] += size + + # Adjust size min value for empty cases + for size in (datasize, fullsize, deltasize): + if size[0] is None: + size[0] = 0 + + numdeltas = numrevs - numfull + numoprev = numprev - nump1prev - nump2prev + totalrawsize = datasize[2] + datasize[2] /= numrevs + fulltotal = fullsize[2] + fullsize[2] /= numfull + deltatotal = deltasize[2] + if numrevs - numfull > 0: + deltasize[2] /= numrevs - numfull + totalsize = fulltotal + deltatotal + avgchainlen = sum(chainlengths) / numrevs + maxchainlen = max(chainlengths) + compratio = 1 + if totalsize: + compratio = totalrawsize / totalsize + + basedfmtstr = '%%%dd\n' + basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n' + + def dfmtstr(max): + return basedfmtstr % len(str(max)) + def pcfmtstr(max, padding=0): + return basepcfmtstr % (len(str(max)), ' ' * padding) + + def pcfmt(value, total): + if total: + return (value, 100 * float(value) / total) + else: + return value, 100.0 + + ui.write(('format : %d\n') % format) + ui.write(('flags : %s\n') % ', '.join(flags)) + + ui.write('\n') + fmt = pcfmtstr(totalsize) + fmt2 = dfmtstr(totalsize) + ui.write(('revisions : ') + fmt2 % numrevs) + ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs)) + ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs)) + ui.write(('revisions : ') + fmt2 % numrevs) + ui.write((' full : ') + fmt % pcfmt(numfull, numrevs)) + ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs)) + ui.write(('revision size : ') + fmt2 % totalsize) + ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize)) + ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize)) + + def fmtchunktype(chunktype): + if chunktype == 'empty': + return ' %s : ' % chunktype + elif chunktype in string.ascii_letters: + return ' 0x%s (%s) : ' % (hex(chunktype), chunktype) + else: + return ' 0x%s : ' % hex(chunktype) + + ui.write('\n') + ui.write(('chunks : ') + fmt2 % numrevs) + for chunktype in sorted(chunktypecounts): + ui.write(fmtchunktype(chunktype)) + ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) + ui.write(('chunks size : ') + fmt2 % totalsize) + for chunktype in sorted(chunktypecounts): + ui.write(fmtchunktype(chunktype)) + ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) + + ui.write('\n') + fmt = dfmtstr(max(avgchainlen, compratio)) + ui.write(('avg chain length : ') + fmt % avgchainlen) + ui.write(('max chain length : ') + fmt % maxchainlen) + ui.write(('compression ratio : ') + fmt % compratio) + + if format > 0: + ui.write('\n') + ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n') + % tuple(datasize)) + ui.write(('full revision size (min/max/avg) : %d / %d / %d\n') + % tuple(fullsize)) + ui.write(('delta size (min/max/avg) : %d / %d / %d\n') + % tuple(deltasize)) + + if numdeltas > 0: + ui.write('\n') + fmt = pcfmtstr(numdeltas) + fmt2 = pcfmtstr(numdeltas, 4) + ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas)) + if numprev > 0: + ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev, + numprev)) + ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev, + numprev)) + ui.write((' other : ') + fmt2 % pcfmt(numoprev, + numprev)) + if gdelta: + ui.write(('deltas 
against p1 : ') + + fmt % pcfmt(nump1, numdeltas)) + ui.write(('deltas against p2 : ') + + fmt % pcfmt(nump2, numdeltas)) + ui.write(('deltas against other : ') + fmt % pcfmt(numother, + numdeltas)) + +@command('debugrevspec', + [('', 'optimize', None, + _('print parsed tree after optimizing (DEPRECATED)')), + ('p', 'show-stage', [], + _('print parsed tree at the given stage'), _('NAME')), + ('', 'no-optimized', False, _('evaluate tree without optimization')), + ('', 'verify-optimized', False, _('verify optimized result')), + ], + ('REVSPEC')) +def debugrevspec(ui, repo, expr, **opts): + """parse and apply a revision specification + + Use -p/--show-stage option to print the parsed tree at the given stages. + Use -p all to print tree at every stage. + + Use --verify-optimized to compare the optimized result with the unoptimized + one. Returns 1 if the optimized result differs. + """ + stages = [ + ('parsed', lambda tree: tree), + ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)), + ('concatenated', revsetlang.foldconcat), + ('analyzed', revsetlang.analyze), + ('optimized', revsetlang.optimize), + ] + if opts['no_optimized']: + stages = stages[:-1] + if opts['verify_optimized'] and opts['no_optimized']: + raise error.Abort(_('cannot use --verify-optimized with ' + '--no-optimized')) + stagenames = set(n for n, f in stages) + + showalways = set() + showchanged = set() + if ui.verbose and not opts['show_stage']: + # show parsed tree by --verbose (deprecated) + showalways.add('parsed') + showchanged.update(['expanded', 'concatenated']) + if opts['optimize']: + showalways.add('optimized') + if opts['show_stage'] and opts['optimize']: + raise error.Abort(_('cannot use --optimize with --show-stage')) + if opts['show_stage'] == ['all']: + showalways.update(stagenames) + else: + for n in opts['show_stage']: + if n not in stagenames: + raise error.Abort(_('invalid stage name: %s') % n) + showalways.update(opts['show_stage']) + + treebystage = {} + printedtree = None + tree = revsetlang.parse(expr, lookup=repo.__contains__) + for n, f in stages: + treebystage[n] = tree = f(tree) + if n in showalways or (n in showchanged and tree != printedtree): + if opts['show_stage'] or n != 'parsed': + ui.write(("* %s:\n") % n) + ui.write(revsetlang.prettyformat(tree), "\n") + printedtree = tree + + if opts['verify_optimized']: + arevs = revset.makematcher(treebystage['analyzed'])(repo) + brevs = revset.makematcher(treebystage['optimized'])(repo) + if ui.verbose: + ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n") + ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n") + arevs = list(arevs) + brevs = list(brevs) + if arevs == brevs: + return 0 + ui.write(('--- analyzed\n'), label='diff.file_a') + ui.write(('+++ optimized\n'), label='diff.file_b') + sm = difflib.SequenceMatcher(None, arevs, brevs) + for tag, alo, ahi, blo, bhi in sm.get_opcodes(): + if tag in ('delete', 'replace'): + for c in arevs[alo:ahi]: + ui.write('-%s\n' % c, label='diff.deleted') + if tag in ('insert', 'replace'): + for c in brevs[blo:bhi]: + ui.write('+%s\n' % c, label='diff.inserted') + if tag == 'equal': + for c in arevs[alo:ahi]: + ui.write(' %s\n' % c) + return 1 + + func = revset.makematcher(tree) + revs = func(repo) + if ui.verbose: + ui.note(("* set:\n"), smartset.prettyformat(revs), "\n") + for c in revs: + ui.write("%s\n" % c) + +@command('debugsetparents', [], _('REV1 [REV2]')) +def debugsetparents(ui, repo, rev1, rev2=None): + """manually set the parents of the current working directory + 
+ + This is useful for writing repository conversion tools, but should + be used with care. For example, neither the working directory nor the + dirstate is updated, so file status may be incorrect after running this + command. + + Returns 0 on success. + """ + + r1 = scmutil.revsingle(repo, rev1).node() + r2 = scmutil.revsingle(repo, rev2, 'null').node() + + with repo.wlock(): + repo.setparents(r1, r2) + +@command('debugsub', + [('r', 'rev', '', + _('revision to check'), _('REV'))], + _('[-r REV] [REV]')) +def debugsub(ui, repo, rev=None): + ctx = scmutil.revsingle(repo, rev, None) + for k, v in sorted(ctx.substate.items()): + ui.write(('path %s\n') % k) + ui.write((' source %s\n') % v[0]) + ui.write((' revision %s\n') % v[1]) + +@command('debugsuccessorssets', + [], + _('[REV]')) +def debugsuccessorssets(ui, repo, *revs): + """show set of successors for revision + + A successors set of changeset A is a consistent group of revisions that + succeed A. It contains non-obsolete changesets only. + + In most cases a changeset A has a single successors set containing a single + successor (changeset A replaced by A'). + + A changeset that is made obsolete with no successors is called "pruned". + Such changesets have no successors sets at all. + + A changeset that has been "split" will have a successors set containing + more than one successor. + + A changeset that has been rewritten in multiple different ways is called + "divergent". Such changesets have multiple successor sets (each of which + may also be split, i.e. have multiple successors). + + Results are displayed as follows:: + + <rev1> + <successors-1A> + <rev2> + <successors-2A> + <successors-2B1> <successors-2B2> <successors-2B3> + + Here rev2 has two possible (i.e. divergent) successors sets. The first + holds one element, whereas the second holds three (i.e. the changeset has + been split). + """ + # passed to successorssets caching computation from one call to another + cache = {} + ctx2str = str + node2str = short + if ui.debug(): + def ctx2str(ctx): + return ctx.hex() + node2str = hex + for rev in scmutil.revrange(repo, revs): + ctx = repo[rev] + ui.write('%s\n'% ctx2str(ctx)) + for succsset in obsolete.successorssets(repo, ctx.node(), cache): + if succsset: + ui.write(' ') + ui.write(node2str(succsset[0])) + for node in succsset[1:]: + ui.write(' ') + ui.write(node2str(node)) + ui.write('\n') + +@command('debugtemplate', + [('r', 'rev', [], _('apply template on changesets'), _('REV')), + ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))], + _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'), + optionalrepo=True) +def debugtemplate(ui, repo, tmpl, **opts): + """parse and apply a template + + If -r/--rev is given, the template is processed as a log template and + applied to the given changesets. Otherwise, it is processed as a generic + template. + + Use --verbose to print the parsed tree. 
+ """ + revs = None + if opts['rev']: + if repo is None: + raise error.RepoError(_('there is no Mercurial repository here ' + '(.hg not found)')) + revs = scmutil.revrange(repo, opts['rev']) + + props = {} + for d in opts['define']: + try: + k, v = (e.strip() for e in d.split('=', 1)) + if not k: + raise ValueError + props[k] = v + except ValueError: + raise error.Abort(_('malformed keyword definition: %s') % d) + + if ui.verbose: + aliases = ui.configitems('templatealias') + tree = templater.parse(tmpl) + ui.note(templater.prettyformat(tree), '\n') + newtree = templater.expandaliases(tree, aliases) + if newtree != tree: + ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n') + + mapfile = None + if revs is None: + k = 'debugtemplate' + t = formatter.maketemplater(ui, k, tmpl) + ui.write(templater.stringify(t(k, **props))) + else: + displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl, + mapfile, buffered=False) + for r in revs: + displayer.show(repo[r], **props) + displayer.close() + @command('debugupgraderepo', [ ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')), ('', 'run', False, _('performs an upgrade')), @@ -875,3 +2069,43 @@ unable to access the repository should be low. """ return repair.upgraderepo(ui, repo, run=run, optimize=optimize) + +@command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'), + inferrepo=True) +def debugwalk(ui, repo, *pats, **opts): + """show how files match on given patterns""" + m = scmutil.match(repo[None], pats, opts) + items = list(repo.walk(m)) + if not items: + return + f = lambda fn: fn + if ui.configbool('ui', 'slash') and pycompat.ossep != '/': + f = lambda fn: util.normpath(fn) + fmt = 'f %%-%ds %%-%ds %%s' % ( + max([len(abs) for abs in items]), + max([len(m.rel(abs)) for abs in items])) + for abs in items: + line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '') + ui.write("%s\n" % line.rstrip()) + +@command('debugwireargs', + [('', 'three', '', 'three'), + ('', 'four', '', 'four'), + ('', 'five', '', 'five'), + ] + commands.remoteopts, + _('REPO [OPTIONS]... [ONE [TWO]]'), + norepo=True) +def debugwireargs(ui, repopath, *vals, **opts): + repo = hg.peer(ui, opts, repopath) + for opt in commands.remoteopts: + del opts[opt[1]] + args = {} + for k, v in opts.iteritems(): + if v: + args[k] = v + # run twice to check that we don't mess up the stream for the next command + res1 = repo.debugwireargs(*vals, **args) + res2 = repo.debugwireargs(*vals, **args) + ui.write("%s\n" % res1) + if res1 != res2: + ui.warn("%s\n" % res2)
--- a/mercurial/destutil.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/destutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -12,37 +12,10 @@ bookmarks, error, obsolete, + scmutil, ) -def _destupdatevalidate(repo, rev, clean, check): - """validate that the destination comply to various rules - - This exists as its own function to help wrapping from extensions.""" - wc = repo[None] - p1 = wc.p1() - if not clean: - # Check that the update is linear. - # - # Mercurial do not allow update-merge for non linear pattern - # (that would be technically possible but was considered too confusing - # for user a long time ago) - # - # See mercurial.merge.update for details - if p1.rev() not in repo.changelog.ancestors([rev], inclusive=True): - dirty = wc.dirty(missing=True) - foreground = obsolete.foreground(repo, [p1.node()]) - if not repo[rev].node() in foreground: - if dirty: - msg = _("uncommitted changes") - hint = _("commit and merge, or update --clean to" - " discard changes") - raise error.UpdateAbort(msg, hint=hint) - elif not check: # destination is not a descendant. - msg = _("not a linear update") - hint = _("merge or update --check to force update") - raise error.UpdateAbort(msg, hint=hint) - -def _destupdateobs(repo, clean, check): +def _destupdateobs(repo, clean): """decide on an update destination from obsolescence markers""" node = None wc = repo[None] @@ -78,7 +51,7 @@ movemark = repo['.'].node() return node, movemark, None -def _destupdatebook(repo, clean, check): +def _destupdatebook(repo, clean): """decide on an update destination from active bookmark""" # we also move the active bookmark, if any activemark = None @@ -87,7 +60,7 @@ activemark = node return node, movemark, activemark -def _destupdatebranch(repo, clean, check): +def _destupdatebranch(repo, clean): """decide on an update destination from current branch This ignores closed branch heads. @@ -113,7 +86,7 @@ node = repo['.'].node() return node, movemark, None -def _destupdatebranchfallback(repo, clean, check): +def _destupdatebranchfallback(repo, clean): """decide on an update destination from closed heads in current branch""" wc = repo[None] currentbranch = wc.branch() @@ -143,7 +116,7 @@ 'branchfallback': _destupdatebranchfallback, } -def destupdate(repo, clean=False, check=False): +def destupdate(repo, clean=False): """destination for bare update operation return (rev, movemark, activemark) @@ -156,13 +129,11 @@ node = movemark = activemark = None for step in destupdatesteps: - node, movemark, activemark = destupdatestepmap[step](repo, clean, check) + node, movemark, activemark = destupdatestepmap[step](repo, clean) if node is not None: break rev = repo[node].rev() - _destupdatevalidate(repo, rev, clean, check) - return rev, movemark, activemark msgdestmerge = { @@ -372,9 +343,6 @@ def desthistedit(ui, repo): """Default base revision to edit for `hg histedit`.""" - # Avoid cycle: scmutil -> revset -> destutil - from . import scmutil - default = ui.config('histedit', 'defaultrev', histeditdefaultrevset) if default: revs = scmutil.revrange(repo, [default])
--- a/mercurial/dirstate.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/dirstate.py Sat Mar 11 13:53:14 2017 -0500 @@ -23,6 +23,7 @@ pathutil, pycompat, scmutil, + txnutil, util, ) @@ -54,26 +55,16 @@ def nonnormalentries(dmap): '''Compute the nonnormal dirstate entries from the dmap''' try: - return parsers.nonnormalentries(dmap) + return parsers.nonnormalotherparententries(dmap) except AttributeError: - return set(fname for fname, e in dmap.iteritems() - if e[0] != 'n' or e[3] == -1) - -def _trypending(root, vfs, filename): - '''Open file to be read according to HG_PENDING environment variable - - This opens '.pending' of specified 'filename' only when HG_PENDING - is equal to 'root'. - - This returns '(fp, is_pending_opened)' tuple. - ''' - if root == encoding.environ.get('HG_PENDING'): - try: - return (vfs('%s.pending' % filename), True) - except IOError as inst: - if inst.errno != errno.ENOENT: - raise - return (vfs(filename), False) + nonnorm = set() + otherparent = set() + for fname, e in dmap.iteritems(): + if e[0] != 'n' or e[3] == -1: + nonnorm.add(fname) + if e[0] == 'n' and e[2] == -2: + otherparent.add(fname) + return nonnorm, otherparent class dirstate(object): @@ -104,6 +95,7 @@ self._pendingfilename = '%s.pending' % self._filename self._plchangecallbacks = {} self._origpl = None + self._updatedfiles = set() # for consistent view between _pl() and _read() invocations self._pendingmode = None @@ -145,7 +137,15 @@ @propertycache def _nonnormalset(self): - return nonnormalentries(self._map) + nonnorm, otherparents = nonnormalentries(self._map) + self._otherparentset = otherparents + return nonnorm + + @propertycache + def _otherparentset(self): + nonnorm, otherparents = nonnormalentries(self._map) + self._nonnormalset = nonnorm + return otherparents @propertycache def _filefoldmap(self): @@ -355,7 +355,12 @@ self._pl = p1, p2 copies = {} if oldp2 != nullid and p2 == nullid: - for f, s in self._map.iteritems(): + candidatefiles = self._nonnormalset.union(self._otherparentset) + for f in candidatefiles: + s = self._map.get(f) + if s is None: + continue + # Discard 'm' markers when moving away from a merge state if s[0] == 'm': if f in self._copymap: @@ -385,7 +390,7 @@ raise def _opendirstatefile(self): - fp, mode = _trypending(self._root, self._opener, self._filename) + fp, mode = txnutil.trypending(self._root, self._opener, self._filename) if self._pendingmode is not None and self._pendingmode != mode: fp.close() raise error.Abort(_('working directory state may be ' @@ -441,11 +446,13 @@ def invalidate(self): for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch", - "_pl", "_dirs", "_ignore", "_nonnormalset"): + "_pl", "_dirs", "_ignore", "_nonnormalset", + "_otherparentset"): if a in self.__dict__: delattr(self, a) self._lastnormaltime = 0 self._dirty = False + self._updatedfiles.clear() self._parentwriters = 0 self._origpl = None @@ -456,8 +463,11 @@ self._dirty = True if source is not None: self._copymap[dest] = source + self._updatedfiles.add(source) + self._updatedfiles.add(dest) elif dest in self._copymap: del self._copymap[dest] + self._updatedfiles.add(dest) def copied(self, file): return self._copymap.get(file, None) @@ -474,6 +484,8 @@ if normed in self._filefoldmap: del self._filefoldmap[normed] + self._updatedfiles.add(f) + def _addpath(self, f, state, mode, size, mtime): oldstate = self[f] if state == 'a' or oldstate == 'r': @@ -490,9 +502,12 @@ if oldstate in "?r" and "_dirs" in self.__dict__: self._dirs.addpath(f) self._dirty = True + 
self._updatedfiles.add(f) self._map[f] = dirstatetuple(state, mode, size, mtime) if state != 'n' or mtime == -1: self._nonnormalset.add(f) + if size == -2: + self._otherparentset.add(f) def normal(self, f): '''Mark a file normal and clean.''' @@ -567,6 +582,7 @@ size = -1 elif entry[0] == 'n' and entry[2] == -2: # other parent size = -2 + self._otherparentset.add(f) self._map[f] = dirstatetuple('r', 0, size, 0) self._nonnormalset.add(f) if size == 0 and f in self._copymap: @@ -666,11 +682,13 @@ def clear(self): self._map = {} self._nonnormalset = set() + self._otherparentset = set() if "_dirs" in self.__dict__: delattr(self, "_dirs") self._copymap = {} self._pl = [nullid, nullid] self._lastnormaltime = 0 + self._updatedfiles.clear() self._dirty = True def rebuild(self, parent, allfiles, changedfiles=None): @@ -707,13 +725,15 @@ # emulate dropping timestamp in 'parsers.pack_dirstate' now = _getfsnow(self._opener) dmap = self._map - for f, e in dmap.iteritems(): - if e[0] == 'n' and e[3] == now: + for f in self._updatedfiles: + e = dmap.get(f) + if e is not None and e[0] == 'n' and e[3] == now: dmap[f] = dirstatetuple(e[0], e[1], e[2], -1) self._nonnormalset.add(f) # emulate that all 'dirstate.normal' results are written out self._lastnormaltime = 0 + self._updatedfiles.clear() # delay writing in-memory changes out tr.addfilegenerator('dirstate', (self._filename,), @@ -762,7 +782,7 @@ break st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now)) - self._nonnormalset = nonnormalentries(self._map) + self._nonnormalset, self._otherparentset = nonnormalentries(self._map) st.close() self._lastnormaltime = 0 self._dirty = self._dirtypl = False @@ -1224,8 +1244,9 @@ # use '_writedirstate' instead of 'write' to write changes certainly, # because the latter omits writing out if transaction is running. # output file will be used to create backup of dirstate at this point. - self._writedirstate(self._opener(filename, "w", atomictemp=True, - checkambig=True)) + if self._dirty or not self._opener.exists(filename): + self._writedirstate(self._opener(filename, "w", atomictemp=True, + checkambig=True)) if tr: # ensure that subsequent tr.writepending returns True for @@ -1239,8 +1260,14 @@ # end of this transaction tr.registertmp(filename, location='plain') - self._opener.write(prefix + self._filename + suffix, - self._opener.tryread(filename)) + backupname = prefix + self._filename + suffix + assert backupname != filename + if self._opener.exists(backupname): + self._opener.unlink(backupname) + # hardlink backup is okay because _writedirstate is always called + # with an "atomictemp=True" file. + util.copyfile(self._opener.join(filename), + self._opener.join(backupname), hardlink=True) def restorebackup(self, tr, suffix='', prefix=''): '''Restore dirstate by backup file with suffix'''
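The savebackup hunk above relies on the invariant called out in its comment: because _writedirstate always goes through an atomictemp file, the live dirstate is replaced by rename rather than rewritten in place, so a hardlinked backup keeps the old bytes. A self-contained sketch of that invariant (file names are illustrative only)::

    import os

    with open('dirstate', 'wb') as f:         # stand-in for .hg/dirstate
        f.write(b'old contents')
    os.link('dirstate', 'dirstate.backup')    # backup shares the inode

    with open('dirstate.tmp', 'wb') as f:     # atomictemp-style rewrite
        f.write(b'new contents')
    os.rename('dirstate.tmp', 'dirstate')     # atomic replace

    # the backup still references the old inode, untouched by the rename
    assert open('dirstate.backup', 'rb').read() == b'old contents'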
--- a/mercurial/dispatch.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/dispatch.py Sat Mar 11 13:53:14 2017 -0500 @@ -33,6 +33,7 @@ extensions, fancyopts, fileset, + help, hg, hook, profiling, @@ -123,7 +124,7 @@ return -1 msg = ' '.join(' ' in a and repr(a) or a for a in req.args) - starttime = time.time() + starttime = util.timer() ret = None try: ret = _runcatch(req) @@ -135,8 +136,11 @@ raise ret = -1 finally: - duration = time.time() - starttime + duration = util.timer() - starttime req.ui.flush() + if req.ui.logblockedtimes: + req.ui._blockedtimes['command_duration'] = duration * 1000 + req.ui.log('uiblocked', 'ui blocked ms', **req.ui._blockedtimes) req.ui.log("commandfinish", "%s exited %s after %0.2f seconds\n", msg, ret or 0, duration) return ret @@ -230,28 +234,35 @@ (inst.args[0], " ".join(inst.args[1]))) except error.CommandError as inst: if inst.args[0]: + ui.pager('help') ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1])) commands.help_(ui, inst.args[0], full=False, command=True) else: + ui.pager('help') ui.warn(_("hg: %s\n") % inst.args[1]) commands.help_(ui, 'shortlist') except error.ParseError as inst: _formatparse(ui.warn, inst) return -1 except error.UnknownCommand as inst: - ui.warn(_("hg: unknown command '%s'\n") % inst.args[0]) + nocmdmsg = _("hg: unknown command '%s'\n") % inst.args[0] try: # check if the command is in a disabled extension # (but don't check for extensions themselves) - commands.help_(ui, inst.args[0], unknowncmd=True) + formatted = help.formattedhelp(ui, inst.args[0], unknowncmd=True) + ui.warn(nocmdmsg) + ui.write(formatted) except (error.UnknownCommand, error.Abort): suggested = False if len(inst.args) == 2: sim = _getsimilar(inst.args[1], inst.args[0]) if sim: + ui.warn(nocmdmsg) _reportsimilar(ui.warn, sim) suggested = True if not suggested: + ui.pager('help') + ui.warn(nocmdmsg) commands.help_(ui, 'shortlist') except IOError: raise @@ -345,7 +356,8 @@ return '' cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:]) cmd = aliasinterpolate(self.name, args, cmd) - return ui.system(cmd, environ=env) + return ui.system(cmd, environ=env, + blockedtag='alias_%s' % self.name) self.fn = fn return @@ -655,107 +667,120 @@ rpath = _earlygetopt(["-R", "--repository", "--repo"], args) path, lui = _getlocal(ui, rpath) - # Configure extensions in phases: uisetup, extsetup, cmdtable, and - # reposetup. Programs like TortoiseHg will call _dispatch several - # times so we keep track of configured extensions in _loaded. - extensions.loadall(lui) - exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded] - # Propagate any changes to lui.__class__ by extensions - ui.__class__ = lui.__class__ - - # (uisetup and extsetup are handled in extensions.loadall) - - for name, module in exts: - for objname, loadermod, loadername in extraloaders: - extraobj = getattr(module, objname, None) - if extraobj is not None: - getattr(loadermod, loadername)(ui, name, extraobj) - _loaded.add(name) - - # (reposetup is handled in hg.repository) - # As a side-effect of accessing it, the debugcommands module is # guaranteed to be imported and commands.table is populated. debugcommands.command - addaliases(lui, commands.table) - - # All aliases and commands are completely defined, now. - # Check abbreviation/ambiguity of shell alias. 
- shellaliasfn = _checkshellalias(lui, ui, args) - if shellaliasfn: - with profiling.maybeprofile(lui): - return shellaliasfn() - - # check for fallback encoding - fallback = lui.config('ui', 'fallbackencoding') - if fallback: - encoding.fallbackencoding = fallback - - fullargs = args - cmd, func, args, options, cmdoptions = _parse(lui, args) - - if options["config"]: - raise error.Abort(_("option --config may not be abbreviated!")) - if options["cwd"]: - raise error.Abort(_("option --cwd may not be abbreviated!")) - if options["repository"]: - raise error.Abort(_( - "option -R has to be separated from other options (e.g. not -qR) " - "and --repository may only be abbreviated as --repo!")) - - if options["encoding"]: - encoding.encoding = options["encoding"] - if options["encodingmode"]: - encoding.encodingmode = options["encodingmode"] - if options["time"]: - def get_times(): - t = os.times() - if t[4] == 0.0: # Windows leaves this as zero, so use time.clock() - t = (t[0], t[1], t[2], t[3], time.clock()) - return t - s = get_times() - def print_time(): - t = get_times() - ui.warn(_("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") % - (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3])) - atexit.register(print_time) - uis = set([ui, lui]) if req.repo: uis.add(req.repo.ui) - if options['verbose'] or options['debug'] or options['quiet']: - for opt in ('verbose', 'debug', 'quiet'): - val = str(bool(options[opt])) - for ui_ in uis: - ui_.setconfig('ui', opt, val, '--' + opt) - - if options['profile']: + if '--profile' in args: for ui_ in uis: ui_.setconfig('profiling', 'enabled', 'true', '--profile') - if options['traceback']: - for ui_ in uis: - ui_.setconfig('ui', 'traceback', 'on', '--traceback') + with profiling.maybeprofile(lui): + # Configure extensions in phases: uisetup, extsetup, cmdtable, and + # reposetup. Programs like TortoiseHg will call _dispatch several + # times so we keep track of configured extensions in _loaded. + extensions.loadall(lui) + exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded] + # Propagate any changes to lui.__class__ by extensions + ui.__class__ = lui.__class__ + + # (uisetup and extsetup are handled in extensions.loadall) + + for name, module in exts: + for objname, loadermod, loadername in extraloaders: + extraobj = getattr(module, objname, None) + if extraobj is not None: + getattr(loadermod, loadername)(ui, name, extraobj) + _loaded.add(name) + + # (reposetup is handled in hg.repository) + + addaliases(lui, commands.table) + + # All aliases and commands are completely defined, now. + # Check abbreviation/ambiguity of shell alias. + shellaliasfn = _checkshellalias(lui, ui, args) + if shellaliasfn: + return shellaliasfn() + + # check for fallback encoding + fallback = lui.config('ui', 'fallbackencoding') + if fallback: + encoding.fallbackencoding = fallback + + fullargs = args + cmd, func, args, options, cmdoptions = _parse(lui, args) + + if options["config"]: + raise error.Abort(_("option --config may not be abbreviated!")) + if options["cwd"]: + raise error.Abort(_("option --cwd may not be abbreviated!")) + if options["repository"]: + raise error.Abort(_( + "option -R has to be separated from other options (e.g. 
not " + "-qR) and --repository may only be abbreviated as --repo!")) - if options['noninteractive']: - for ui_ in uis: - ui_.setconfig('ui', 'interactive', 'off', '-y') + if options["encoding"]: + encoding.encoding = options["encoding"] + if options["encodingmode"]: + encoding.encodingmode = options["encodingmode"] + if options["time"]: + def get_times(): + t = os.times() + if t[4] == 0.0: + # Windows leaves this as zero, so use time.clock() + t = (t[0], t[1], t[2], t[3], time.clock()) + return t + s = get_times() + def print_time(): + t = get_times() + ui.warn( + _("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") % + (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3])) + atexit.register(print_time) - if cmdoptions.get('insecure', False): + if options['verbose'] or options['debug'] or options['quiet']: + for opt in ('verbose', 'debug', 'quiet'): + val = str(bool(options[opt])) + for ui_ in uis: + ui_.setconfig('ui', opt, val, '--' + opt) + + if options['traceback']: + for ui_ in uis: + ui_.setconfig('ui', 'traceback', 'on', '--traceback') + + if options['noninteractive']: + for ui_ in uis: + ui_.setconfig('ui', 'interactive', 'off', '-y') + + if util.parsebool(options['pager']): + ui.pager('internal-always-' + cmd) + elif options['pager'] != 'auto': + ui.disablepager() + + if cmdoptions.get('insecure', False): + for ui_ in uis: + ui_.insecureconnections = True + + # setup color handling + coloropt = options['color'] for ui_ in uis: - ui_.insecureconnections = True + if coloropt: + ui_.setconfig('ui', 'color', coloropt, '--color') + color.setup(ui_) - if options['version']: - return commands.version_(ui) - if options['help']: - return commands.help_(ui, cmd, command=cmd is not None) - elif not cmd: - return commands.help_(ui, 'shortlist') + if options['version']: + return commands.version_(ui) + if options['help']: + return commands.help_(ui, cmd, command=cmd is not None) + elif not cmd: + return commands.help_(ui, 'shortlist') - with profiling.maybeprofile(lui): repo = None cmdpats = args[:] if not func.norepo: @@ -835,6 +860,8 @@ if ui.config('ui', 'supportcontact', None) is None: for name, mod in extensions.extensions(): testedwith = getattr(mod, 'testedwith', '') + if pycompat.ispy3 and isinstance(testedwith, str): + testedwith = testedwith.encode(u'utf-8') report = getattr(mod, 'buglink', _('the extension author.')) if not testedwith.strip(): # We found an untested extension. It's likely the culprit. @@ -855,7 +882,7 @@ worst = name, nearest, report if worst[0] is not None: name, testedwith, report = worst - if not isinstance(testedwith, str): + if not isinstance(testedwith, (bytes, str)): testedwith = '.'.join([str(c) for c in testedwith]) warning = (_('** Unknown exception encountered with ' 'possibly-broken third-party extension %s\n' @@ -869,7 +896,12 @@ bugtracker = _("https://mercurial-scm.org/wiki/BugTracker") warning = (_("** unknown exception encountered, " "please report by visiting\n** ") + bugtracker + '\n') - warning += ((_("** Python %s\n") % sys.version.replace('\n', '')) + + if pycompat.ispy3: + sysversion = sys.version.encode(u'utf-8') + else: + sysversion = sys.version + sysversion = sysversion.replace('\n', '') + warning += ((_("** Python %s\n") % sysversion) + (_("** Mercurial Distributed SCM (version %s)\n") % util.version()) + (_("** Extensions loaded: %s\n") %
--- a/mercurial/exchange.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/exchange.py Sat Mar 11 13:53:14 2017 -0500 @@ -1724,9 +1724,15 @@ if url.startswith('remote:http:') or url.startswith('remote:https:'): captureoutput = True try: + # note: outside bundle1, 'heads' is expected to be empty and this + # 'check_heads' call will be a no-op check_heads(repo, heads, 'uploading changes') # push can proceed - if util.safehasattr(cg, 'params'): + if not util.safehasattr(cg, 'params'): + # legacy case: bundle1 (changegroup 01) + lockandtr[1] = repo.lock() + r = cg.apply(repo, source, url) + else: r = None try: def gettransaction(): @@ -1765,9 +1771,6 @@ mandatory=False) parts.append(part) raise - else: - lockandtr[1] = repo.lock() - r = cg.apply(repo, source, url) finally: lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0]) if recordout is not None:
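The reordered unbundle logic above distinguishes the two bundle formats by duck typing: only bundle2 objects carry a 'params' attribute, so its absence selects the legacy bundle1 path up front instead of in a trailing else clause. util.safehasattr is used rather than the hasattr builtin because Python 2's hasattr suppresses every exception raised during the attribute lookup; a sketch of the helper's idea::

    _missing = object()

    def safehasattr(thing, attr):
        # probe with a sentinel instead of hasattr(), which on Python 2
        # swallows all exceptions and can mask real bugs
        return getattr(thing, attr, _missing) is not _missing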
--- a/mercurial/extensions.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/extensions.py Sat Mar 11 13:53:14 2017 -0500 @@ -8,6 +8,7 @@ from __future__ import absolute_import import imp +import inspect import os from .i18n import ( @@ -150,7 +151,7 @@ try: extsetup(ui) except TypeError: - if extsetup.func_code.co_argcount != 0: + if inspect.getargspec(extsetup).args: raise extsetup() # old extsetup with no ui argument @@ -159,7 +160,7 @@ newindex = len(_order) for (name, path) in result: if path: - if path[0] == '!': + if path[0:1] == '!': _disabledextensions[name] = path[1:] continue try: @@ -362,7 +363,8 @@ '''find paths of disabled extensions. returns a dict of {name: path} removes /__init__.py from packages if strip_init is True''' import hgext - extpath = os.path.dirname(os.path.abspath(hgext.__file__)) + extpath = os.path.dirname( + os.path.abspath(pycompat.fsencode(hgext.__file__))) try: # might not be a filesystem path files = os.listdir(extpath) except OSError:
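Both extensions.py hunks above are Python 3 groundwork. inspect.getargspec(extsetup).args replaces extsetup.func_code.co_argcount because func_code was renamed __code__ in Python 3, and path[0:1] == '!' replaces path[0] == '!' because indexing a bytes object yields an int there. A small illustration (the path value is made up)::

    path = b'!hgext.example'   # hypothetical disabled-extension marker
    path[0]                    # 33 on Python 3, '!' on Python 2
    path[0] == b'!'            # False on Python 3
    path[0:1] == b'!'          # True on both Python 2 and 3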
--- a/mercurial/filemerge.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/filemerge.py Sat Mar 11 13:53:14 2017 -0500 @@ -489,8 +489,11 @@ args = util.interpolate(r'\$', replace, args, lambda s: util.shellquote(util.localpath(s))) cmd = toolpath + ' ' + args + if _toolbool(ui, tool, "gui"): + repo.ui.status(_('running merge tool %s for file %s\n') % + (tool, fcd.path())) repo.ui.debug('launching merge tool: %s\n' % cmd) - r = ui.system(cmd, cwd=repo.root, environ=env) + r = ui.system(cmd, cwd=repo.root, environ=env, blockedtag='mergetool') repo.ui.debug('merge tool returned: %s\n' % r) return True, r, False @@ -582,7 +585,7 @@ pre = "%s~%s." % (os.path.basename(fullbase), prefix) (fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext) data = repo.wwritedata(ctx.path(), ctx.data()) - f = os.fdopen(fd, "wb") + f = os.fdopen(fd, pycompat.sysstr("wb")) f.write(data) f.close() return name
--- a/mercurial/fileset.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/fileset.py Sat Mar 11 13:53:14 2017 -0500 @@ -15,6 +15,7 @@ merge, parser, registrar, + scmutil, util, ) @@ -438,6 +439,52 @@ s.append(f) return s +@predicate('revs(revs, pattern)') +def revs(mctx, x): + """Evaluate set in the specified revisions. If the revset matches multiple + revs, this will return files matching the pattern in any of the revisions. + """ + # i18n: "revs" is a keyword + r, x = getargs(x, 2, 2, _("revs takes two arguments")) + # i18n: "revs" is a keyword + revspec = getstring(r, _("first argument to revs must be a revision")) + repo = mctx.ctx.repo() + revs = scmutil.revrange(repo, [revspec]) + + found = set() + result = [] + for r in revs: + ctx = repo[r] + for f in getset(mctx.switch(ctx, _buildstatus(ctx, x)), x): + if f not in found: + found.add(f) + result.append(f) + return result + +@predicate('status(base, rev, pattern)') +def status(mctx, x): + """Evaluate predicate using status change between ``base`` and + ``rev``. Examples: + + - ``status(3, 7, added())`` - matches files added from "3" to "7" + """ + repo = mctx.ctx.repo() + # i18n: "status" is a keyword + b, r, x = getargs(x, 3, 3, _("status takes three arguments")) + # i18n: "status" is a keyword + baseerr = _("first argument to status must be a revision") + baserevspec = getstring(b, baseerr) + if not baserevspec: + raise error.ParseError(baseerr) + reverr = _("second argument to status must be a revision") + revspec = getstring(r, reverr) + if not revspec: + raise error.ParseError(reverr) + basenode, node = scmutil.revpair(repo, [baserevspec, revspec]) + basectx = repo[basenode] + ctx = repo[node] + return getset(mctx.switch(ctx, _buildstatus(ctx, x, basectx=basectx)), x) + @predicate('subrepo([pattern])') def subrepo(mctx, x): """Subrepositories whose paths match the given pattern. @@ -474,7 +521,7 @@ } class matchctx(object): - def __init__(self, ctx, subset=None, status=None): + def __init__(self, ctx, subset, status=None): self.ctx = ctx self.subset = subset self._status = status @@ -497,39 +544,71 @@ if (f in self.ctx and f not in removed) or f in unknown) def narrow(self, files): return matchctx(self.ctx, self.filter(files), self._status) + def switch(self, ctx, status=None): + subset = self.filter(_buildsubset(ctx, status)) + return matchctx(ctx, subset, status) + +class fullmatchctx(matchctx): + """A match context where any files in any revisions should be valid""" + + def __init__(self, ctx, status=None): + subset = _buildsubset(ctx, status) + super(fullmatchctx, self).__init__(ctx, subset, status) + def switch(self, ctx, status=None): + return fullmatchctx(ctx, status) + +# filesets using matchctx.switch() +_switchcallers = [ + 'revs', + 'status', +] def _intree(funcs, tree): if isinstance(tree, tuple): if tree[0] == 'func' and tree[1][0] == 'symbol': if tree[1][1] in funcs: return True + if tree[1][1] in _switchcallers: + # arguments won't be evaluated in the current context + return False for s in tree[1:]: if _intree(funcs, s): return True return False +def _buildsubset(ctx, status): + if status: + subset = [] + for c in status: + subset.extend(c) + return subset + else: + return list(ctx.walk(ctx.match([]))) + def getfileset(ctx, expr): tree = parse(expr) + return getset(fullmatchctx(ctx, _buildstatus(ctx, tree)), tree) +def _buildstatus(ctx, tree, basectx=None): # do we need status info? 
+ + # temporary boolean to simplify the next conditional + purewdir = ctx.rev() is None and basectx is None + + if (_intree(_statuscallers, tree) or # Using matchctx.existing() on a workingctx requires us to check # for deleted files. - (ctx.rev() is None and _intree(_existingcallers, tree))): + (purewdir and _intree(_existingcallers, tree))): unknown = _intree(['unknown'], tree) ignored = _intree(['ignored'], tree) r = ctx.repo() - status = r.status(ctx.p1(), ctx, - unknown=unknown, ignored=ignored, clean=True) - subset = [] - for c in status: - subset.extend(c) + if basectx is None: + basectx = ctx.p1() + return r.status(basectx, ctx, + unknown=unknown, ignored=ignored, clean=True) else: - status = None - subset = list(ctx.walk(ctx.match([]))) - - return getset(matchctx(ctx, subset, status), tree) + return None def prettyformat(tree): return parser.prettyformat(tree, ('string', 'symbol'))
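The new revs() and status() predicates let a fileset be evaluated against revisions other than the working directory, via the switch()/fullmatchctx machinery above. A hedged usage sketch through the module's entry point getfileset (repo is assumed to be an open localrepository object)::

    from mercurial import fileset

    ctx = repo['tip']
    # files added between revisions 3 and 7
    fileset.getfileset(ctx, "status(3, 7, added())")
    # files matching modified() in any draft revision
    fileset.getfileset(ctx, "revs('draft()', modified())")

On the command line the same expressions are reachable through 'set:' patterns, e.g. with 'hg files'.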
--- a/mercurial/formatter.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/formatter.py Sat Mar 11 13:53:14 2017 -0500 @@ -12,6 +12,7 @@ - fm.write() for unconditional output - fm.condwrite() to show some extra data conditionally in plain output +- fm.context() to provide changectx to template output - fm.data() to provide extra data to JSON or template output - fm.plain() to show raw text that isn't provided to JSON or template output @@ -171,6 +172,9 @@ # name is mandatory argument for now, but it could be optional if # we have default template keyword, e.g. {item} return self._converter.formatlist(data, name, fmt, sep) + def context(self, **ctxs): + '''insert context objects to be used to render template keywords''' + pass def data(self, **data): '''insert data into item that's not shown in default output''' self._item.update(data) @@ -257,24 +261,26 @@ pass class debugformatter(baseformatter): - def __init__(self, ui, topic, opts): + def __init__(self, ui, out, topic, opts): baseformatter.__init__(self, ui, topic, opts, _nullconverter) - self._ui.write("%s = [\n" % self._topic) + self._out = out + self._out.write("%s = [\n" % self._topic) def _showitem(self): - self._ui.write(" " + repr(self._item) + ",\n") + self._out.write(" " + repr(self._item) + ",\n") def end(self): baseformatter.end(self) - self._ui.write("]\n") + self._out.write("]\n") class pickleformatter(baseformatter): - def __init__(self, ui, topic, opts): + def __init__(self, ui, out, topic, opts): baseformatter.__init__(self, ui, topic, opts, _nullconverter) + self._out = out self._data = [] def _showitem(self): self._data.append(self._item) def end(self): baseformatter.end(self) - self._ui.write(pickle.dumps(self._data)) + self._out.write(pickle.dumps(self._data)) def _jsonifyobj(v): if isinstance(v, dict): @@ -295,28 +301,29 @@ return '"%s"' % encoding.jsonescape(v) class jsonformatter(baseformatter): - def __init__(self, ui, topic, opts): + def __init__(self, ui, out, topic, opts): baseformatter.__init__(self, ui, topic, opts, _nullconverter) - self._ui.write("[") - self._ui._first = True + self._out = out + self._out.write("[") + self._first = True def _showitem(self): - if self._ui._first: - self._ui._first = False + if self._first: + self._first = False else: - self._ui.write(",") + self._out.write(",") - self._ui.write("\n {\n") + self._out.write("\n {\n") first = True for k, v in sorted(self._item.items()): if first: first = False else: - self._ui.write(",\n") - self._ui.write(' "%s": %s' % (k, _jsonifyobj(v))) - self._ui.write("\n }") + self._out.write(",\n") + self._out.write(' "%s": %s' % (k, _jsonifyobj(v))) + self._out.write("\n }") def end(self): baseformatter.end(self) - self._ui.write("\n]\n") + self._out.write("\n]\n") class _templateconverter(object): '''convert non-primitive data types to be processed by templater''' @@ -342,13 +349,33 @@ lambda d: fmt % d[name]) class templateformatter(baseformatter): - def __init__(self, ui, topic, opts): + def __init__(self, ui, out, topic, opts): baseformatter.__init__(self, ui, topic, opts, _templateconverter) + self._out = out self._topic = topic - self._t = gettemplater(ui, topic, opts.get('template', '')) + self._t = gettemplater(ui, topic, opts.get('template', ''), + cache=templatekw.defaulttempl) + self._cache = {} # for templatekw/funcs to store reusable data + def context(self, **ctxs): + '''insert context objects to be used to render template keywords''' + assert all(k == 'ctx' for k in ctxs) + self._item.update(ctxs) def _showitem(self): - g = 
self._t(self._topic, ui=self._ui, **self._item) - self._ui.write(templater.stringify(g)) + # TODO: add support for filectx. probably each template keyword or + # function will have to declare dependent resources. e.g. + # @templatekeyword(..., requires=('ctx',)) + if 'ctx' in self._item: + props = templatekw.keywords.copy() + # explicitly-defined fields precede templatekw + props.update(self._item) + # but template resources must be always available + props['templ'] = self._t + props['repo'] = props['ctx'].repo() + props['revcache'] = {} + else: + props = self._item + g = self._t(self._topic, ui=self._ui, cache=self._cache, **props) + self._out.write(templater.stringify(g)) def lookuptemplate(ui, topic, tmpl): # looks like a literal template? @@ -382,17 +409,17 @@ # constant string? return tmpl, None -def gettemplater(ui, topic, spec): +def gettemplater(ui, topic, spec, cache=None): tmpl, mapfile = lookuptemplate(ui, topic, spec) assert not (tmpl and mapfile) if mapfile: - return templater.templater.frommapfile(mapfile) - return maketemplater(ui, topic, tmpl) + return templater.templater.frommapfile(mapfile, cache=cache) + return maketemplater(ui, topic, tmpl, cache=cache) -def maketemplater(ui, topic, tmpl, filters=None, cache=None): +def maketemplater(ui, topic, tmpl, cache=None): """Create a templater from a string template 'tmpl'""" aliases = ui.configitems('templatealias') - t = templater.templater(filters=filters, cache=cache, aliases=aliases) + t = templater.templater(cache=cache, aliases=aliases) if tmpl: t.cache[topic] = tmpl return t @@ -400,17 +427,17 @@ def formatter(ui, topic, opts): template = opts.get("template", "") if template == "json": - return jsonformatter(ui, topic, opts) + return jsonformatter(ui, ui, topic, opts) elif template == "pickle": - return pickleformatter(ui, topic, opts) + return pickleformatter(ui, ui, topic, opts) elif template == "debug": - return debugformatter(ui, topic, opts) + return debugformatter(ui, ui, topic, opts) elif template != "": - return templateformatter(ui, topic, opts) + return templateformatter(ui, ui, topic, opts) # developer config: ui.formatdebug elif ui.configbool('ui', 'formatdebug'): - return debugformatter(ui, topic, opts) + return debugformatter(ui, ui, topic, opts) # deprecated config: ui.formatjson elif ui.configbool('ui', 'formatjson'): - return jsonformatter(ui, topic, opts) + return jsonformatter(ui, ui, topic, opts) return plainformatter(ui, topic, opts)
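For illustration, here is a minimal sketch of how an extension command might drive the formatter API above, including the new ``fm.context()`` call; the command body and field names are hypothetical, and ``ui.formatter()`` is assumed as the usual entry point::

    def showheads(ui, repo, **opts):
        fm = ui.formatter('myheads', opts)   # plain/json/pickle/debug/template
        for rev in repo.revs('head()'):
            ctx = repo[rev]
            fm.startitem()
            fm.context(ctx=ctx)              # lets ctx-based template keywords render
            fm.write('node', '%s\n', ctx.hex())  # shown in every output mode
            fm.data(phase=ctx.phasestr())    # extra field for JSON/template output only
        fm.end()

With ``--template json`` this emits a JSON list; with ``--template '{node}\n'`` the template formatter renders each item, and keywords that need a changectx now work because of ``fm.context()``.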
--- a/mercurial/graphmod.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/graphmod.py Sat Mar 11 13:53:14 2017 -0500 @@ -22,6 +22,7 @@ from .node import nullrev from . import ( revset, + smartset, util, ) @@ -67,8 +68,8 @@ if gp is None: # precompute slow query as we know reachableroots() goes # through all revs (issue4782) - if not isinstance(revs, revset.baseset): - revs = revset.baseset(revs) + if not isinstance(revs, smartset.baseset): + revs = smartset.baseset(revs) gp = gpcache[mpar] = sorted(set(revset.reachableroots( repo, revs, [mpar]))) if not gp:
--- a/mercurial/help.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/mercurial/help.py	Sat Mar 11 13:53:14 2017 -0500
@@ -33,14 +33,17 @@
     webcommands,
 )
 
-_exclkeywords = [
+_exclkeywords = set([
+    "(ADVANCED)",
     "(DEPRECATED)",
     "(EXPERIMENTAL)",
+    # i18n: "(ADVANCED)" is a keyword, must be translated consistently
+    _("(ADVANCED)"),
     # i18n: "(DEPRECATED)" is a keyword, must be translated consistently
     _("(DEPRECATED)"),
     # i18n: "(EXPERIMENTAL)" is a keyword, must be translated consistently
     _("(EXPERIMENTAL)"),
-    ]
+    ])
 
 def listexts(header, exts, indent=1, showdeprecated=False):
     '''return a text listing of the given extensions'''
@@ -186,6 +189,8 @@
 internalstable = sorted([
     (['bundles'], _('Bundles'),
      loaddoc('bundles', subdir='internals')),
+    (['censor'], _('Censor'),
+     loaddoc('censor', subdir='internals')),
     (['changegroups'], _('Changegroups'),
      loaddoc('changegroups', subdir='internals')),
     (['requirements'], _('Repository Requirements'),
@@ -205,6 +210,7 @@
     return ''.join(lines)
 
 helptable = sorted([
+    (['color'], _("Colorizing Outputs"), loaddoc('color')),
     (["config", "hgrc"], _("Configuration Files"), loaddoc('config')),
     (["dates"], _("Date Formats"), loaddoc('dates')),
     (["patterns"], _("File Name Patterns"), loaddoc('patterns')),
@@ -230,6 +236,7 @@
      loaddoc('scripting')),
     (['internals'], _("Technical implementation topics"),
      internalshelp),
+    (['pager'], _("Pager Support"), loaddoc('pager')),
 ])
 
 # Maps topics with sub-topics to a list of their sub-topics.
@@ -605,3 +612,49 @@
         rst.extend(helplist(None, **opts))
 
     return ''.join(rst)
+
+def formattedhelp(ui, name, keep=None, unknowncmd=False, full=True, **opts):
+    """get help for a given topic (as a dotted name) as rendered rst
+
+    Either returns the rendered help text or raises an exception.
+    """
+    if keep is None:
+        keep = []
+    else:
+        keep = list(keep) # make a copy so we can mutate this later
+    fullname = name
+    section = None
+    subtopic = None
+    if name and '.' in name:
+        name, remaining = name.split('.', 1)
+        remaining = encoding.lower(remaining)
+        if '.' in remaining:
+            subtopic, section = remaining.split('.', 1)
+        else:
+            if name in subtopics:
+                subtopic = remaining
+            else:
+                section = remaining
+    textwidth = ui.configint('ui', 'textwidth', 78)
+    termwidth = ui.termwidth() - 2
+    if textwidth <= 0 or termwidth < textwidth:
+        textwidth = termwidth
+    text = help_(ui, name,
+                 subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
+
+    formatted, pruned = minirst.format(text, textwidth, keep=keep,
+                                       section=section)
+
+    # We could have been given a weird ".foo" section without a name
+    # to look for, or we could have simply failed to find "foo.bar"
+    # because bar isn't a section of foo
+    if section and not (formatted and name):
+        raise error.Abort(_("help section not found: %s") % fullname)
+
+    if 'verbose' in pruned:
+        keep.append('omitted')
+    else:
+        keep.append('notomitted')
+    formatted, pruned = minirst.format(text, textwidth, keep=keep,
+                                       section=section)
+    return formatted
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/color.txt	Sat Mar 11 13:53:14 2017 -0500
@@ -0,0 +1,134 @@
+Mercurial can colorize output from several commands.
+
+For example, the diff command shows additions in green and deletions
+in red, while the status command shows modified files in magenta. Many
+other commands have analogous colors. It is possible to customize
+these colors.
+
+To enable color use::
+
+  [ui]
+  color = auto
+
+Mode
+====
+
+Mercurial can use various systems to display color. The supported modes are
+``ansi``, ``win32``, and ``terminfo``. See :hg:`help config.color` for details
+about how to control the mode.
+
+Effects
+=======
+
+Other effects in addition to color, like bold and underlined text, are
+also available. By default, the terminfo database is used to find the
+terminal codes used to change color and effect. If terminfo is not
+available, then effects are rendered with the ECMA-48 SGR control
+function (aka ANSI escape codes).
+
+The available effects in terminfo mode are 'blink', 'bold', 'dim',
+'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
+ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
+'underline'. How each is rendered depends on the terminal emulator.
+Some may not be available for a given terminal type, and will be
+silently ignored.
+
+If the terminfo entry for your terminal is missing codes for an effect
+or has the wrong codes, you can add or override those codes in your
+configuration::
+
+  [color]
+  terminfo.dim = \E[2m
+
+where '\E' is substituted with an escape character.
+
+Labels
+======
+
+Text receives color effects depending on the labels that it has. Many
+default Mercurial commands emit labelled text. You can also define
+your own labels in templates using the label function, see :hg:`help
+templates`. A single portion of text may have more than one label. In
+that case, effects given to the last label will override any other
+effects. This includes the special "none" effect, which nullifies
+other effects.
+
+Labels are normally invisible. In order to see these labels and their
+position in the text, use the global --color=debug option. The same
+anchor text may be associated with multiple labels, e.g.
+
+  [log.changeset changeset.secret|changeset:   22611:6f0a53c8f587]
+
+The following are the default effects for some default labels. Default
+effects may be overridden from your configuration file::
+
+  [color]
+  status.modified = blue bold underline red_background
+  status.added = green bold
+  status.removed = red bold blue_background
+  status.deleted = cyan bold underline
+  status.unknown = magenta bold underline
+  status.ignored = black bold
+
+  # 'none' turns off all effects
+  status.clean = none
+  status.copied = none
+
+  qseries.applied = blue bold underline
+  qseries.unapplied = black bold
+  qseries.missing = red bold
+
+  diff.diffline = bold
+  diff.extended = cyan bold
+  diff.file_a = red bold
+  diff.file_b = green bold
+  diff.hunk = magenta
+  diff.deleted = red
+  diff.inserted = green
+  diff.changed = white
+  diff.tab =
+  diff.trailingwhitespace = bold red_background
+
+  # Blank so it inherits the style of the surrounding label
+  changeset.public =
+  changeset.draft =
+  changeset.secret =
+
+  resolve.unresolved = red bold
+  resolve.resolved = green bold
+
+  bookmarks.active = green
+
+  branches.active = none
+  branches.closed = black bold
+  branches.current = green
+  branches.inactive = none
+
+  tags.normal = green
+  tags.local = black bold
+
+  rebase.rebased = blue
+  rebase.remaining = red bold
+
+  shelve.age = cyan
+  shelve.newest = green bold
+  shelve.name = blue bold
+
+  histedit.remaining = red bold
+
+Custom colors
+=============
+
+Because there are only eight standard colors, Mercurial allows you
+to define color names for other color slots which might be available
+for your terminal type, assuming terminfo mode. For instance::
+
+  color.brightblue = 12
+  color.pink = 207
+  color.orange = 202
+
+to set 'brightblue' to color slot 12 (useful for 16 color terminals
+that have brighter colors defined in the upper eight) and 'pink' and
+'orange' to colors in 256-color xterm's default color cube. These
+defined colors may then be used as any of the pre-defined eight,
+including appending '_background' to set the background to that color.
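As a usage sketch (the label name and template are hypothetical), a custom label can be given effects in the configuration and then emitted from a template via the label function, while ``--color=debug`` shows where labels land::

  [color]
  mode = ansi
  custom.rev = yellow bold

  $ hg log --color=always -T "{label('custom.rev', rev)} {desc|firstline}\n"
  $ hg status --color=debug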
--- a/mercurial/help/config.txt	Tue Mar 07 13:24:24 2017 -0500
+++ b/mercurial/help/config.txt	Sat Mar 11 13:53:14 2017 -0500
@@ -56,6 +56,7 @@
 
 - ``<repo>/.hg/hgrc`` (per-repository)
 - ``$HOME/.hgrc`` (per-user)
+- ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
 - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
 - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
 - ``/etc/mercurial/hgrc`` (per-system)
@@ -276,7 +277,7 @@
 will let you do ``hg echo foo`` to have ``foo`` printed in your terminal.
 A better example might be::
 
-   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm
+   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
 
 which will make ``hg purge`` delete all unknown files in the
 repository in the same manner as the purge extension.
@@ -385,6 +386,33 @@
 If no suitable authentication entry is found, the user is prompted
 for credentials as usual if required by the remote.
 
+``color``
+---------
+
+Configure the Mercurial color mode. For details about how to define your
+custom effects and styles see :hg:`help color`.
+
+``mode``
+    String: control the method used to output color. One of ``auto``,
+    ``ansi``, ``win32``, ``terminfo`` or ``debug``. In auto mode the color
+    extension will use ANSI mode by default (or win32 mode on Windows) if it
+    detects a terminal. Any invalid value will disable color.
+
+``pagermode``
+    String: optional override of ``color.mode`` used with pager (from the
+    pager extension).
+
+    On some systems, terminfo mode may cause problems when using
+    color with the pager extension and less -R. less with the -R option
+    will only display ECMA-48 color codes, and terminfo mode may sometimes
+    emit codes that less doesn't understand. You can work around this by
+    either using ansi mode (or auto mode), or by using less -r (which will
+    pass through all terminal control codes, not just color control
+    codes).
+
+    On some systems (such as MSYS in Windows), the terminal may support
+    a different color mode than the pager (activated via the "pager"
+    extension).
 
 ``committemplate``
 ------------------
@@ -700,8 +728,8 @@
 Example for ``~/.hgrc``::
 
   [extensions]
-  # (the color extension will get loaded from Mercurial's path)
-  color =
+  # (the churn extension will get loaded from Mercurial's path)
+  churn =
   # (this extension will get loaded from the file specified)
   myfeature = ~/.hgext/myfeature.py
 
@@ -1796,6 +1824,13 @@
 
 By default, the first bundle advertised by the server is used.
 
+``color``
+    String: when to colorize output. Possible values are auto, always,
+    never, or debug (default: never). 'auto' will use color whenever it
+    seems possible. See :hg:`help color` for details.
+
+    (In addition, a boolean value can be used in place of always/never.)
+
 ``commitsubrepos``
     Whether to commit modified subrepositories when committing the
     parent repository. If False and one subrepository has uncommitted
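For example, the settings documented above can be combined so that terminfo is used for direct terminal output while the pager receives plain ANSI codes (values illustrative)::

  [ui]
  color = auto

  [color]
  mode = terminfo
  pagermode = ansi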
--- a/mercurial/help/filesets.txt Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/help/filesets.txt Sat Mar 11 13:53:14 2017 -0500 @@ -69,6 +69,10 @@ hg revert "set:copied() and binary() and size('>1M')" +- Revert files that were added to the working directory:: + + hg revert "set:revs('wdir()', added())" + - Remove files listed in foo.lst that contain the letter a or b:: hg remove "set: 'listfile:foo.lst' and (**a* or **b*)"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/censor.txt	Sat Mar 11 13:53:14 2017 -0500
@@ -0,0 +1,22 @@
+The censor system allows retroactively removing content from
+files. Actually censoring a node requires using the censor extension,
+but the functionality for handling censored nodes is partially in core.
+
+Censored nodes in a filelog have the flag ``REVIDX_ISCENSORED`` set,
+and the contents of the censored node are replaced with a censor
+tombstone. For historical reasons, the tombstone is packed in the
+filelog metadata field ``censored``. This allows censored nodes to be
+(mostly) safely transmitted through old formats like changegroup
+versions 1 and 2. When using changegroup formats older than 3, the
+receiver is required to re-add the ``REVIDX_ISCENSORED`` flag when
+storing the revision. This depends on the ``censored`` metadata key
+never being used for anything other than censoring revisions, which is
+true as of January 2017. Note that the revlog flag is the
+authoritative marker of a censored node: the tombstone should only be
+consulted when looking for a reason a node was censored or when revlog
+flags are unavailable as mentioned above.
+
+The tombstone data is a free-form string. It's expected that users of
+censor will want to record the reason for censoring a node in the
+tombstone. The tombstone must fit within the size of the content
+being censored.
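A minimal sketch of checking the flag described above, using a helper of our own naming on top of the ``repo.file()`` and ``revlog.flags()`` APIs::

    from mercurial import revlog

    def iscensored(repo, path, filenode):
        # the filelog that stores revisions of 'path'
        fl = repo.file(path)
        rev = fl.rev(filenode)
        # the revlog flag, not the tombstone, is the authoritative marker
        return bool(fl.flags(rev) & revlog.REVIDX_ISCENSORED)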
--- a/mercurial/help/internals/changegroups.txt	Tue Mar 07 13:24:24 2017 -0500
+++ b/mercurial/help/internals/changegroups.txt	Sat Mar 11 13:53:14 2017 -0500
@@ -1,35 +1,49 @@
 Changegroups are representations of repository revlog data, specifically
-the changelog, manifest, and filelogs.
+the changelog data, root/flat manifest data, treemanifest data, and
+filelogs.
 
 There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
-high-level, versions ``1`` and ``2`` are almost exactly the same, with
-the only difference being a header on entries in the changeset
-segment. Version ``3`` adds support for exchanging treemanifests and
-includes revlog flags in the delta header.
+high-level, versions ``1`` and ``2`` are almost exactly the same, with the
+only difference being an additional item in the *delta header*. Version
+``3`` adds support for revlog flags in the *delta header* and optionally
+exchanging treemanifests (enabled by setting an option on the
+``changegroup`` part in the bundle2).
 
-Changegroups consists of 3 logical segments::
+Changegroups when not exchanging treemanifests consist of 3 logical
+segments::
 
   +---------------------------------+
   |           |          |          |
   | changeset | manifest | filelogs |
   |           |          |          |
+  |           |          |          |
   +---------------------------------+
 
+When exchanging treemanifests, there are 4 logical segments::
+
+  +-------------------------------------------------+
+  |           |          |               |          |
+  | changeset |   root   | treemanifests | filelogs |
+  |           | manifest |               |          |
+  |           |          |               |          |
+  +-------------------------------------------------+
+
 The principal building block of each segment is a *chunk*. A *chunk*
 is a framed piece of data::
 
   +---------------------------------------+
   |           |                           |
   |  length   |           data            |
-  | (32 bits) |      <length> bytes       |
+  | (4 bytes) |   (<length - 4> bytes)    |
   |           |                           |
   +---------------------------------------+
 
-Each chunk starts with a 32-bit big-endian signed integer indicating
-the length of the raw data that follows.
+All integers are big-endian signed integers. Each chunk starts with a 32-bit
+integer indicating the length of the entire chunk (including the length field
+itself).
 
-There is a special case chunk that has 0 length (``0x00000000``). We
-call this an *empty chunk*.
+There is a special case chunk that has a value of 0 for the length
+(``0x00000000``). We call this an *empty chunk*.
 
 Delta Groups
 ============
@@ -43,26 +57,27 @@
 
   +------------------------------------------------------------------------+
   |               |             |               |             |            |
   | chunk0 length | chunk0 data | chunk1 length | chunk1 data |    0x0     |
-  |   (32 bits)   |  (various)  |   (32 bits)   |  (various)  | (32 bits)  |
+  |   (4 bytes)   |  (various)  |   (4 bytes)   |  (various)  | (4 bytes)  |
   |               |             |               |             |            |
-  +------------------------------------------------------------+-----------+
+  +------------------------------------------------------------------------+
 
 Each *chunk*'s data consists of the following::
 
-  +-----------------------------------------+
-  |              |              |           |
-  | delta header | mdiff header |   delta   |
-  |  (various)   |  (12 bytes)  | (various) |
-  |              |              |           |
-  +-----------------------------------------+
+  +---------------------------------------+
+  |                      |                |
+  |     delta header     |   delta data   |
+  | (various by version) |   (various)    |
+  |                      |                |
+  +---------------------------------------+
 
-The *length* field is the byte length of the remaining 3 logical pieces
-of data. The *delta* is a diff from an existing entry in the changelog.
+The *delta data* is a series of *delta*s that describe a diff from an existing
+entry (either one that the recipient already has, or one previously specified
+in the bundle/changegroup).
 
 The *delta header* is different between versions ``1``, ``2``, and
 ``3`` of the changegroup format.
 
-Version 1::
+Version 1 (headerlen=80)::
 
   +------------------------------------------------------+
   |            |             |             |             |
   |    node    |   p1 node   |   p2 node   |  link node  |
   | (20 bytes) | (20 bytes)  | (20 bytes)  | (20 bytes)  |
   |            |             |             |             |
   +------------------------------------------------------+
 
-Version 2::
+Version 2 (headerlen=100)::
 
   +------------------------------------------------------------------+
   |            |             |             |            |            |
   |    node    |   p1 node   |   p2 node   | base node  | link node  |
   | (20 bytes) | (20 bytes)  | (20 bytes)  | (20 bytes) | (20 bytes) |
   |            |             |             |            |            |
   +------------------------------------------------------------------+
 
-Version 3::
+Version 3 (headerlen=102)::
 
   +------------------------------------------------------------------------------+
   |            |             |             |            |            |           |
   |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
   | (20 bytes) | (20 bytes)  | (20 bytes)  | (20 bytes) | (20 bytes) | (2 bytes) |
   |            |             |             |            |            |           |
   +------------------------------------------------------------------------------+
 
-The *mdiff header* consists of 3 32-bit big-endian signed integers
-describing offsets at which to apply the following delta content::
+The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain
+a series of *delta*s, densely packed (no separators). These deltas describe a
+diff from an existing entry (either one that the recipient already has, or one
+previously specified in the bundle/changegroup). The format is described more
+fully in ``hg help internals.bdiff``, but briefly::
 
-  +-------------------------------------+
-  |           |            |            |
-  |  offset   | old length | new length |
-  | (32 bits) | (32 bits)  | (32 bits)  |
-  |           |            |            |
-  +-------------------------------------+
+  +---------------------------------------------------------------+
+  |              |            |            |                      |
+  | start offset | end offset | new length |       content        |
+  |  (4 bytes)   | (4 bytes)  | (4 bytes)  | (<new length> bytes) |
+  |              |            |            |                      |
+  +---------------------------------------------------------------+
+
+Please note that the length field in the delta data does *not* include itself.
 
 In version 1, the delta is always applied against the previous node from
 the changegroup or the first parent if this is the first entry in the
 changegroup.
 
-In version 2, the delta base node is encoded in the entry in the
+In version 2 and up, the delta base node is encoded in the entry in the
 changegroup. This allows the delta to be expressed against any parent,
 which can result in smaller deltas and more efficient encoding of data.
 
@@ -111,43 +131,58 @@
 Changeset Segment
 =================
 
 The *changeset segment* consists of a single *delta group* holding
-changelog data. It is followed by an *empty chunk* to denote the
-boundary to the *manifests segment*.
+changelog data. The *empty chunk* at the end of the *delta group* denotes
+the boundary to the *manifest segment*.
 
 Manifest Segment
 ================
 
-The *manifest segment* consists of a single *delta group* holding
-manifest data. It is followed by an *empty chunk* to denote the boundary
-to the *filelogs segment*.
+The *manifest segment* consists of a single *delta group* holding manifest
+data. If treemanifests are in use, it contains only the manifest for the
+root directory of the repository. Otherwise, it contains the entire
+manifest data. The *empty chunk* at the end of the *delta group* denotes
+the boundary to the next segment (either the *treemanifests segment* or the
+*filelogs segment*, depending on version and the request options).
+
+Treemanifests Segment
+---------------------
+
+The *treemanifests segment* only exists in changegroup version ``3``, and
+only if the 'treemanifest' param is part of the bundle2 changegroup part
+(it is not possible to use changegroup version 3 outside of bundle2).
+Aside from the filenames in the *treemanifests segment* containing a
+trailing ``/`` character, it behaves identically to the *filelogs segment*
+(see below). The final sub-segment is followed by an *empty chunk*
+(logically, a sub-segment with filename size 0). This denotes the boundary
+to the *filelogs segment*.
 
 Filelogs Segment
 ================
 
-The *filelogs* segment consists of multiple sub-segments, each
+The *filelogs segment* consists of multiple sub-segments, each
 corresponding to an individual file whose data is being described::
 
-  +--------------------------------------+
-  |          |          |          |     |
-  | filelog0 | filelog1 | filelog2 | ... |
-  |          |          |          |     |
-  +--------------------------------------+
+  +--------------------------------------------------+
+  |          |          |          |     |           |
+  | filelog0 | filelog1 | filelog2 | ... |    0x0    |
+  |          |          |          |     | (4 bytes) |
+  |          |          |          |     |           |
+  +--------------------------------------------------+
 
-In version ``3`` of the changegroup format, filelogs may include
-directory logs when treemanifests are in use. directory logs are
-identified by having a trailing '/' on their filename (see below).
-
-The final filelog sub-segment is followed by an *empty chunk* to denote
-the end of the segment and the overall changegroup.
+The final filelog sub-segment is followed by an *empty chunk* (logically,
+a sub-segment with filename size 0). This denotes the end of the segment
+and of the overall changegroup.
 
 Each filelog sub-segment consists of the following::
 
-  +------------------------------------------+
-  |               |           |              |
-  | filename size |  filename | delta group  |
-  |   (32 bits)   | (various) |  (various)   |
-  |               |           |              |
-  +------------------------------------------+
+  +------------------------------------------------------+
+  |                 |                      |             |
+  | filename length |       filename       | delta group |
+  |    (4 bytes)    | (<length - 4> bytes) |  (various)  |
+  |                 |                      |             |
+  +------------------------------------------------------+
 
 That is, a *chunk* consisting of the filename (not terminated or padded)
-followed by N chunks constituting the *delta group* for this file.
+followed by N chunks constituting the *delta group* for this file. The
+*empty chunk* at the end of each *delta group* denotes the boundary to the
+next filelog sub-segment.
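As a reading aid, here is a sketch of the chunk framing described above (a helper of our own, ignoring the version-specific delta headers): the length prefix includes its own four bytes, and a zero length marks a segment boundary::

    import struct

    def readchunks(fh):
        '''Yield each chunk's payload until an empty chunk is seen.'''
        while True:
            lengthbytes = fh.read(4)
            length = struct.unpack('>l', lengthbytes)[0]  # big-endian signed
            if length == 0:            # *empty chunk*: end of this delta group
                return
            yield fh.read(length - 4)  # payload excludes the length field itself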
--- a/mercurial/help/internals/requirements.txt Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/help/internals/requirements.txt Sat Mar 11 13:53:14 2017 -0500 @@ -55,6 +55,17 @@ The requirement was added in Mercurial 1.3 (released July 2009). +relshared +========= + +Derivative of ``shared``; the location of the store is relative to the +store of this repository. + +This requirement is set when a repository is created via :hg:`share` +using the ``--relative`` option. + +The requirement was added in Mercurial 4.2 (released May 2017). + dotencode =========
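For illustration, a relative share would be created and inspected roughly like this (paths hypothetical; the other entries in ``requires`` depend on the source repository)::

    $ hg share --relative ../main-repo shared-copy
    $ grep relshared shared-copy/.hg/requires
    relshared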
--- a/mercurial/help/internals/revlogs.txt Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/help/internals/revlogs.txt Sat Mar 11 13:53:14 2017 -0500 @@ -108,9 +108,9 @@ 16-19 (4 bytes) Base or previous revision this revision's delta was produced against. - -1 means this revision holds full text (as opposed to a delta). - For generaldelta repos, this is the previous revision in the delta - chain. For non-generaldelta repos, this is the base or first + This revision holds full text (as opposed to a delta) if it points to + itself. For generaldelta repos, this is the previous revision in the + delta chain. For non-generaldelta repos, this is the base or first revision in the delta chain. 20-23 (4 bytes)
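A sketch of testing the rule above (a helper of our own, assuming the in-memory index tuple layout used by ``mercurial.revlog``, where the base field is the fourth element)::

    def isstoredasfulltext(rl, rev):
        # the field at bytes 16-19 of the on-disk index entry
        base = rl.index[rev][3]
        return base == rev  # points to itself => full text, not a delta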
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/pager.txt	Sat Mar 11 13:53:14 2017 -0500
@@ -0,0 +1,35 @@
+Some Mercurial commands produce a lot of output, and Mercurial will
+attempt to use a pager to make those commands more pleasant.
+
+To set the pager that should be used, set the application variable::
+
+  [pager]
+  pager = less -FRX
+
+If no pager is set, the pager extension uses the environment
+variable $PAGER. If neither pager.pager nor $PAGER is set, a default
+pager will be used, typically `more`.
+
+You can disable the pager for certain commands by adding them to the
+pager.ignore list::
+
+  [pager]
+  ignore = version, help, update
+
+To ignore global commands like :hg:`version` or :hg:`help`, you have
+to specify them in your user configuration file.
+
+To control whether the pager is used at all for an individual command,
+you can use --pager=<value>::
+
+  - use as needed: `auto`.
+  - require the pager: `yes` or `on`.
+  - suppress the pager: `no` or `off` (any unrecognized value
+    will also work).
+
+To globally turn off all attempts to use a pager, set::
+
+  [pager]
+  enable = false
+
+which will prevent the pager from running.
--- a/mercurial/help/patterns.txt Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/help/patterns.txt Sat Mar 11 13:53:14 2017 -0500 @@ -13,7 +13,10 @@ To use a plain path name without any pattern matching, start it with ``path:``. These path names must completely match starting at the -current repository root. +current repository root, and when the path points to a directory, it is matched +recursively. To match all files in a directory non-recursively (not including +any files in subdirectories), ``rootfilesin:`` can be used, specifying an +absolute path (relative to the repository root). To use an extended glob, start a name with ``glob:``. Globs are rooted at the current directory; a glob such as ``*.c`` will only match files @@ -39,12 +42,15 @@ All patterns, except for ``glob:`` specified in command line (not for ``-I`` or ``-X`` options), can match also against directories: files under matched directories are treated as matched. +For ``-I`` and ``-X`` options, ``glob:`` will match directories recursively. Plain examples:: - path:foo/bar a name bar in a directory named foo in the root - of the repository - path:path:name a file or directory named "path:name" + path:foo/bar a name bar in a directory named foo in the root + of the repository + path:path:name a file or directory named "path:name" + rootfilesin:foo/bar the files in a directory called foo/bar, but not any files + in its subdirectories and not a file bar in directory foo Glob examples:: @@ -52,6 +58,8 @@ *.c any name ending in ".c" in the current directory **.c any name ending in ".c" in any subdirectory of the current directory including itself. + foo/* any file in directory foo plus all its subdirectories, + recursively foo/*.c any name ending in ".c" in the directory foo foo/**.c any name ending in ".c" in any subdirectory of foo including itself.
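As a quick comparison of the two kinds (directory name hypothetical)::

    $ hg files "path:tests"          # tests/ and everything below it
    $ hg files "rootfilesin:tests"   # only files directly inside tests/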
--- a/mercurial/hg.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/hg.py Sat Mar 11 13:53:14 2017 -0500 @@ -40,6 +40,7 @@ url, util, verify as verifymod, + vfs as vfsmod, ) release = lock.release @@ -195,7 +196,8 @@ return '' return os.path.basename(os.path.normpath(path)) -def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None): +def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None, + relative=False): '''create a shared repository''' if not islocal(source): @@ -218,8 +220,8 @@ sharedpath = srcrepo.sharedpath # if our source is already sharing - destwvfs = scmutil.vfs(dest, realpath=True) - destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True) + destwvfs = vfsmod.vfs(dest, realpath=True) + destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True) if destvfs.lexists(): raise error.Abort(_('destination already exists')) @@ -235,7 +237,16 @@ if inst.errno != errno.ENOENT: raise - requirements += 'shared\n' + if relative: + try: + sharedpath = os.path.relpath(sharedpath, destvfs.base) + requirements += 'relshared\n' + except IOError as e: + raise error.Abort(_('cannot calculate relative path'), + hint=str(e)) + else: + requirements += 'shared\n' + destvfs.write('requires', requirements) destvfs.write('sharedpath', sharedpath) @@ -302,8 +313,8 @@ else: ui.progress(topic, pos + num) srcpublishing = srcrepo.publishing() - srcvfs = scmutil.vfs(srcrepo.sharedpath) - dstvfs = scmutil.vfs(destpath) + srcvfs = vfsmod.vfs(srcrepo.sharedpath) + dstvfs = vfsmod.vfs(destpath) for f in srcrepo.store.copylist(): if srcpublishing and f.endswith('phaseroots'): continue @@ -359,7 +370,7 @@ if e.errno != errno.EEXIST: raise - poolvfs = scmutil.vfs(pooldir) + poolvfs = vfsmod.vfs(pooldir) basename = os.path.basename(sharepath) with lock.lock(poolvfs, '%s.lock' % basename): @@ -464,7 +475,7 @@ if not dest: raise error.Abort(_("empty destination path is not valid")) - destvfs = scmutil.vfs(dest, expandpath=True) + destvfs = vfsmod.vfs(dest, expandpath=True) if destvfs.lexists(): if not destvfs.isdir(): raise error.Abort(_("destination '%s' already exists") % dest) @@ -681,18 +692,19 @@ repo.ui.status(_("%d files updated, %d files merged, " "%d files removed, %d files unresolved\n") % stats) -def updaterepo(repo, node, overwrite): +def updaterepo(repo, node, overwrite, updatecheck=None): """Update the working directory to node. 
When overwrite is set, changes are clobbered, merged else returns stats (see pydoc mercurial.merge.applyupdates)""" return mergemod.update(repo, node, False, overwrite, - labels=['working copy', 'destination']) + labels=['working copy', 'destination'], + updatecheck=updatecheck) -def update(repo, node, quietempty=False): - """update the working directory to node, merging linear changes""" - stats = updaterepo(repo, node, False) +def update(repo, node, quietempty=False, updatecheck=None): + """update the working directory to node""" + stats = updaterepo(repo, node, False, updatecheck=updatecheck) _showstats(repo, stats, quietempty) if stats[3]: repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n")) @@ -712,7 +724,7 @@ # naming conflict in updatetotally() _clean = clean -def updatetotally(ui, repo, checkout, brev, clean=False, check=False): +def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): """Update the working directory with extra care for non-file components This takes care of non-file components below: @@ -724,22 +736,38 @@ :checkout: to which revision the working directory is updated :brev: a name, which might be a bookmark to be activated after updating :clean: whether changes in the working directory can be discarded - :check: whether changes in the working directory should be checked + :updatecheck: how to deal with a dirty working directory + + Valid values for updatecheck are (None => linear): + + * abort: abort if the working directory is dirty + * none: don't check (merge working directory changes into destination) + * linear: check that update is linear before merging working directory + changes into destination + * noconflict: check that the update does not result in file merges This returns whether conflict is detected at updating or not. """ + if updatecheck is None: + updatecheck = ui.config('experimental', 'updatecheck') + if updatecheck not in ('abort', 'none', 'linear', 'noconflict'): + # If not configured, or invalid value configured + updatecheck = 'linear' with repo.wlock(): movemarkfrom = None warndest = False if checkout is None: - updata = destutil.destupdate(repo, clean=clean, check=check) + updata = destutil.destupdate(repo, clean=clean) checkout, movemarkfrom, brev = updata warndest = True if clean: ret = _clean(repo, checkout) else: - ret = _update(repo, checkout) + if updatecheck == 'abort': + cmdutil.bailifchanged(repo, merge=False) + updatecheck = 'none' + ret = _update(repo, checkout, updatecheck=updatecheck) if not ret and movemarkfrom: if movemarkfrom == repo['.'].node(): @@ -802,7 +830,7 @@ if not chlist: ui.status(_("no changes found\n")) return subreporecurse() - + ui.pager('incoming') displayer = cmdutil.show_changeset(ui, other, opts, buffered) displaychlist(other, chlist, displayer) displayer.close() @@ -870,6 +898,7 @@ if opts.get('newest_first'): o.reverse() + ui.pager('outgoing') displayer = cmdutil.show_changeset(ui, repo, opts) count = 0 for n in o:
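Per ``updatetotally()`` above, the default policy comes from an experimental knob, and anything other than the four known values ('abort', 'none', 'linear', 'noconflict') falls back to 'linear'; for example::

    [experimental]
    updatecheck = noconflict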
--- a/mercurial/hgweb/webcommands.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/hgweb/webcommands.py Sat Mar 11 13:53:14 2017 -0500 @@ -32,7 +32,9 @@ error, graphmod, revset, + revsetlang, scmutil, + smartset, templatefilters, templater, util, @@ -238,20 +240,20 @@ revdef = 'reverse(%s)' % query try: - tree = revset.parse(revdef) + tree = revsetlang.parse(revdef) except error.ParseError: # can't parse to a revset tree return MODE_KEYWORD, query - if revset.depth(tree) <= 2: + if revsetlang.depth(tree) <= 2: # no revset syntax used return MODE_KEYWORD, query if any((token, (value or '')[:3]) == ('string', 're:') - for token, value, pos in revset.tokenize(revdef)): + for token, value, pos in revsetlang.tokenize(revdef)): return MODE_KEYWORD, query - funcsused = revset.funcsused(tree) + funcsused = revsetlang.funcsused(tree) if not funcsused.issubset(revset.safesymbols): return MODE_KEYWORD, query @@ -752,13 +754,14 @@ if fctx is not None: path = fctx.path() ctx = fctx.changectx() + basectx = ctx.p1() parity = paritygen(web.stripecount) style = web.config('web', 'style', 'paper') if 'style' in req.form: style = req.form['style'][0] - diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style) + diffs = webutil.diffs(web.repo, tmpl, ctx, basectx, [path], parity, style) if fctx is not None: rename = webutil.renamelink(fctx) ctx = fctx @@ -1148,7 +1151,7 @@ # We have to feed a baseset to dagwalker as it is expecting smartset # object. This does not have a big impact on hgweb performance itself # since hgweb graphing code is not itself lazy yet. - dag = graphmod.dagwalker(web.repo, revset.baseset(revs)) + dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) # As we said one line above... not lazy. tree = list(graphmod.colored(dag, web.repo))
--- a/mercurial/hgweb/webutil.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/hgweb/webutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -412,16 +412,9 @@ def diffs(repo, tmpl, ctx, basectx, files, parity, style): - def countgen(): - start = 1 - while True: - yield start - start += 1 - - blockcount = countgen() - def prettyprintlines(diff, blockno): - for lineno, l in enumerate(diff.splitlines(True)): - difflineno = "%d.%d" % (blockno, lineno + 1) + def prettyprintlines(lines, blockno): + for lineno, l in enumerate(lines, 1): + difflineno = "%d.%d" % (blockno, lineno) if l.startswith('+'): ltype = "difflineplus" elif l.startswith('-'): @@ -432,7 +425,7 @@ ltype = "diffline" yield tmpl(ltype, line=l, - lineno=lineno + 1, + lineno=lineno, lineid="l%s" % difflineno, linenumber="% 8s" % difflineno) @@ -442,29 +435,19 @@ m = match.always(repo.root, repo.getcwd()) diffopts = patch.diffopts(repo.ui, untrusted=True) - if basectx is None: - parents = ctx.parents() - if parents: - node1 = parents[0].node() - else: - node1 = nullid - else: - node1 = basectx.node() + node1 = basectx.node() node2 = ctx.node() - block = [] - for chunk in patch.diff(repo, node1, node2, m, opts=diffopts): - if chunk.startswith('diff') and block: - blockno = next(blockcount) + diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts) + for blockno, (header, hunks) in enumerate(diffhunks, 1): + if style != 'raw': + header = header[1:] + lines = [h + '\n' for h in header] + for hunkrange, hunklines in hunks: + lines.extend(hunklines) + if lines: yield tmpl('diffblock', parity=next(parity), blockno=blockno, - lines=prettyprintlines(''.join(block), blockno)) - block = [] - if chunk.startswith('diff') and style != 'raw': - chunk = ''.join(chunk.splitlines(True)[1:]) - block.append(chunk) - blockno = next(blockcount) - yield tmpl('diffblock', parity=next(parity), blockno=blockno, - lines=prettyprintlines(''.join(block), blockno)) + lines=prettyprintlines(lines, blockno)) def compare(tmpl, context, leftlines, rightlines): '''Generator function that provides side-by-side comparison data.'''
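A minimal sketch of consuming ``patch.diffhunks()`` the same way the rewritten ``diffs()`` above does (helper name ours): each item pairs a file header with its hunks, and each hunk pairs a range tuple with its rendered lines::

    from mercurial import match as matchmod, patch

    def countchangedlines(repo, node1, node2):
        m = matchmod.always(repo.root, repo.getcwd())
        diffopts = patch.diffopts(repo.ui, untrusted=True)
        total = 0
        for header, hunks in patch.diffhunks(repo, node1, node2, m,
                                             opts=diffopts):
            for hunkrange, hunklines in hunks:
                # count added and removed lines only
                total += sum(1 for l in hunklines
                             if l.startswith(('+', '-')))
        return total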
--- a/mercurial/hook.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/hook.py Sat Mar 11 13:53:14 2017 -0500 @@ -9,7 +9,6 @@ import os import sys -import time from .i18n import _ from . import ( @@ -88,7 +87,7 @@ % (hname, funcname)) ui.note(_("calling hook %s: %s\n") % (hname, funcname)) - starttime = time.time() + starttime = util.timer() try: r = obj(ui=ui, repo=repo, hooktype=name, **args) @@ -106,7 +105,7 @@ ui.traceback() return True, True finally: - duration = time.time() - starttime + duration = util.timer() - starttime ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n', name, funcname, duration) if r: @@ -118,7 +117,7 @@ def _exthook(ui, repo, name, cmd, args, throw): ui.note(_("running hook %s: %s\n") % (name, cmd)) - starttime = time.time() + starttime = util.timer() env = {} # make in-memory changes visible to external process @@ -143,9 +142,9 @@ cwd = repo.root else: cwd = pycompat.getcwd() - r = ui.system(cmd, environ=env, cwd=cwd) + r = ui.system(cmd, environ=env, cwd=cwd, blockedtag='exthook-%s' % (name,)) - duration = time.time() - starttime + duration = util.timer() - starttime ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n', name, cmd, duration) if r:
--- a/mercurial/httpconnection.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/httpconnection.py Sat Mar 11 13:53:14 2017 -0500 @@ -67,13 +67,13 @@ # moved here from url.py to avoid a cycle def readauthforuri(ui, uri, user): # Read configuration - config = dict() + groups = {} for key, val in ui.configitems('auth'): if '.' not in key: ui.warn(_("ignoring invalid [auth] key '%s'\n") % key) continue group, setting = key.rsplit('.', 1) - gdict = config.setdefault(group, dict()) + gdict = groups.setdefault(group, {}) if setting in ('username', 'cert', 'key'): val = util.expandpath(val) gdict[setting] = val @@ -83,7 +83,7 @@ bestuser = None bestlen = 0 bestauth = None - for group, auth in config.iteritems(): + for group, auth in groups.iteritems(): if user and user != auth.get('username', user): # If a username was set in the URI, the entry username # must either match it or be unset
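For reference, the ``[auth]`` keys being grouped above look like this in a config file (host and file names hypothetical); everything before the last dot is the group name, and the ``cert`` and ``key`` values are path-expanded as in the code::

    [auth]
    example.prefix = https://hg.example.com/repos
    example.username = alice
    example.cert = ~/certs/example.pem
    example.key = ~/certs/example.key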
--- a/mercurial/httppeer.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/httppeer.py Sat Mar 11 13:53:14 2017 -0500 @@ -20,6 +20,7 @@ bundle2, error, httpconnection, + pycompat, statichttprepo, url, util, @@ -327,7 +328,7 @@ try: # dump bundle to disk fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") - fh = os.fdopen(fd, "wb") + fh = os.fdopen(fd, pycompat.sysstr("wb")) d = fp.read(4096) while d: fh.write(d)
--- a/mercurial/i18n.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/i18n.py Sat Mar 11 13:53:14 2017 -0500 @@ -21,7 +21,7 @@ if getattr(sys, 'frozen', None) is not None: module = pycompat.sysexecutable else: - module = __file__ + module = pycompat.fsencode(__file__) try: unicode
--- a/mercurial/keepalive.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/keepalive.py Sat Mar 11 13:53:14 2017 -0500 @@ -310,14 +310,16 @@ try: if req.has_data(): data = req.get_data() - h.putrequest('POST', req.get_selector(), **skipheaders) + h.putrequest( + req.get_method(), req.get_selector(), **skipheaders) if 'content-type' not in headers: h.putheader('Content-type', 'application/x-www-form-urlencoded') if 'content-length' not in headers: h.putheader('Content-length', '%d' % len(data)) else: - h.putrequest('GET', req.get_selector(), **skipheaders) + h.putrequest( + req.get_method(), req.get_selector(), **skipheaders) except socket.error as err: raise urlerr.urlerror(err) for k, v in headers.items():
--- a/mercurial/localrepo.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/localrepo.py Sat Mar 11 13:53:14 2017 -0500 @@ -28,6 +28,7 @@ bundle2, changegroup, changelog, + color, context, dirstate, dirstateguard, @@ -50,12 +51,15 @@ pushkey, repoview, revset, + revsetlang, scmutil, store, subrepo, tags as tagsmod, transaction, + txnutil, util, + vfs as vfsmod, ) release = lockmod.release @@ -66,6 +70,8 @@ """All filecache usage on repo are done for logic that should be unfiltered """ + def join(self, obj, fname): + return obj.vfs.join(fname) def __get__(self, repo, type=None): if repo is None: return self @@ -241,7 +247,7 @@ supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2')) _basesupported = supportedformats | set(('store', 'fncache', 'shared', - 'dotencode')) + 'relshared', 'dotencode')) openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2')) filtername = None @@ -251,16 +257,19 @@ def __init__(self, baseui, path, create=False): self.requirements = set() - self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True) - self.wopener = self.wvfs + # vfs to access the working copy + self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True) + # vfs to access the content of the repository + self.vfs = None + # vfs to access the store part of the repository + self.svfs = None self.root = self.wvfs.base self.path = self.wvfs.join(".hg") self.origroot = path self.auditor = pathutil.pathauditor(self.root, self._checknested) self.nofsauditor = pathutil.pathauditor(self.root, self._checknested, realfs=False) - self.vfs = scmutil.vfs(self.path) - self.opener = self.vfs + self.vfs = vfsmod.vfs(self.path) self.baseui = baseui self.ui = baseui.copy() self.ui.copy = baseui.copy # prevent copying repo configuration @@ -270,7 +279,7 @@ self._phasedefaults = [] try: self.ui.readconfig(self.join("hgrc"), self.root) - extensions.loadall(self.ui) + self._loadextensions() except IOError: pass @@ -283,6 +292,7 @@ setupfunc(self.ui, self.supported) else: self.supported = self._basesupported + color.setup(self.ui) # Add compression engines. 
for name in util.compengines: @@ -321,8 +331,10 @@ self.sharedpath = self.path try: - vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'), - realpath=True) + sharedpath = self.vfs.read("sharedpath").rstrip('\n') + if 'relshared' in self.requirements: + sharedpath = self.vfs.join(sharedpath) + vfs = vfsmod.vfs(sharedpath, realpath=True) s = vfs.base if not vfs.exists(): raise error.RepoError( @@ -333,7 +345,7 @@ raise self.store = store.store( - self.requirements, self.sharedpath, scmutil.vfs) + self.requirements, self.sharedpath, vfsmod.vfs) self.spath = self.store.path self.svfs = self.store.vfs self.sjoin = self.store.join @@ -368,9 +380,22 @@ # generic mapping between names and nodes self.names = namespaces.namespaces() + @property + def wopener(self): + self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2') + return self.wvfs + + @property + def opener(self): + self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2') + return self.vfs + def close(self): self._writecaches() + def _loadextensions(self): + extensions.loadall(self.ui) + def _writecaches(self): if self._revbranchcache: self._revbranchcache.write() @@ -461,9 +486,9 @@ """Return a filtered version of a repository""" # build a new class with the mixin and the current class # (possibly subclass of the repo) - class proxycls(repoview.repoview, self.unfiltered().__class__): + class filteredrepo(repoview.repoview, self.unfiltered().__class__): pass - return proxycls(self, name) + return filteredrepo(self, name) @repofilecache('bookmarks', 'bookmarks.current') def _bookmarks(self): @@ -509,10 +534,8 @@ @storecache('00changelog.i') def changelog(self): c = changelog.changelog(self.svfs) - if 'HG_PENDING' in encoding.environ: - p = encoding.environ['HG_PENDING'] - if p.startswith(self.root): - c.readpending('00changelog.i.a') + if txnutil.mayhavepending(self.root): + c.readpending('00changelog.i.a') return c def _constructmanifest(self): @@ -570,15 +593,16 @@ '''Find revisions matching a revset. The revset is specified as a string ``expr`` that may contain - %-formatting to escape certain types. See ``revset.formatspec``. + %-formatting to escape certain types. See ``revsetlang.formatspec``. Revset aliases from the configuration are not expanded. To expand - user aliases, consider calling ``scmutil.revrange()``. + user aliases, consider calling ``scmutil.revrange()`` or + ``repo.anyrevs([expr], user=True)``. Returns a revset.abstractsmartset, which is a list-like interface that contains integer revisions. ''' - expr = revset.formatspec(expr, *args) + expr = revsetlang.formatspec(expr, *args) m = revset.match(None, expr) return m(self) @@ -594,6 +618,18 @@ for r in self.revs(expr, *args): yield self[r] + def anyrevs(self, specs, user=False): + '''Find revisions matching one of the given revsets. + + Revset aliases from the configuration are not expanded by default. To + expand user aliases, specify ``user=True``. 
+        '''
+        if user:
+            m = revset.matchany(self.ui, specs, repo=self)
+        else:
+            m = revset.matchany(None, specs)
+        return m(self)
+
     def url(self):
         return 'file:' + self.root
 
@@ -1852,6 +1888,11 @@
             listsubrepos)
 
     def heads(self, start=None):
+        if start is None:
+            cl = self.changelog
+            headrevs = reversed(cl.headrevs())
+            return [cl.node(rev) for rev in headrevs]
+
         heads = self.changelog.heads(start)
         # sort the output in rev descending order
         return sorted(heads, key=self.changelog.rev, reverse=True)
@@ -1973,6 +2014,14 @@
         def a():
             for vfs, src, dest in renamefiles:
                 try:
+                    # if src and dest refer to the same file, vfs.rename is a
+                    # no-op, leaving both src and dest on disk. delete dest to
+                    # make sure the rename couldn't be such a no-op.
+                    vfs.unlink(dest)
+                except OSError as ex:
+                    if ex.errno != errno.ENOENT:
+                        raise
+                try:
                     vfs.rename(src, dest)
                 except OSError: # journal file does not yet exist
                     pass
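A short sketch of the revset helpers touched above (revset strings illustrative): ``revs()`` does %-formatting via ``revsetlang.formatspec``, while ``anyrevs()`` matches any of several specs at once::

    # integer and string arguments are escaped for us by formatspec
    revs = repo.revs('ancestors(%d) and file(%s)', repo['.'].rev(), 'setup.py')

    # match any of the specs, expanding user-defined revset aliases
    drafts = repo.anyrevs(['draft()', 'secret()'], user=True)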
--- a/mercurial/lock.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/lock.py Sat Mar 11 13:53:14 2017 -0500 @@ -9,15 +9,33 @@ import contextlib import errno +import os import socket import time import warnings from . import ( error, + pycompat, util, ) +def _getlockprefix(): + """Return a string which is used to differentiate pid namespaces + + It's useful to detect "dead" processes and remove stale locks with + confidence. Typically it's just hostname. On modern linux, we include an + extra Linux-specific pid namespace identifier. + """ + result = socket.gethostname() + if pycompat.sysplatform.startswith('linux'): + try: + result += '/%x' % os.stat('/proc/self/ns/pid').st_ino + except OSError as ex: + if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR): + raise + return result + class lock(object): '''An advisory lock held by one process to control access to a set of files. Non-cooperating processes or incorrectly written scripts @@ -99,7 +117,7 @@ self.held += 1 return if lock._host is None: - lock._host = socket.gethostname() + lock._host = _getlockprefix() lockname = '%s:%s' % (lock._host, self.pid) retry = 5 while not self.held and retry:
--- a/mercurial/manifest.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/mercurial/manifest.py	Sat Mar 11 13:53:14 2017 -0500
@@ -445,8 +445,12 @@
     def keys(self):
         return list(self.iterkeys())
 
-    def filesnotin(self, m2):
+    def filesnotin(self, m2, match=None):
         '''Set of files in this manifest that are not in the other'''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.filesnotin(m2)
         diff = self.diff(m2)
         files = set(filepath
                     for filepath, hashflags in diff.iteritems()
@@ -523,7 +527,7 @@
             m._lm = self._lm.filtercopy(match)
         return m
 
-    def diff(self, m2, clean=False):
+    def diff(self, m2, match=None, clean=False):
         '''Finds changes between the current manifest and m2.
 
         Args:
@@ -538,6 +542,10 @@
             the nodeid will be None and the flags will be the empty string.
         '''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.diff(m2, clean=clean)
         return self._lm.diff(m2._lm, clean)
 
     def setflag(self, key, flag):
@@ -906,8 +914,13 @@
             copy._copyfunc = self._copyfunc
         return copy
 
-    def filesnotin(self, m2):
+    def filesnotin(self, m2, match=None):
         '''Set of files in this manifest that are not in the other'''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.filesnotin(m2)
+
         files = set()
         def _filesnotin(t1, t2):
             if t1._node == t2._node and not t1._dirty and not t2._dirty:
@@ -1025,7 +1038,7 @@
         ret._dirty = True
         return ret
 
-    def diff(self, m2, clean=False):
+    def diff(self, m2, match=None, clean=False):
         '''Finds changes between the current manifest and m2.
 
         Args:
@@ -1040,6 +1053,10 @@
             the nodeid will be None and the flags will be the empty string.
         '''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.diff(m2, clean=clean)
         result = {}
         emptytree = treemanifest()
         def _diff(t1, t2):
@@ -1132,7 +1149,12 @@
     '''A revlog that stores manifest texts. This is responsible for caching
     the full-text manifest contents.
     '''
-    def __init__(self, opener, dir='', dirlogcache=None):
+    def __init__(self, opener, dir='', dirlogcache=None, indexfile=None):
+        """Constructs a new manifest revlog
+
+        `indexfile` - used by extensions to have two manifests at once, like
+        when transitioning between flat manifests and treemanifests.
+        """
         # During normal operations, we expect to deal with not more than four
         # revs at a time (such as during commit --amend). When rebasing large
         # stacks of commits, the number can go up, hence the config knob below.
@@ -1150,12 +1172,16 @@ self._fulltextcache = util.lrucachedict(cachesize) - indexfile = "00manifest.i" if dir: assert self._treeondisk, 'opts is %r' % opts if not dir.endswith('/'): dir = dir + '/' - indexfile = "meta/" + dir + "00manifest.i" + + if indexfile is None: + indexfile = '00manifest.i' + if dir: + indexfile = "meta/" + dir + indexfile + self._dir = dir # The dirlogcache is kept on the root manifest log if dir: @@ -1224,7 +1250,7 @@ def _addtree(self, m, transaction, link, m1, m2, readtree): # If the manifest is unchanged compared to one parent, # don't write a new revision - if m.unmodifiedsince(m1) or m.unmodifiedsince(m2): + if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)): return m.node() def writesubtree(subm, subp1, subp2): sublog = self.dirlog(subm.dir()) @@ -1232,13 +1258,17 @@ readtree=readtree) m.writesubtrees(m1, m2, writesubtree) text = m.dirtext(self._usemanifestv2) - # Double-check whether contents are unchanged to one parent - if text == m1.dirtext(self._usemanifestv2): - n = m1.node() - elif text == m2.dirtext(self._usemanifestv2): - n = m2.node() - else: + n = None + if self._dir != '': + # Double-check whether contents are unchanged to one parent + if text == m1.dirtext(self._usemanifestv2): + n = m1.node() + elif text == m2.dirtext(self._usemanifestv2): + n = m2.node() + + if not n: n = self.addrevision(text, transaction, link, m1.node(), m2.node()) + # Save nodeid so parent manifest can calculate its nodeid m.setnode(n) return n @@ -1252,8 +1282,6 @@ class do not care about the implementation details of the actual manifests they receive (i.e. tree or flat or lazily loaded, etc).""" def __init__(self, opener, repo): - self._repo = repo - usetreemanifest = False cachesize = 4 @@ -1300,7 +1328,7 @@ if node not in dirlog.nodemap: raise LookupError(node, dirlog.indexfile, _('no node')) - m = treemanifestctx(self._repo, dir, node) + m = treemanifestctx(self, dir, node) else: raise error.Abort( _("cannot ask for manifest directory '%s' in a flat " @@ -1311,9 +1339,9 @@ raise LookupError(node, self._revlog.indexfile, _('no node')) if self._treeinmem: - m = treemanifestctx(self._repo, '', node) + m = treemanifestctx(self, '', node) else: - m = manifestctx(self._repo, node) + m = manifestctx(self, node) if node != revlog.nullid: mancache = self._dirmancache.get(dir) @@ -1328,18 +1356,18 @@ self._revlog.clearcaches() class memmanifestctx(object): - def __init__(self, repo): - self._repo = repo + def __init__(self, manifestlog): + self._manifestlog = manifestlog self._manifestdict = manifestdict() def _revlog(self): - return self._repo.manifestlog._revlog + return self._manifestlog._revlog def new(self): - return memmanifestctx(self._repo) + return memmanifestctx(self._manifestlog) def copy(self): - memmf = memmanifestctx(self._repo) + memmf = memmanifestctx(self._manifestlog) memmf._manifestdict = self.read().copy() return memmf @@ -1354,8 +1382,8 @@ """A class representing a single revision of a manifest, including its contents, its parent revs, and its linkrev. 
""" - def __init__(self, repo, node): - self._repo = repo + def __init__(self, manifestlog, node): + self._manifestlog = manifestlog self._data = None self._node = node @@ -1368,16 +1396,16 @@ #self.linkrev = revlog.linkrev(rev) def _revlog(self): - return self._repo.manifestlog._revlog + return self._manifestlog._revlog def node(self): return self._node def new(self): - return memmanifestctx(self._repo) + return memmanifestctx(self._manifestlog) def copy(self): - memmf = memmanifestctx(self._repo) + memmf = memmanifestctx(self._manifestlog) memmf._manifestdict = self.read().copy() return memmf @@ -1386,7 +1414,7 @@ return self._revlog().parents(self._node) def read(self): - if not self._data: + if self._data is None: if self._node == revlog.nullid: self._data = manifestdict() else: @@ -1422,7 +1450,7 @@ if revlog._usemanifestv2: # Need to perform a slow delta r0 = revlog.deltaparent(revlog.rev(self._node)) - m0 = self._repo.manifestlog[revlog.node(r0)].read() + m0 = self._manifestlog[revlog.node(r0)].read() m1 = self.read() md = manifestdict() for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems(): @@ -1440,19 +1468,19 @@ return self.read().find(key) class memtreemanifestctx(object): - def __init__(self, repo, dir=''): - self._repo = repo + def __init__(self, manifestlog, dir=''): + self._manifestlog = manifestlog self._dir = dir self._treemanifest = treemanifest() def _revlog(self): - return self._repo.manifestlog._revlog + return self._manifestlog._revlog def new(self, dir=''): - return memtreemanifestctx(self._repo, dir=dir) + return memtreemanifestctx(self._manifestlog, dir=dir) def copy(self): - memmf = memtreemanifestctx(self._repo, dir=self._dir) + memmf = memtreemanifestctx(self._manifestlog, dir=self._dir) memmf._treemanifest = self._treemanifest.copy() return memmf @@ -1461,13 +1489,13 @@ def write(self, transaction, link, p1, p2, added, removed): def readtree(dir, node): - return self._repo.manifestlog.get(dir, node).read() + return self._manifestlog.get(dir, node).read() return self._revlog().add(self._treemanifest, transaction, link, p1, p2, added, removed, readtree=readtree) class treemanifestctx(object): - def __init__(self, repo, dir, node): - self._repo = repo + def __init__(self, manifestlog, dir, node): + self._manifestlog = manifestlog self._dir = dir self._data = None @@ -1481,10 +1509,10 @@ #self.linkrev = revlog.linkrev(rev) def _revlog(self): - return self._repo.manifestlog._revlog.dirlog(self._dir) + return self._manifestlog._revlog.dirlog(self._dir) def read(self): - if not self._data: + if self._data is None: rl = self._revlog() if self._node == revlog.nullid: self._data = treemanifest() @@ -1495,8 +1523,7 @@ def readsubtree(dir, subm): # Set verify to False since we need to be able to create # subtrees for trees that don't exist on disk. 
- return self._repo.manifestlog.get(dir, subm, - verify=False).read() + return self._manifestlog.get(dir, subm, verify=False).read() m.read(gettext, readsubtree) m.setnode(self._node) self._data = m @@ -1512,10 +1539,10 @@ return self._node def new(self, dir=''): - return memtreemanifestctx(self._repo, dir=dir) + return memtreemanifestctx(self._manifestlog, dir=dir) def copy(self): - memmf = memtreemanifestctx(self._repo, dir=self._dir) + memmf = memtreemanifestctx(self._manifestlog, dir=self._dir) memmf._treemanifest = self.read().copy() return memmf @@ -1542,7 +1569,7 @@ else: # Need to perform a slow delta r0 = revlog.deltaparent(revlog.rev(self._node)) - m0 = self._repo.manifestlog.get(self._dir, revlog.node(r0)).read() + m0 = self._manifestlog.get(self._dir, revlog.node(r0)).read() m1 = self.read() md = treemanifest(dir=self._dir) for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
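The net effect of the manifest.py refactoring above is that the ctx classes hold a reference to the manifestlog instead of the whole repo, while the lookup API is unchanged. A minimal usage sketch, assuming an existing `repo` object (variable names here are illustrative only):

    mfl = repo.manifestlog
    # Indexing still resolves a manifest node to a manifestctx (or a
    # treemanifestctx for tree repos); only the internal plumbing changed.
    mctx = mfl[repo['tip'].manifestnode()]
    m = mctx.read()            # manifestdict mapping path -> filenode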
--- a/mercurial/match.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/match.py Sat Mar 11 13:53:14 2017 -0500 @@ -104,7 +104,10 @@ a pattern is one of: 'glob:<glob>' - a glob relative to cwd 're:<regexp>' - a regular expression - 'path:<path>' - a path relative to repository root + 'path:<path>' - a path relative to repository root, which is matched + recursively + 'rootfilesin:<path>' - a path relative to repository root, which is + matched non-recursively (will not match subdirectories) 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs) 'relpath:<path>' - a path relative to cwd 'relre:<regexp>' - a regexp that needn't match the start of a name @@ -122,9 +125,12 @@ self._always = False self._pathrestricted = bool(include or exclude or patterns) self._warn = warn + + # roots are directories which are recursively included/excluded. self._includeroots = set() + self._excluderoots = set() + # dirs are directories which are non-recursively included. self._includedirs = set(['.']) - self._excluderoots = set() if badfn is not None: self.bad = badfn @@ -134,14 +140,20 @@ kindpats = self._normalize(include, 'glob', root, cwd, auditor) self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)', listsubrepos, root) - self._includeroots.update(_roots(kindpats)) - self._includedirs.update(util.dirs(self._includeroots)) + roots, dirs = _rootsanddirs(kindpats) + self._includeroots.update(roots) + self._includedirs.update(dirs) matchfns.append(im) if exclude: kindpats = self._normalize(exclude, 'glob', root, cwd, auditor) self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)', listsubrepos, root) if not _anypats(kindpats): + # Only consider recursive excludes as such - if a non-recursive + # exclude is used, we must still recurse into the excluded + # directory, at least to find subdirectories. In such a case, + # the regex still won't match the non-recursively-excluded + # files. self._excluderoots.update(_roots(kindpats)) matchfns.append(lambda f: not em(f)) if exact: @@ -153,7 +165,7 @@ elif patterns: kindpats = self._normalize(patterns, default, root, cwd, auditor) if not _kindpatsalwaysmatch(kindpats): - self._files = _roots(kindpats) + self._files = _explicitfiles(kindpats) self._anypats = self._anypats or _anypats(kindpats) self.patternspat, pm = _buildmatch(ctx, kindpats, '$', listsubrepos, root) @@ -238,7 +250,7 @@ return 'all' if dir in self._excluderoots: return False - if (self._includeroots and + if ((self._includeroots or self._includedirs != set(['.'])) and '.' not in self._includeroots and dir not in self._includeroots and dir not in self._includedirs and @@ -286,7 +298,7 @@ for kind, pat in [_patsplit(p, default) for p in patterns]: if kind in ('glob', 'relpath'): pat = pathutil.canonpath(root, cwd, pat, auditor) - elif kind in ('relglob', 'path'): + elif kind in ('relglob', 'path', 'rootfilesin'): pat = util.normpath(pat) elif kind in ('listfile', 'listfile0'): try: @@ -419,7 +431,9 @@ # m.exact(file) must be based off of the actual user input, otherwise # inexact case matches are treated as exact, and not noted without -v. 
if self._files: - self._fileroots = set(_roots(self._kp)) + roots, dirs = _rootsanddirs(self._kp) + self._fileroots = set(roots) + self._fileroots.update(dirs) def _normalize(self, patterns, default, root, cwd, auditor): self._kp = super(icasefsmatcher, self)._normalize(patterns, default, @@ -447,7 +461,8 @@ if ':' in pattern: kind, pat = pattern.split(':', 1) if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre', - 'listfile', 'listfile0', 'set', 'include', 'subinclude'): + 'listfile', 'listfile0', 'set', 'include', 'subinclude', + 'rootfilesin'): return kind, pat return default, pattern @@ -540,6 +555,14 @@ if pat == '.': return '' return '^' + util.re.escape(pat) + '(?:/|$)' + if kind == 'rootfilesin': + if pat == '.': + escaped = '' + else: + # Pattern is a directory name. + escaped = util.re.escape(pat) + '/' + # Anything after the pattern must be a non-directory. + return '^' + escaped + '[^/]+$' if kind == 'relglob': return '(?:|.*/)' + _globre(pat) + globsuffix if kind == 'relpath': @@ -609,17 +632,16 @@ raise error.Abort(_("invalid pattern (%s): %s") % (k, p)) raise error.Abort(_("invalid pattern")) -def _roots(kindpats): - '''return roots and exact explicitly listed files from patterns +def _patternrootsanddirs(kindpats): + '''Returns roots and directories corresponding to each pattern. - >>> _roots([('glob', 'g/*', ''), ('glob', 'g', ''), ('glob', 'g*', '')]) - ['g', 'g', '.'] - >>> _roots([('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')]) - ['r', 'p/p', '.'] - >>> _roots([('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')]) - ['.', '.', '.'] + This calculates the roots and directories exactly matching the patterns and + returns a tuple of (roots, dirs) for each. It does not return other + directories which may also need to be considered, like the parent + directories. ''' r = [] + d = [] for kind, pat, source in kindpats: if kind == 'glob': # find the non-glob prefix root = [] @@ -630,13 +652,63 @@ r.append('/'.join(root) or '.') elif kind in ('relpath', 'path'): r.append(pat or '.') + elif kind in ('rootfilesin',): + d.append(pat or '.') else: # relglob, re, relre r.append('.') - return r + return r, d + +def _roots(kindpats): + '''Returns root directories to match recursively from the given patterns.''' + roots, dirs = _patternrootsanddirs(kindpats) + return roots + +def _rootsanddirs(kindpats): + '''Returns roots and exact directories from patterns. + + roots are directories to match recursively, whereas exact directories should + be matched non-recursively. The returned (roots, dirs) tuple will also + include directories that need to be implicitly considered as either, such as + parent directories. + + >>> _rootsanddirs(\ + [('glob', 'g/h/*', ''), ('glob', 'g/h', ''), ('glob', 'g*', '')]) + (['g/h', 'g/h', '.'], ['g']) + >>> _rootsanddirs(\ + [('rootfilesin', 'g/h', ''), ('rootfilesin', '', '')]) + ([], ['g/h', '.', 'g']) + >>> _rootsanddirs(\ + [('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')]) + (['r', 'p/p', '.'], ['p']) + >>> _rootsanddirs(\ + [('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')]) + (['.', '.', '.'], []) + ''' + r, d = _patternrootsanddirs(kindpats) + + # Append the parents as non-recursive/exact directories, since they must be + # scanned to get to either the roots or the other exact directories. + d.extend(util.dirs(d)) + d.extend(util.dirs(r)) + + return r, d + +def _explicitfiles(kindpats): + '''Returns the potential explicit filenames from the patterns. 
+ + >>> _explicitfiles([('path', 'foo/bar', '')]) + ['foo/bar'] + >>> _explicitfiles([('rootfilesin', 'foo/bar', '')]) + [] + ''' + # Keep only the pattern kinds where one can specify filenames (vs only + # directory names). + filable = [kp for kp in kindpats if kp[0] not in ('rootfilesin',)] + return _roots(filable) def _anypats(kindpats): for kind, pat, source in kindpats: - if kind in ('glob', 're', 'relglob', 'relre', 'set'): + if kind in ('glob', 're', 'relglob', 'relre', 'set', 'rootfilesin'): return True _commentre = None
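To make the new 'rootfilesin:' kind concrete: per _regex() above it compiles to '^<dir>/[^/]+$', so it matches files directly inside the named directory and nothing deeper. A standalone sketch of the equivalent regex (the directory name 'foo' is made up):

    import re

    # Regex equivalent of 'rootfilesin:foo', as built by _regex() above.
    pat = re.compile('^' + re.escape('foo') + '/' + '[^/]+$')

    assert pat.match('foo/a.txt')           # direct child: matched
    assert not pat.match('foo/bar/b.txt')   # subdirectory: not matched
    assert not pat.match('foobar/c.txt')    # shared prefix only: not matched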
--- a/mercurial/mdiff.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/mdiff.py Sat Mar 11 13:53:14 2017 -0500 @@ -196,15 +196,23 @@ yield s1, '=' def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts): + """Return a unified diff as a (headers, hunks) tuple. + + If the diff is not null, `headers` is a list with unified diff header + lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding + (hunkrange, hunklines) coming from _unidiff(). + Otherwise, `headers` and `hunks` are empty. + """ def datetag(date, fn=None): if not opts.git and not opts.nodates: - return '\t%s\n' % date + return '\t%s' % date if fn and ' ' in fn: - return '\t\n' - return '\n' + return '\t' + return '' + sentinel = [], () if not a and not b: - return "" + return sentinel if opts.noprefix: aprefix = bprefix = '' @@ -217,10 +225,17 @@ fn1 = util.pconvert(fn1) fn2 = util.pconvert(fn2) + def checknonewline(lines): + for text in lines: + if text[-1] != '\n': + text += "\n\ No newline at end of file\n" + yield text + if not opts.text and (util.binary(a) or util.binary(b)): if a and b and len(a) == len(b) and a == b: - return "" - l = ['Binary file %s has changed\n' % fn1] + return sentinel + headerlines = [] + hunks = (None, ['Binary file %s has changed\n' % fn1]), elif not a: b = splitnewlines(b) if a is None: @@ -228,8 +243,11 @@ else: l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)) l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2)) - l3 = "@@ -0,0 +1,%d @@\n" % len(b) - l = [l1, l2, l3] + ["+" + e for e in b] + headerlines = [l1, l2] + size = len(b) + hunkrange = (0, 0, 1, size) + hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b] + hunks = (hunkrange, checknonewline(hunklines)), elif not b: a = splitnewlines(a) l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)) @@ -237,28 +255,42 @@ l2 = '+++ /dev/null%s' % datetag(epoch) else: l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)) - l3 = "@@ -1,%d +0,0 @@\n" % len(a) - l = [l1, l2, l3] + ["-" + e for e in a] + headerlines = [l1, l2] + size = len(a) + hunkrange = (1, size, 0, 0) + hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a] + hunks = (hunkrange, checknonewline(hunklines)), else: - al = splitnewlines(a) - bl = splitnewlines(b) - l = list(_unidiff(a, b, al, bl, opts=opts)) - if not l: - return "" + diffhunks = _unidiff(a, b, opts=opts) + try: + hunkrange, hunklines = next(diffhunks) + except StopIteration: + return sentinel - l.insert(0, "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))) - l.insert(1, "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))) + headerlines = [ + "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)), + "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)), + ] + def rewindhunks(): + yield hunkrange, checknonewline(hunklines) + for hr, hl in diffhunks: + yield hr, checknonewline(hl) - for ln in xrange(len(l)): - if l[ln][-1] != '\n': - l[ln] += "\n\ No newline at end of file\n" + hunks = rewindhunks() - return "".join(l) + return headerlines, hunks + +def _unidiff(t1, t2, opts=defaultopts): + """Yield hunks of a headerless unified diff from t1 and t2 texts. 
-# creates a headerless unified diff -# t1 and t2 are the text to be diffed -# l1 and l2 are the text broken up into lines -def _unidiff(t1, t2, l1, l2, opts=defaultopts): + Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a + tuple (s1, l1, s2, l2) representing the range information of the hunk to + form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines + of the hunk combining said header followed by line additions and + deletions. + """ + l1 = splitnewlines(t1) + l2 = splitnewlines(t2) def contextend(l, len): ret = l + opts.context if ret > len: @@ -300,12 +332,13 @@ if blen: bstart += 1 - yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen, - bstart, blen, func) - for x in delta: - yield x - for x in xrange(a2, aend): - yield ' ' + l1[x] + hunkrange = astart, alen, bstart, blen + hunklines = ( + ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))] + + delta + + [' ' + l1[x] for x in xrange(a2, aend)] + ) + yield hunkrange, hunklines # bdiff.blocks gives us the matching sequences in the files. The loop # below finds the spaces between those matching sequences and translates
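Callers of mdiff.unidiff() now get structured data back instead of flat text. A hedged sketch of rebuilding the old flat output from the new (headers, hunks) shape, mirroring what patch.diff() does further down; the inputs are made up:

    from mercurial import mdiff

    a, b = 'one\ntwo\n', 'one\n2\ntwo\n'
    headers, hunks = mdiff.unidiff(a, '', b, '', 'f', 'f')
    text = '\n'.join(headers) + '\n'
    for hunkrange, hunklines in hunks:
        # hunkrange is (s1, l1, s2, l2) for the '@@ -s1,l1 +s2,l2 @@' line,
        # which is already included as the first entry of hunklines.
        text += ''.join(hunklines)
    print(text)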
--- a/mercurial/merge.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/merge.py Sat Mar 11 13:53:14 2017 -0500 @@ -27,6 +27,7 @@ copies, error, filemerge, + match as matchmod, obsolete, pycompat, scmutil, @@ -818,11 +819,10 @@ if any(wctx.sub(s).dirty() for s in wctx.substate): m1['.hgsubstate'] = modifiednodeid - # Compare manifests - if matcher is not None: - m1 = m1.matches(matcher) - m2 = m2.matches(matcher) - diff = m1.diff(m2) + diff = m1.diff(m2, match=matcher) + + if matcher is None: + matcher = matchmod.always('', '') actions = {} for f, ((n1, fl1), (n2, fl2)) in diff.iteritems(): @@ -858,7 +858,7 @@ pass # we'll deal with it on m2 side elif f in movewithdir: # directory rename, move local f2 = movewithdir[f] - if f2 in m2: + if matcher(f2) and f2 in m2: actions[f2] = ('m', (f, f2, None, True, pa.node()), "remote directory rename, both created") else: @@ -887,7 +887,7 @@ pass # we'll deal with it on m1 side elif f in movewithdir: f2 = movewithdir[f] - if f2 in m1: + if matcher(f2) and f2 in m1: actions[f2] = ('m', (f2, f, None, False, pa.node()), "local directory rename, both created") else: @@ -895,7 +895,7 @@ "local directory rename - get from " + f) elif f in copy: f2 = copy[f] - if f2 in m2: + if matcher(f2) and f2 in m2: actions[f] = ('m', (f2, f, f2, False, pa.node()), "remote copied from " + f2) else: @@ -927,7 +927,7 @@ # new file added in a directory that was moved df = dirmove[d] + f[len(d):] break - if df in m1: + if matcher(df) and df in m1: actions[df] = ('m', (df, f, f, False, pa.node()), "local directory rename - respect move from " + f) elif acceptremote: @@ -1444,11 +1444,12 @@ repo.dirstate.normal(f) def update(repo, node, branchmerge, force, ancestor=None, - mergeancestor=False, labels=None, matcher=None, mergeforce=False): + mergeancestor=False, labels=None, matcher=None, mergeforce=False, + updatecheck=None): """ Perform a merge between the working directory and the given node - node = the node to update to, or None if unspecified + node = the node to update to branchmerge = whether to merge between branches force = whether to force branch merging or file overwriting matcher = a matcher to filter file lists (dirstate not updated) @@ -1464,34 +1465,47 @@ The table below shows all the behaviors of the update command given the -c and -C or no options, whether the working directory is dirty, whether a revision is specified, and the relationship of - the parent rev to the target rev (linear, on the same named - branch, or on another named branch). + the parent rev to the target rev (linear or not). Match from top first. The + -n option doesn't exist on the command line, but represents the + experimental.updatecheck=noconflict option. This logic is tested by test-update-branches.t. 
-    -c  -C  dirty  rev  |  linear   same  cross
-     n   n    n     n   |    ok     (1)     x
-     n   n    n     y   |    ok     ok     ok
-     n   n    y     n   |   merge   (2)    (2)
-     n   n    y     y   |   merge   (3)    (3)
-     n   y    *     *   |  discard discard discard
-     y   n    y     *   |    (4)    (4)    (4)
-     y   n    n     *   |    ok     ok     ok
-     y   y    *     *   |    (5)    (5)    (5)
+    -c  -C  -n  -m  dirty  rev  linear  |  result
+     y   y   *   *    *     *     *     |    (1)
+     y   *   y   *    *     *     *     |    (1)
+     y   *   *   y    *     *     *     |    (1)
+     *   y   y   *    *     *     *     |    (1)
+     *   y   *   y    *     *     *     |    (1)
+     *   *   y   y    *     *     *     |    (1)
+     *   *   *   *    *     n     n     |     x
+     *   *   *   *    n     *     *     |    ok
+     n   n   n   n    y     *     y     |  merge
+     n   n   n   n    y     y     n     |    (2)
+     n   n   n   y    y     *     *     |  merge
+     n   n   y   n    y     *     *     |  merge if no conflict
+     n   y   n   n    y     *     *     |  discard
+     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
-    1 = abort: not a linear update (merge or update --check to force update)
-    2 = abort: uncommitted changes (commit and merge, or update --clean to
-        discard changes)
-    3 = abort: uncommitted changes (commit or update --clean to discard changes)
-    4 = abort: uncommitted changes (checked in commands.py)
-    5 = incompatible options (checked in commands.py)
+    1 = incompatible options (checked in commands.py)
+    2 = abort: uncommitted changes (commit or update --clean to discard changes)
+    3 = abort: uncommitted changes (checked in commands.py)

    Return the same tuple as applyupdates().
    """
-    onode = node
+    # This function used to find the default destination if node was None, but
+    # that's now in destutil.py.
+    assert node is not None
+    if not branchmerge and not force:
+        # TODO: remove the default once all callers that pass branchmerge=False
+        # and force=False pass a value for updatecheck. We may want to allow
+        # updatecheck='abort' to better support some of these callers.
+        if updatecheck is None:
+            updatecheck = 'linear'
+        assert updatecheck in ('none', 'linear', 'noconflict')
     # If we're doing a partial update, we need to skip updating
     # the dirstate, so make a note of any partial-ness to the
     # update here.
@@ -1548,39 +1562,33 @@
             repo.hook('update', parent1=xp2, parent2='', error=0)
             return 0, 0, 0, 0

-        if pas not in ([p1], [p2]):  # nonlinear
+        if (updatecheck == 'linear' and
+                pas not in ([p1], [p2])):  # nonlinear
             dirty = wc.dirty(missing=True)
-            if dirty or onode is None:
+            if dirty:
                 # Branching is a bit strange to ensure we do the minimal
-                # amount of call to obsolete.background.
+                # amount of call to obsolete.foreground.
foreground = obsolete.foreground(repo, [p1.node()]) # note: the <node> variable contains a random identifier if repo[node].node() in foreground: - pas = [p1] # allow updating to successors - elif dirty: + pass # allow updating to successors + else: msg = _("uncommitted changes") - if onode is None: - hint = _("commit and merge, or update --clean to" - " discard changes") - else: - hint = _("commit or update --clean to discard" - " changes") - raise error.Abort(msg, hint=hint) - else: # node is none - msg = _("not a linear update") - hint = _("merge or update --check to force update") - raise error.Abort(msg, hint=hint) + hint = _("commit or update --clean to discard changes") + raise error.UpdateAbort(msg, hint=hint) else: # Allow jumping branches if clean and specific rev given - pas = [p1] + pass + + if overwrite: + pas = [wc] + elif not branchmerge: + pas = [p1] # deprecated config: merge.followcopies followcopies = repo.ui.configbool('merge', 'followcopies', True) if overwrite: - pas = [wc] followcopies = False - elif pas == [p2]: # backwards - pas = [p1] elif not pas[0]: followcopies = False if not branchmerge and not wc.dirty(missing=True): @@ -1591,6 +1599,13 @@ repo, wc, p2, pas, branchmerge, force, mergeancestor, followcopies, matcher=matcher, mergeforce=mergeforce) + if updatecheck == 'noconflict': + for f, (m, args, msg) in actionbyfile.iteritems(): + if m not in ('g', 'k', 'r'): + msg = _("conflicting changes") + hint = _("commit or update --clean to discard changes") + raise error.Abort(msg, hint=hint) + # Prompt and create actions. Most of this is in the resolve phase # already, but we can't handle .hgsubstate in filemerge or # subrepo.submerge yet so we have to keep prompting for it.
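For callers, the new keyword corresponds to the experimental.updatecheck values described in the table above. A hedged sketch (repo and node are assumed to exist; this is not the only path into this code):

    from mercurial import merge

    # Plain update that refuses to start if it would create conflicts:
    # with updatecheck='noconflict', any action other than get ('g'),
    # keep ('k') or remove ('r') aborts with "conflicting changes".
    stats = merge.update(repo, node, branchmerge=False, force=False,
                         updatecheck='noconflict')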
--- a/mercurial/minirst.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/minirst.py Sat Mar 11 13:53:14 2017 -0500 @@ -138,7 +138,7 @@ i += 1 return blocks -_bulletre = re.compile(r'(-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ') +_bulletre = re.compile(r'(\*|-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ') _optionre = re.compile(r'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)' r'((.*) +)(.*)$') _fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):[ ]+(.*)') @@ -411,18 +411,33 @@ i += 1 return blocks -_admonitionre = re.compile(r"\.\. (admonition|attention|caution|danger|" - r"error|hint|important|note|tip|warning)::", - flags=re.IGNORECASE) -def findadmonitions(blocks): +_admonitions = set([ + 'admonition', + 'attention', + 'caution', + 'danger', + 'error', + 'hint', + 'important', + 'note', + 'tip', + 'warning', +]) + +def findadmonitions(blocks, admonitions=None): """ Makes the type of the block an admonition block if the first line is an admonition directive """ + admonitions = admonitions or _admonitions + + admonitionre = re.compile(r'\.\. (%s)::' % '|'.join(sorted(admonitions)), + flags=re.IGNORECASE) + i = 0 while i < len(blocks): - m = _admonitionre.match(blocks[i]['lines'][0]) + m = admonitionre.match(blocks[i]['lines'][0]) if m: blocks[i]['type'] = 'admonition' admonitiontitle = blocks[i]['lines'][0][3:m.end() - 2].lower() @@ -596,7 +611,7 @@ out.append(' <dt>%s\n <dd>%s\n' % (term, text)) elif btype == 'bullet': bullet, head = lines[0].split(' ', 1) - if bullet == '-': + if bullet in ('*', '-'): openlist('ul', level) else: openlist('ol', level) @@ -629,7 +644,7 @@ return ''.join(out) -def parse(text, indent=0, keep=None): +def parse(text, indent=0, keep=None, admonitions=None): """Parse text into a list of blocks""" pruned = [] blocks = findblocks(text) @@ -644,7 +659,7 @@ blocks = splitparagraphs(blocks) blocks = updatefieldlists(blocks) blocks = updateoptionlists(blocks) - blocks = findadmonitions(blocks) + blocks = findadmonitions(blocks, admonitions=admonitions) blocks = addmargins(blocks) blocks = prunecomments(blocks) return blocks, pruned
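The new admonitions argument lets a caller restrict (or, in principle, extend) the set of directives recognized as admonitions. A minimal sketch under that assumption:

    from mercurial import minirst

    text = '.. note:: remember to run the tests\n'
    # Only 'note' is treated as an admonition here; any other directive
    # would be left alone (and likely pruned as a comment).
    blocks, pruned = minirst.parse(text, admonitions=set(['note']))
    assert blocks[0]['type'] == 'admonition'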
--- a/mercurial/obsolete.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/obsolete.py Sat Mar 11 13:53:14 2017 -0500 @@ -1120,7 +1120,7 @@ """the set of obsolete revisions""" obs = set() getnode = repo.changelog.node - notpublic = repo.revs("not public()") + notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret)) for r in notpublic: if getnode(r) in repo.obsstore.successors: obs.add(r)
--- a/mercurial/parsers.c Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/parsers.c Sat Mar 11 13:53:14 2017 -0500 @@ -560,11 +560,11 @@ } /* - * Build a set of non-normal entries from the dirstate dmap + * Build a set of non-normal and other parent entries from the dirstate dmap */ -static PyObject *nonnormalentries(PyObject *self, PyObject *args) -{ - PyObject *dmap, *nonnset = NULL, *fname, *v; +static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args) { + PyObject *dmap, *fname, *v; + PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL; Py_ssize_t pos; if (!PyArg_ParseTuple(args, "O!:nonnormalentries", @@ -575,6 +575,10 @@ if (nonnset == NULL) goto bail; + otherpset = PySet_New(NULL); + if (otherpset == NULL) + goto bail; + pos = 0; while (PyDict_Next(dmap, &pos, &fname, &v)) { dirstateTupleObject *t; @@ -585,15 +589,28 @@ } t = (dirstateTupleObject *)v; + if (t->state == 'n' && t->size == -2) { + if (PySet_Add(otherpset, fname) == -1) { + goto bail; + } + } + if (t->state == 'n' && t->mtime != -1) continue; if (PySet_Add(nonnset, fname) == -1) goto bail; } - return nonnset; + result = Py_BuildValue("(OO)", nonnset, otherpset); + if (result == NULL) + goto bail; + Py_DECREF(nonnset); + Py_DECREF(otherpset); + return result; bail: Py_XDECREF(nonnset); + Py_XDECREF(otherpset); + Py_XDECREF(result); return NULL; } @@ -2814,8 +2831,9 @@ static PyMethodDef methods[] = { {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"}, - {"nonnormalentries", nonnormalentries, METH_VARARGS, - "create a set containing non-normal entries of given dirstate\n"}, + {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS, + "create a set containing non-normal and other parent entries of given " + "dirstate\n"}, {"parse_manifest", parse_manifest, METH_VARARGS, "parse a manifest\n"}, {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"}, {"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"},
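Seen from Python, the renamed entry point now returns a pair of sets rather than one. A hedged sketch; dmap stands in for a real dirstate map ({filename: dirstateTuple}), such as the one backing repo.dirstate:

    from mercurial import parsers

    # nonnset: entries whose state is not 'n' or whose mtime is -1.
    # otherpset: entries with state 'n' and size -2, i.e. coming from
    # the other parent during a merge.
    nonnset, otherpset = parsers.nonnormalotherparententries(dmap)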
--- a/mercurial/patch.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/patch.py Sat Mar 11 13:53:14 2017 -0500 @@ -34,9 +34,11 @@ mail, mdiff, pathutil, + pycompat, scmutil, similar, util, + vfs as vfsmod, ) stringio = util.stringio @@ -209,7 +211,7 @@ data = {} fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') - tmpfp = os.fdopen(fd, 'w') + tmpfp = os.fdopen(fd, pycompat.sysstr('w')) try: msg = email.Parser.Parser().parse(fileobj) @@ -448,7 +450,7 @@ class fsbackend(abstractbackend): def __init__(self, ui, basedir): super(fsbackend, self).__init__(ui) - self.opener = scmutil.opener(basedir) + self.opener = vfsmod.vfs(basedir) def _join(self, f): return os.path.join(self.opener.base, f) @@ -559,7 +561,7 @@ else: if self.opener is None: root = tempfile.mkdtemp(prefix='hg-patch-') - self.opener = scmutil.opener(root) + self.opener = vfsmod.vfs(root) # Avoid filename issues with these simple names fn = str(self.created) self.opener.write(fn, data) @@ -1055,7 +1057,7 @@ ncpatchfp = None try: # Write the initial patch - f = os.fdopen(patchfd, "w") + f = os.fdopen(patchfd, pycompat.sysstr("w")) chunk.header.write(f) chunk.write(f) f.write('\n'.join(['# ' + i for i in phelp.splitlines()])) @@ -1063,7 +1065,8 @@ # Start the editor and wait for it to complete editor = ui.geteditor() ret = ui.system("%s \"%s\"" % (editor, patchfn), - environ={'HGUSER': ui.username()}) + environ={'HGUSER': ui.username()}, + blockedtag='filterpatch') if ret != 0: ui.warn(_("editor exited with exit code %d\n") % ret) continue @@ -2212,8 +2215,8 @@ return mdiff.diffopts(**buildopts) -def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, - losedatafn=None, prefix='', relroot='', copy=None): +def diff(repo, node1=None, node2=None, match=None, changes=None, + opts=None, losedatafn=None, prefix='', relroot='', copy=None): '''yields diff of changes to files between two nodes, or node and working directory. @@ -2236,6 +2239,24 @@ copy, if not empty, should contain mappings {dst@y: src@x} of copy information.''' + for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match, + changes=changes, opts=opts, + losedatafn=losedatafn, prefix=prefix, + relroot=relroot, copy=copy): + text = ''.join(sum((list(hlines) for hrange, hlines in hunks), [])) + if header and (text or len(header) > 1): + yield '\n'.join(header) + '\n' + if text: + yield text + +def diffhunks(repo, node1=None, node2=None, match=None, changes=None, + opts=None, losedatafn=None, prefix='', relroot='', copy=None): + """Yield diff of changes to files in the form of (`header`, `hunks`) tuples + where `header` is a list of diff headers and `hunks` is an iterable of + (`hunkrange`, `hunklines`) tuples. + + See diff() for the meaning of parameters. + """ if opts is None: opts = mdiff.defaultopts @@ -2536,6 +2557,7 @@ if text: header.append('index %s..%s' % (gitindex(content1), gitindex(content2))) + hunks = (None, [text]), else: if opts.git and opts.index > 0: flag = flag1 @@ -2546,13 +2568,11 @@ gitindex(content2)[0:opts.index], gitmode[flag])) - text = mdiff.unidiff(content1, date1, - content2, date2, - path1, path2, opts=opts) - if header and (text or len(header) > 1): - yield '\n'.join(header) + '\n' - if text: - yield text + uheaders, hunks = mdiff.unidiff(content1, date1, + content2, date2, + path1, path2, opts=opts) + header.extend(uheaders) + yield header, hunks def diffstatsum(stats): maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
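diffhunks() exposes the structured form directly, so a caller can walk hunks without assembling text first. A sketch, assuming repo, node1 and node2 exist:

    from mercurial import patch

    for header, hunks in patch.diffhunks(repo, node1=node1, node2=node2):
        print('\n'.join(header))        # '--- ...' / '+++ ...' lines etc.
        for hunkrange, hunklines in hunks:
            if hunkrange is None:       # binary file or bare git index entry
                continue
            print('  @@ -%d,%d +%d,%d @@' % hunkrange)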
--- a/mercurial/phases.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/phases.py Sat Mar 11 13:53:14 2017 -0500 @@ -113,8 +113,9 @@ short, ) from . import ( - encoding, error, + smartset, + txnutil, ) allphases = public, draft, secret = range(3) @@ -136,15 +137,7 @@ dirty = False roots = [set() for i in allphases] try: - f = None - if 'HG_PENDING' in encoding.environ: - try: - f = repo.svfs('phaseroots.pending') - except IOError as inst: - if inst.errno != errno.ENOENT: - raise - if f is None: - f = repo.svfs('phaseroots') + f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots') try: for line in f: phase, nh = line.split() @@ -170,6 +163,27 @@ self.filterunknown(repo) self.opener = repo.svfs + def getrevset(self, repo, phases): + """return a smartset for the given phases""" + self.loadphaserevs(repo) # ensure phase's sets are loaded + + if self._phasesets and all(self._phasesets[p] is not None + for p in phases): + # fast path - use _phasesets + revs = self._phasesets[phases[0]] + if len(phases) > 1: + revs = revs.copy() # only copy when needed + for p in phases[1:]: + revs.update(self._phasesets[p]) + if repo.changelog.filteredrevs: + revs = revs - repo.changelog.filteredrevs + return smartset.baseset(revs) + else: + # slow path - enumerate all revisions + phase = self.phase + revs = (r for r in repo if phase(repo, r) in phases) + return smartset.generatorset(revs, iterasc=True) + def copy(self): # Shallow copy meant to ensure isolation in # advance/retractboundary(), nothing more. @@ -210,12 +224,8 @@ """ensure phase information is loaded in the object""" if self._phaserevs is None: try: - if repo.ui.configbool('experimental', - 'nativephaseskillswitch'): - self._computephaserevspure(repo) - else: - res = self._getphaserevsnative(repo) - self._phaserevs, self._phasesets = res + res = self._getphaserevsnative(repo) + self._phaserevs, self._phasesets = res except AttributeError: self._computephaserevspure(repo)
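The obsolete.py hunk above is the first user of this helper; calling it directly looks like this (repo assumed):

    from mercurial import phases

    # The fast path unions the precomputed _phasesets and subtracts
    # filtered revs; the slow path enumerates every revision in the repo.
    notpublic = repo._phasecache.getrevset(repo,
                                           (phases.draft, phases.secret))
    secrets = repo._phasecache.getrevset(repo, (phases.secret,))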
--- a/mercurial/profiling.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/profiling.py Sat Mar 11 13:53:14 2017 -0500 @@ -8,7 +8,6 @@ from __future__ import absolute_import, print_function import contextlib -import time from .i18n import _ from . import ( @@ -66,7 +65,7 @@ collapse_recursion = True thread = flamegraph.ProfileThread(fp, 1.0 / freq, filter_, collapse_recursion) - start_time = time.clock() + start_time = util.timer() try: thread.start() yield @@ -74,7 +73,7 @@ thread.stop() thread.join() print('Collected %d stack frames (%d unique) in %2.2f seconds.' % ( - time.clock() - start_time, thread.num_frames(), + util.timer() - start_time, thread.num_frames(), thread.num_frames(unique=True))) @contextlib.contextmanager @@ -103,6 +102,7 @@ 'bymethod': statprof.DisplayFormats.ByMethod, 'hotpath': statprof.DisplayFormats.Hotpath, 'json': statprof.DisplayFormats.Json, + 'chrome': statprof.DisplayFormats.Chrome, } if profformat in formats: @@ -111,7 +111,23 @@ ui.warn(_('unknown profiler output format: %s\n') % profformat) displayformat = statprof.DisplayFormats.Hotpath - statprof.display(fp, data=data, format=displayformat) + kwargs = {} + + def fraction(s): + if s.endswith('%'): + v = float(s[:-1]) / 100 + else: + v = float(s) + if 0 <= v <= 1: + return v + raise ValueError(s) + + if profformat == 'chrome': + showmin = ui.configwith(fraction, 'profiling', 'showmin', 0.005) + showmax = ui.configwith(fraction, 'profiling', 'showmax', 0.999) + kwargs.update(minthreshold=showmin, maxthreshold=showmax) + + statprof.display(fp, data=data, format=displayformat, **kwargs) @contextlib.contextmanager def profile(ui):
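The fraction helper accepts either a bare float or a percentage string, which is what makes 'profiling.showmin = 5%' and '0.05' equivalent. A standalone restatement with usage checks:

    def fraction(s):
        # Parse '0.05' or '5%' into a float; values outside [0, 1] raise.
        if s.endswith('%'):
            v = float(s[:-1]) / 100
        else:
            v = float(s)
        if 0 <= v <= 1:
            return v
        raise ValueError(s)

    assert fraction('5%') == 0.05
    assert fraction('0.999') == 0.999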
--- a/mercurial/pure/osutil.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/pure/osutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -338,7 +338,7 @@ _kernel32.CloseHandle(fh) _raiseioerror(name) - f = os.fdopen(fd, mode, bufsize) + f = os.fdopen(fd, pycompat.sysstr(mode), bufsize) # unfortunately, f.name is '<fdopen>' at this point -- so we store # the name on this wrapper. We cannot just assign to f.name, # because that attribute is read-only.
--- a/mercurial/pure/parsers.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/pure/parsers.py Sat Mar 11 13:53:14 2017 -0500 @@ -14,6 +14,9 @@ from . import pycompat stringio = pycompat.stringio +if pycompat.ispy3: + long = int + _pack = struct.pack _unpack = struct.unpack _compress = zlib.compress
--- a/mercurial/pycompat.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/pycompat.py Sat Mar 11 13:53:14 2017 -0500 @@ -72,6 +72,9 @@ if getattr(sys, 'argv', None) is not None: sysargv = list(map(os.fsencode, sys.argv)) + def bytechr(i): + return bytes([i]) + def sysstr(s): """Return a keyword str to be passed to Python functions such as getattr() and str.encode() @@ -97,6 +100,9 @@ setattr = _wrapattrfunc(builtins.setattr) xrange = builtins.range + def open(name, mode='r', buffering=-1): + return builtins.open(name, sysstr(mode), buffering) + # getopt.getopt() on Python 3 deals with unicodes internally so we cannot # pass bytes there. Passing unicodes will result in unicodes as return # values which we need to convert again to bytes. @@ -132,6 +138,8 @@ return [a.encode('latin-1') for a in ret] else: + bytechr = chr + def sysstr(s): return s
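Two small shims worth calling out: bytechr() yields a length-1 bytes from an integer on both major versions, and the open() wrapper (defined only on the Python 3 branch) converts a bytes mode string before delegating to the builtin. A quick sketch:

    from mercurial import pycompat

    assert pycompat.bytechr(0x68) == b'h'  # bytes([104]) on py3, chr() on py2

    # Python 3 only: mode arrives as bytes elsewhere in the code base and
    # is converted with sysstr() before reaching builtins.open().
    # f = pycompat.open(b'somefile', b'rb')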
--- a/mercurial/repair.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/repair.py Sat Mar 11 13:53:14 2017 -0500 @@ -12,7 +12,6 @@ import hashlib import stat import tempfile -import time from .i18n import _ from .node import short @@ -27,6 +26,7 @@ revlog, scmutil, util, + vfs as vfsmod, ) def _bundle(repo, bases, heads, node, suffix, compress=True): @@ -883,7 +883,7 @@ ui.write(_('data fully migrated to temporary repository\n')) backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path) - backupvfs = scmutil.vfs(backuppath) + backupvfs = vfsmod.vfs(backuppath) # Make a backup of requires file first, as it is the first to be modified. util.copyfile(srcrepo.join('requires'), backupvfs.join('requires')) @@ -905,10 +905,10 @@ # the operation nearly instantaneous and atomic (at least in well-behaved # environments). ui.write(_('replacing store...\n')) - tstart = time.time() + tstart = util.timer() util.rename(srcrepo.spath, backupvfs.join('store')) util.rename(dstrepo.spath, srcrepo.spath) - elapsed = time.time() - tstart + elapsed = util.timer() - tstart ui.write(_('store replacement complete; repository was inconsistent for ' '%0.1fs\n') % elapsed)
--- a/mercurial/repoview.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/repoview.py Sat Mar 11 13:53:14 2017 -0500 @@ -139,15 +139,13 @@ if wlock: wlock.release() -def tryreadcache(repo, hideable): - """read a cache if the cache exists and is valid, otherwise returns None.""" +def _readhiddencache(repo, cachefilename, newhash): hidden = fh = None try: if repo.vfs.exists(cachefile): fh = repo.vfs.open(cachefile, 'rb') version, = struct.unpack(">H", fh.read(2)) oldhash = fh.read(20) - newhash = cachehash(repo, hideable) if (cacheversion, oldhash) == (version, newhash): # cache is valid, so we can start reading the hidden revs data = fh.read() @@ -165,6 +163,11 @@ if fh: fh.close() +def tryreadcache(repo, hideable): + """read a cache if the cache exists and is valid, otherwise returns None.""" + newhash = cachehash(repo, hideable) + return _readhiddencache(repo, cachefile, newhash) + def computehidden(repo): """compute the set of hidden revision to filter @@ -297,10 +300,10 @@ """ def __init__(self, repo, filtername): - object.__setattr__(self, '_unfilteredrepo', repo) - object.__setattr__(self, 'filtername', filtername) - object.__setattr__(self, '_clcachekey', None) - object.__setattr__(self, '_clcache', None) + object.__setattr__(self, r'_unfilteredrepo', repo) + object.__setattr__(self, r'filtername', filtername) + object.__setattr__(self, r'_clcachekey', None) + object.__setattr__(self, r'_clcache', None) # not a propertycache on purpose we shall implement a proper cache later @property
--- a/mercurial/revset.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/revset.py Sat Mar 11 13:53:14 2017 -0500 @@ -9,7 +9,6 @@ import heapq import re -import string from .i18n import _ from . import ( @@ -20,15 +19,34 @@ match as matchmod, node, obsolete as obsmod, - parser, pathutil, phases, - pycompat, registrar, repoview, + revsetlang, + smartset, util, ) +# helpers for processing parsed tree +getsymbol = revsetlang.getsymbol +getstring = revsetlang.getstring +getinteger = revsetlang.getinteger +getlist = revsetlang.getlist +getrange = revsetlang.getrange +getargs = revsetlang.getargs +getargsdict = revsetlang.getargsdict + +# constants used as an argument of match() and matchany() +anyorder = revsetlang.anyorder +defineorder = revsetlang.defineorder +followorder = revsetlang.followorder + +baseset = smartset.baseset +generatorset = smartset.generatorset +spanset = smartset.spanset +fullreposet = smartset.fullreposet + def _revancestors(repo, revs, followfirst): """Like revlog.ancestors(), but supports followfirst.""" if followfirst: @@ -146,213 +164,8 @@ revs.sort() return revs -elements = { - # token-type: binding-strength, primary, prefix, infix, suffix - "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None), - "##": (20, None, None, ("_concat", 20), None), - "~": (18, None, None, ("ancestor", 18), None), - "^": (18, None, None, ("parent", 18), "parentpost"), - "-": (5, None, ("negate", 19), ("minus", 5), None), - "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), - "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), - ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"), - "not": (10, None, ("not", 10), None, None), - "!": (10, None, ("not", 10), None, None), - "and": (5, None, None, ("and", 5), None), - "&": (5, None, None, ("and", 5), None), - "%": (5, None, None, ("only", 5), "onlypost"), - "or": (4, None, None, ("or", 4), None), - "|": (4, None, None, ("or", 4), None), - "+": (4, None, None, ("or", 4), None), - "=": (3, None, None, ("keyvalue", 3), None), - ",": (2, None, None, ("list", 2), None), - ")": (0, None, None, None, None), - "symbol": (0, "symbol", None, None, None), - "string": (0, "string", None, None, None), - "end": (0, None, None, None, None), -} - -keywords = set(['and', 'or', 'not']) - -# default set of valid characters for the initial letter of symbols -_syminitletters = set( - string.ascii_letters + - string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256))) - -# default set of valid characters for non-initial letters of symbols -_symletters = _syminitletters | set(pycompat.sysstr('-/')) - -def tokenize(program, lookup=None, syminitletters=None, symletters=None): - ''' - Parse a revset statement into a stream of tokens - - ``syminitletters`` is the set of valid characters for the initial - letter of symbols. - - By default, character ``c`` is recognized as valid for initial - letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``. - - ``symletters`` is the set of valid characters for non-initial - letters of symbols. - - By default, character ``c`` is recognized as valid for non-initial - letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``. 
- - Check that @ is a valid unquoted token character (issue3686): - >>> list(tokenize("@::")) - [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)] - - ''' - if syminitletters is None: - syminitletters = _syminitletters - if symletters is None: - symletters = _symletters - - if program and lookup: - # attempt to parse old-style ranges first to deal with - # things like old-tag which contain query metacharacters - parts = program.split(':', 1) - if all(lookup(sym) for sym in parts if sym): - if parts[0]: - yield ('symbol', parts[0], 0) - if len(parts) > 1: - s = len(parts[0]) - yield (':', None, s) - if parts[1]: - yield ('symbol', parts[1], s + 1) - yield ('end', None, len(program)) - return - - pos, l = 0, len(program) - while pos < l: - c = program[pos] - if c.isspace(): # skip inter-token whitespace - pass - elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully - yield ('::', None, pos) - pos += 1 # skip ahead - elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully - yield ('..', None, pos) - pos += 1 # skip ahead - elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully - yield ('##', None, pos) - pos += 1 # skip ahead - elif c in "():=,-|&+!~^%": # handle simple operators - yield (c, None, pos) - elif (c in '"\'' or c == 'r' and - program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings - if c == 'r': - pos += 1 - c = program[pos] - decode = lambda x: x - else: - decode = parser.unescapestr - pos += 1 - s = pos - while pos < l: # find closing quote - d = program[pos] - if d == '\\': # skip over escaped characters - pos += 2 - continue - if d == c: - yield ('string', decode(program[s:pos]), s) - break - pos += 1 - else: - raise error.ParseError(_("unterminated string"), s) - # gather up a symbol/keyword - elif c in syminitletters: - s = pos - pos += 1 - while pos < l: # find end of symbol - d = program[pos] - if d not in symletters: - break - if d == '.' and program[pos - 1] == '.': # special case for .. 
- pos -= 1 - break - pos += 1 - sym = program[s:pos] - if sym in keywords: # operator keywords - yield (sym, None, s) - elif '-' in sym: - # some jerk gave us foo-bar-baz, try to check if it's a symbol - if lookup and lookup(sym): - # looks like a real symbol - yield ('symbol', sym, s) - else: - # looks like an expression - parts = sym.split('-') - for p in parts[:-1]: - if p: # possible consecutive - - yield ('symbol', p, s) - s += len(p) - yield ('-', None, pos) - s += 1 - if parts[-1]: # possible trailing - - yield ('symbol', parts[-1], s) - else: - yield ('symbol', sym, s) - pos -= 1 - else: - raise error.ParseError(_("syntax error in revset '%s'") % - program, pos) - pos += 1 - yield ('end', None, pos) - # helpers -_notset = object() - -def getsymbol(x): - if x and x[0] == 'symbol': - return x[1] - raise error.ParseError(_('not a symbol')) - -def getstring(x, err): - if x and (x[0] == 'string' or x[0] == 'symbol'): - return x[1] - raise error.ParseError(err) - -def getinteger(x, err, default=_notset): - if not x and default is not _notset: - return default - try: - return int(getstring(x, err)) - except ValueError: - raise error.ParseError(err) - -def getlist(x): - if not x: - return [] - if x[0] == 'list': - return list(x[1:]) - return [x] - -def getrange(x, err): - if not x: - raise error.ParseError(err) - op = x[0] - if op == 'range': - return x[1], x[2] - elif op == 'rangepre': - return None, x[1] - elif op == 'rangepost': - return x[1], None - elif op == 'rangeall': - return None, None - raise error.ParseError(err) - -def getargs(x, min, max, err): - l = getlist(x) - if len(l) < min or (max >= 0 and len(l) > max): - raise error.ParseError(err) - return l - -def getargsdict(x, funcname, keys): - return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys), - keyvaluenode='keyvalue', keynode='symbol') - def getset(repo, subset, x): if not x: raise error.ParseError(_("missing argument")) @@ -501,7 +314,7 @@ @predicate('_destupdate') def _destupdate(repo, subset, x): # experimental revset for update destination - args = getargsdict(x, 'limit', 'clean check') + args = getargsdict(x, 'limit', 'clean') return subset & baseset([destutil.destupdate(repo, **args)[0]]) @predicate('_destmerge') @@ -1139,7 +952,8 @@ fromline -= 1 fctx = repo[rev].filectx(fname) - revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline)) + revs = (c.rev() for c, _linerange + in context.blockancestors(fctx, fromline, toline)) return subset & generatorset(revs, iterasc=False) @predicate('all()', safe=True) @@ -1638,19 +1452,10 @@ ps -= set([node.nullrev]) return subset & ps -def _phase(repo, subset, target): - """helper to select all rev in phase <target>""" - repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded - if repo._phasecache._phasesets: - s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs - s = baseset(s) - s.sort() # set are non ordered, so we enforce ascending - return subset & s - else: - phase = repo._phasecache.phase - condition = lambda r: phase(repo, r) == target - return subset.filter(condition, condrepr=('<phase %r>', target), - cache=False) +def _phase(repo, subset, *targets): + """helper to select all rev in <targets> phases""" + s = repo._phasecache.getrevset(repo, targets) + return subset & s @predicate('draft()', safe=True) def draft(repo, subset, x): @@ -1711,20 +1516,7 @@ @predicate('_notpublic', safe=True) def _notpublic(repo, subset, x): getargs(x, 0, 0, "_notpublic takes no arguments") - 
repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded - if repo._phasecache._phasesets: - s = set() - for u in repo._phasecache._phasesets[1:]: - s.update(u) - s = baseset(s - repo.changelog.filteredrevs) - s.sort() - return subset & s - else: - phase = repo._phasecache.phase - target = phases.public - condition = lambda r: phase(repo, r) != target - return subset.filter(condition, condrepr=('<phase %r>', target), - cache=False) + return _phase(repo, subset, phases.draft, phases.secret) @predicate('public()', safe=True) def public(repo, subset, x): @@ -2428,350 +2220,6 @@ "parentpost": parentpost, } -# Constants for ordering requirement, used in _analyze(): -# -# If 'define', any nested functions and operations can change the ordering of -# the entries in the set. If 'follow', any nested functions and operations -# should take the ordering specified by the first operand to the '&' operator. -# -# For instance, -# -# X & (Y | Z) -# ^ ^^^^^^^ -# | follow -# define -# -# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order -# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't. -# -# 'any' means the order doesn't matter. For instance, -# -# X & !Y -# ^ -# any -# -# 'y()' can either enforce its ordering requirement or take the ordering -# specified by 'x()' because 'not()' doesn't care the order. -# -# Transition of ordering requirement: -# -# 1. starts with 'define' -# 2. shifts to 'follow' by 'x & y' -# 3. changes back to 'define' on function call 'f(x)' or function-like -# operation 'x (f) y' because 'f' may have its own ordering requirement -# for 'x' and 'y' (e.g. 'first(x)') -# -anyorder = 'any' # don't care the order -defineorder = 'define' # should define the order -followorder = 'follow' # must follow the current order - -# transition table for 'x & y', from the current expression 'x' to 'y' -_tofolloworder = { - anyorder: anyorder, - defineorder: followorder, - followorder: followorder, -} - -def _matchonly(revs, bases): - """ - >>> f = lambda *args: _matchonly(*map(parse, args)) - >>> f('ancestors(A)', 'not ancestors(B)') - ('list', ('symbol', 'A'), ('symbol', 'B')) - """ - if (revs is not None - and revs[0] == 'func' - and getsymbol(revs[1]) == 'ancestors' - and bases is not None - and bases[0] == 'not' - and bases[1][0] == 'func' - and getsymbol(bases[1][1]) == 'ancestors'): - return ('list', revs[2], bases[1][2]) - -def _fixops(x): - """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be - handled well by our simple top-down parser""" - if not isinstance(x, tuple): - return x - - op = x[0] - if op == 'parent': - # x^:y means (x^) : y, not x ^ (:y) - # x^: means (x^) :, not x ^ (:) - post = ('parentpost', x[1]) - if x[2][0] == 'dagrangepre': - return _fixops(('dagrange', post, x[2][1])) - elif x[2][0] == 'rangepre': - return _fixops(('range', post, x[2][1])) - elif x[2][0] == 'rangeall': - return _fixops(('rangepost', post)) - elif op == 'or': - # make number of arguments deterministic: - # x + y + z -> (or x y z) -> (or (list x y z)) - return (op, _fixops(('list',) + x[1:])) - - return (op,) + tuple(_fixops(y) for y in x[1:]) - -def _analyze(x, order): - if x is None: - return x - - op = x[0] - if op == 'minus': - return _analyze(('and', x[1], ('not', x[2])), order) - elif op == 'only': - t = ('func', ('symbol', 'only'), ('list', x[1], x[2])) - return _analyze(t, order) - elif op == 'onlypost': - return _analyze(('func', ('symbol', 'only'), x[1]), order) - elif op == 'dagrangepre': - return _analyze(('func', 
('symbol', 'ancestors'), x[1]), order) - elif op == 'dagrangepost': - return _analyze(('func', ('symbol', 'descendants'), x[1]), order) - elif op == 'negate': - s = getstring(x[1], _("can't negate that")) - return _analyze(('string', '-' + s), order) - elif op in ('string', 'symbol'): - return x - elif op == 'and': - ta = _analyze(x[1], order) - tb = _analyze(x[2], _tofolloworder[order]) - return (op, ta, tb, order) - elif op == 'or': - return (op, _analyze(x[1], order), order) - elif op == 'not': - return (op, _analyze(x[1], anyorder), order) - elif op == 'rangeall': - return (op, None, order) - elif op in ('rangepre', 'rangepost', 'parentpost'): - return (op, _analyze(x[1], defineorder), order) - elif op == 'group': - return _analyze(x[1], order) - elif op in ('dagrange', 'range', 'parent', 'ancestor'): - ta = _analyze(x[1], defineorder) - tb = _analyze(x[2], defineorder) - return (op, ta, tb, order) - elif op == 'list': - return (op,) + tuple(_analyze(y, order) for y in x[1:]) - elif op == 'keyvalue': - return (op, x[1], _analyze(x[2], order)) - elif op == 'func': - f = getsymbol(x[1]) - d = defineorder - if f == 'present': - # 'present(set)' is known to return the argument set with no - # modification, so forward the current order to its argument - d = order - return (op, x[1], _analyze(x[2], d), order) - raise ValueError('invalid operator %r' % op) - -def analyze(x, order=defineorder): - """Transform raw parsed tree to evaluatable tree which can be fed to - optimize() or getset() - - All pseudo operations should be mapped to real operations or functions - defined in methods or symbols table respectively. - - 'order' specifies how the current expression 'x' is ordered (see the - constants defined above.) - """ - return _analyze(x, order) - -def _optimize(x, small): - if x is None: - return 0, x - - smallbonus = 1 - if small: - smallbonus = .5 - - op = x[0] - if op in ('string', 'symbol'): - return smallbonus, x # single revisions are small - elif op == 'and': - wa, ta = _optimize(x[1], True) - wb, tb = _optimize(x[2], True) - order = x[3] - w = min(wa, wb) - - # (::x and not ::y)/(not ::y and ::x) have a fast path - tm = _matchonly(ta, tb) or _matchonly(tb, ta) - if tm: - return w, ('func', ('symbol', 'only'), tm, order) - - if tb is not None and tb[0] == 'not': - return wa, ('difference', ta, tb[1], order) - - if wa > wb: - return w, (op, tb, ta, order) - return w, (op, ta, tb, order) - elif op == 'or': - # fast path for machine-generated expression, that is likely to have - # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()' - order = x[2] - ws, ts, ss = [], [], [] - def flushss(): - if not ss: - return - if len(ss) == 1: - w, t = ss[0] - else: - s = '\0'.join(t[1] for w, t in ss) - y = ('func', ('symbol', '_list'), ('string', s), order) - w, t = _optimize(y, False) - ws.append(w) - ts.append(t) - del ss[:] - for y in getlist(x[1]): - w, t = _optimize(y, False) - if t is not None and (t[0] == 'string' or t[0] == 'symbol'): - ss.append((w, t)) - continue - flushss() - ws.append(w) - ts.append(t) - flushss() - if len(ts) == 1: - return ws[0], ts[0] # 'or' operation is fully optimized out - # we can't reorder trees by weight because it would change the order. 
- # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a") - # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0])) - return max(ws), (op, ('list',) + tuple(ts), order) - elif op == 'not': - # Optimize not public() to _notpublic() because we have a fast version - if x[1][:3] == ('func', ('symbol', 'public'), None): - order = x[1][3] - newsym = ('func', ('symbol', '_notpublic'), None, order) - o = _optimize(newsym, not small) - return o[0], o[1] - else: - o = _optimize(x[1], not small) - order = x[2] - return o[0], (op, o[1], order) - elif op == 'rangeall': - return smallbonus, x - elif op in ('rangepre', 'rangepost', 'parentpost'): - o = _optimize(x[1], small) - order = x[2] - return o[0], (op, o[1], order) - elif op in ('dagrange', 'range', 'parent', 'ancestor'): - wa, ta = _optimize(x[1], small) - wb, tb = _optimize(x[2], small) - order = x[3] - return wa + wb, (op, ta, tb, order) - elif op == 'list': - ws, ts = zip(*(_optimize(y, small) for y in x[1:])) - return sum(ws), (op,) + ts - elif op == 'keyvalue': - w, t = _optimize(x[2], small) - return w, (op, x[1], t) - elif op == 'func': - f = getsymbol(x[1]) - wa, ta = _optimize(x[2], small) - if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep', - 'keyword', 'outgoing', 'user', 'destination'): - w = 10 # slow - elif f in ('modifies', 'adds', 'removes'): - w = 30 # slower - elif f == "contains": - w = 100 # very slow - elif f == "ancestor": - w = 1 * smallbonus - elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'): - w = 0 - elif f == "sort": - w = 10 # assume most sorts look at changelog - else: - w = 1 - order = x[3] - return w + wa, (op, x[1], ta, order) - raise ValueError('invalid operator %r' % op) - -def optimize(tree): - """Optimize evaluatable tree - - All pseudo operations should be transformed beforehand. - """ - _weight, newtree = _optimize(tree, small=True) - return newtree - -# the set of valid characters for the initial letter of symbols in -# alias declarations and definitions -_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$')) - -def _parsewith(spec, lookup=None, syminitletters=None): - """Generate a parse tree of given spec with given tokenizing options - - >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters) - ('func', ('symbol', 'foo'), ('symbol', '$1')) - >>> _parsewith('$1') - Traceback (most recent call last): - ... - ParseError: ("syntax error in revset '$1'", 0) - >>> _parsewith('foo bar') - Traceback (most recent call last): - ... - ParseError: ('invalid token', 4) - """ - p = parser.parser(elements) - tree, pos = p.parse(tokenize(spec, lookup=lookup, - syminitletters=syminitletters)) - if pos != len(spec): - raise error.ParseError(_('invalid token'), pos) - return _fixops(parser.simplifyinfixops(tree, ('list', 'or'))) - -class _aliasrules(parser.basealiasrules): - """Parsing and expansion rule set of revset aliases""" - _section = _('revset alias') - - @staticmethod - def _parse(spec): - """Parse alias declaration/definition ``spec`` - - This allows symbol names to use also ``$`` as an initial letter - (for backward compatibility), and callers of this function should - examine whether ``$`` is used also for unexpected symbols or not. 
- """ - return _parsewith(spec, syminitletters=_aliassyminitletters) - - @staticmethod - def _trygetfunc(tree): - if tree[0] == 'func' and tree[1][0] == 'symbol': - return tree[1][1], getlist(tree[2]) - -def expandaliases(ui, tree): - aliases = _aliasrules.buildmap(ui.configitems('revsetalias')) - tree = _aliasrules.expand(aliases, tree) - # warn about problematic (but not referred) aliases - for name, alias in sorted(aliases.iteritems()): - if alias.error and not alias.warned: - ui.warn(_('warning: %s\n') % (alias.error)) - alias.warned = True - return tree - -def foldconcat(tree): - """Fold elements to be concatenated by `##` - """ - if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): - return tree - if tree[0] == '_concat': - pending = [tree] - l = [] - while pending: - e = pending.pop() - if e[0] == '_concat': - pending.extend(reversed(e[1:])) - elif e[0] in ('string', 'symbol'): - l.append(e[1]) - else: - msg = _("\"##\" can't concatenate \"%s\" element") % (e[0]) - raise error.ParseError(msg) - return ('string', ''.join(l)) - else: - return tuple(foldconcat(t) for t in tree) - -def parse(spec, lookup=None): - return _parsewith(spec, lookup=lookup) - def posttreebuilthook(tree, repo): # hook for extensions to execute code on the optimized tree pass @@ -2801,15 +2249,16 @@ if repo: lookup = repo.__contains__ if len(specs) == 1: - tree = parse(specs[0], lookup) + tree = revsetlang.parse(specs[0], lookup) else: - tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs)) + tree = ('or', + ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs)) if ui: - tree = expandaliases(ui, tree) - tree = foldconcat(tree) - tree = analyze(tree, order) - tree = optimize(tree) + tree = revsetlang.expandaliases(ui, tree) + tree = revsetlang.foldconcat(tree) + tree = revsetlang.analyze(tree, order) + tree = revsetlang.optimize(tree) posttreebuilthook(tree, repo) return makematcher(tree) @@ -2825,1082 +2274,6 @@ return result return mfunc -def formatspec(expr, *args): - ''' - This is a convenience function for using revsets internally, and - escapes arguments appropriately. Aliases are intentionally ignored - so that intended expression behavior isn't accidentally subverted. - - Supported arguments: - - %r = revset expression, parenthesized - %d = int(arg), no quoting - %s = string(arg), escaped and single-quoted - %b = arg.branch(), escaped and single-quoted - %n = hex(arg), single-quoted - %% = a literal '%' - - Prefixing the type with 'l' specifies a parenthesized list of that type. 
- - >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()")) - '(10 or 11):: and ((this()) or (that()))' - >>> formatspec('%d:: and not %d::', 10, 20) - '10:: and not 20::' - >>> formatspec('%ld or %ld', [], [1]) - "_list('') or 1" - >>> formatspec('keyword(%s)', 'foo\\xe9') - "keyword('foo\\\\xe9')" - >>> b = lambda: 'default' - >>> b.branch = b - >>> formatspec('branch(%b)', b) - "branch('default')" - >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd']) - "root(_list('a\\x00b\\x00c\\x00d'))" - ''' - - def quote(s): - return repr(str(s)) - - def argtype(c, arg): - if c == 'd': - return str(int(arg)) - elif c == 's': - return quote(arg) - elif c == 'r': - parse(arg) # make sure syntax errors are confined - return '(%s)' % arg - elif c == 'n': - return quote(node.hex(arg)) - elif c == 'b': - return quote(arg.branch()) - - def listexp(s, t): - l = len(s) - if l == 0: - return "_list('')" - elif l == 1: - return argtype(t, s[0]) - elif t == 'd': - return "_intlist('%s')" % "\0".join(str(int(a)) for a in s) - elif t == 's': - return "_list('%s')" % "\0".join(s) - elif t == 'n': - return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s) - elif t == 'b': - return "_list('%s')" % "\0".join(a.branch() for a in s) - - m = l // 2 - return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t)) - - ret = '' - pos = 0 - arg = 0 - while pos < len(expr): - c = expr[pos] - if c == '%': - pos += 1 - d = expr[pos] - if d == '%': - ret += d - elif d in 'dsnbr': - ret += argtype(d, args[arg]) - arg += 1 - elif d == 'l': - # a list of some type - pos += 1 - d = expr[pos] - ret += listexp(list(args[arg]), d) - arg += 1 - else: - raise error.Abort(_('unexpected revspec format character %s') - % d) - else: - ret += c - pos += 1 - - return ret - -def prettyformat(tree): - return parser.prettyformat(tree, ('string', 'symbol')) - -def depth(tree): - if isinstance(tree, tuple): - return max(map(depth, tree)) + 1 - else: - return 0 - -def funcsused(tree): - if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): - return set() - else: - funcs = set() - for s in tree[1:]: - funcs |= funcsused(s) - if tree[0] == 'func': - funcs.add(tree[1][1]) - return funcs - -def _formatsetrepr(r): - """Format an optional printable representation of a set - - ======== ================================= - type(r) example - ======== ================================= - tuple ('<not %r>', other) - str '<branch closed>' - callable lambda: '<branch %r>' % sorted(b) - object other - ======== ================================= - """ - if r is None: - return '' - elif isinstance(r, tuple): - return r[0] % r[1:] - elif isinstance(r, str): - return r - elif callable(r): - return r() - else: - return repr(r) - -class abstractsmartset(object): - - def __nonzero__(self): - """True if the smartset is not empty""" - raise NotImplementedError() - - def __contains__(self, rev): - """provide fast membership testing""" - raise NotImplementedError() - - def __iter__(self): - """iterate the set in the order it is supposed to be iterated""" - raise NotImplementedError() - - # Attributes containing a function to perform a fast iteration in a given - # direction. A smartset can have none, one, or both defined. - # - # Default value is None instead of a function returning None to avoid - # initializing an iterator just for testing if a fast method exists. 
- fastasc = None - fastdesc = None - - def isascending(self): - """True if the set will iterate in ascending order""" - raise NotImplementedError() - - def isdescending(self): - """True if the set will iterate in descending order""" - raise NotImplementedError() - - def istopo(self): - """True if the set will iterate in topographical order""" - raise NotImplementedError() - - def min(self): - """return the minimum element in the set""" - if self.fastasc is None: - v = min(self) - else: - for v in self.fastasc(): - break - else: - raise ValueError('arg is an empty sequence') - self.min = lambda: v - return v - - def max(self): - """return the maximum element in the set""" - if self.fastdesc is None: - return max(self) - else: - for v in self.fastdesc(): - break - else: - raise ValueError('arg is an empty sequence') - self.max = lambda: v - return v - - def first(self): - """return the first element in the set (user iteration perspective) - - Return None if the set is empty""" - raise NotImplementedError() - - def last(self): - """return the last element in the set (user iteration perspective) - - Return None if the set is empty""" - raise NotImplementedError() - - def __len__(self): - """return the length of the smartsets - - This can be expensive on smartset that could be lazy otherwise.""" - raise NotImplementedError() - - def reverse(self): - """reverse the expected iteration order""" - raise NotImplementedError() - - def sort(self, reverse=True): - """get the set to iterate in an ascending or descending order""" - raise NotImplementedError() - - def __and__(self, other): - """Returns a new object with the intersection of the two collections. - - This is part of the mandatory API for smartset.""" - if isinstance(other, fullreposet): - return self - return self.filter(other.__contains__, condrepr=other, cache=False) - - def __add__(self, other): - """Returns a new object with the union of the two collections. - - This is part of the mandatory API for smartset.""" - return addset(self, other) - - def __sub__(self, other): - """Returns a new object with the substraction of the two collections. - - This is part of the mandatory API for smartset.""" - c = other.__contains__ - return self.filter(lambda r: not c(r), condrepr=('<not %r>', other), - cache=False) - - def filter(self, condition, condrepr=None, cache=True): - """Returns this smartset filtered by condition as a new smartset. - - `condition` is a callable which takes a revision number and returns a - boolean. Optional `condrepr` provides a printable representation of - the given `condition`. - - This is part of the mandatory API for smartset.""" - # builtin cannot be cached. but do not needs to - if cache and util.safehasattr(condition, 'func_code'): - condition = util.cachefunc(condition) - return filteredset(self, condition, condrepr) - -class baseset(abstractsmartset): - """Basic data structure that represents a revset and contains the basic - operation that it should be able to perform. - - Every method in this class should be implemented by any smartset class. - """ - def __init__(self, data=(), datarepr=None, istopo=False): - """ - datarepr: a tuple of (format, obj, ...), a function or an object that - provides a printable representation of the given data. 
- """ - self._ascending = None - self._istopo = istopo - if not isinstance(data, list): - if isinstance(data, set): - self._set = data - # set has no order we pick one for stability purpose - self._ascending = True - data = list(data) - self._list = data - self._datarepr = datarepr - - @util.propertycache - def _set(self): - return set(self._list) - - @util.propertycache - def _asclist(self): - asclist = self._list[:] - asclist.sort() - return asclist - - def __iter__(self): - if self._ascending is None: - return iter(self._list) - elif self._ascending: - return iter(self._asclist) - else: - return reversed(self._asclist) - - def fastasc(self): - return iter(self._asclist) - - def fastdesc(self): - return reversed(self._asclist) - - @util.propertycache - def __contains__(self): - return self._set.__contains__ - - def __nonzero__(self): - return bool(self._list) - - def sort(self, reverse=False): - self._ascending = not bool(reverse) - self._istopo = False - - def reverse(self): - if self._ascending is None: - self._list.reverse() - else: - self._ascending = not self._ascending - self._istopo = False - - def __len__(self): - return len(self._list) - - def isascending(self): - """Returns True if the collection is ascending order, False if not. - - This is part of the mandatory API for smartset.""" - if len(self) <= 1: - return True - return self._ascending is not None and self._ascending - - def isdescending(self): - """Returns True if the collection is descending order, False if not. - - This is part of the mandatory API for smartset.""" - if len(self) <= 1: - return True - return self._ascending is not None and not self._ascending - - def istopo(self): - """Is the collection is in topographical order or not. - - This is part of the mandatory API for smartset.""" - if len(self) <= 1: - return True - return self._istopo - - def first(self): - if self: - if self._ascending is None: - return self._list[0] - elif self._ascending: - return self._asclist[0] - else: - return self._asclist[-1] - return None - - def last(self): - if self: - if self._ascending is None: - return self._list[-1] - elif self._ascending: - return self._asclist[-1] - else: - return self._asclist[0] - return None - - def __repr__(self): - d = {None: '', False: '-', True: '+'}[self._ascending] - s = _formatsetrepr(self._datarepr) - if not s: - l = self._list - # if _list has been built from a set, it might have a different - # order from one python implementation to another. - # We fallback to the sorted version for a stable output. - if self._ascending is not None: - l = self._asclist - s = repr(l) - return '<%s%s %s>' % (type(self).__name__, d, s) - -class filteredset(abstractsmartset): - """Duck type for baseset class which iterates lazily over the revisions in - the subset and contains a function which tests for membership in the - revset - """ - def __init__(self, subset, condition=lambda x: True, condrepr=None): - """ - condition: a function that decide whether a revision in the subset - belongs to the revset or not. - condrepr: a tuple of (format, obj, ...), a function or an object that - provides a printable representation of the given condition. 
- """ - self._subset = subset - self._condition = condition - self._condrepr = condrepr - - def __contains__(self, x): - return x in self._subset and self._condition(x) - - def __iter__(self): - return self._iterfilter(self._subset) - - def _iterfilter(self, it): - cond = self._condition - for x in it: - if cond(x): - yield x - - @property - def fastasc(self): - it = self._subset.fastasc - if it is None: - return None - return lambda: self._iterfilter(it()) - - @property - def fastdesc(self): - it = self._subset.fastdesc - if it is None: - return None - return lambda: self._iterfilter(it()) - - def __nonzero__(self): - fast = None - candidates = [self.fastasc if self.isascending() else None, - self.fastdesc if self.isdescending() else None, - self.fastasc, - self.fastdesc] - for candidate in candidates: - if candidate is not None: - fast = candidate - break - - if fast is not None: - it = fast() - else: - it = self - - for r in it: - return True - return False - - def __len__(self): - # Basic implementation to be changed in future patches. - # until this gets improved, we use generator expression - # here, since list comprehensions are free to call __len__ again - # causing infinite recursion - l = baseset(r for r in self) - return len(l) - - def sort(self, reverse=False): - self._subset.sort(reverse=reverse) - - def reverse(self): - self._subset.reverse() - - def isascending(self): - return self._subset.isascending() - - def isdescending(self): - return self._subset.isdescending() - - def istopo(self): - return self._subset.istopo() - - def first(self): - for x in self: - return x - return None - - def last(self): - it = None - if self.isascending(): - it = self.fastdesc - elif self.isdescending(): - it = self.fastasc - if it is not None: - for x in it(): - return x - return None #empty case - else: - x = None - for x in self: - pass - return x - - def __repr__(self): - xs = [repr(self._subset)] - s = _formatsetrepr(self._condrepr) - if s: - xs.append(s) - return '<%s %s>' % (type(self).__name__, ', '.join(xs)) - -def _iterordered(ascending, iter1, iter2): - """produce an ordered iteration from two iterators with the same order - - The ascending is used to indicated the iteration direction. - """ - choice = max - if ascending: - choice = min - - val1 = None - val2 = None - try: - # Consume both iterators in an ordered way until one is empty - while True: - if val1 is None: - val1 = next(iter1) - if val2 is None: - val2 = next(iter2) - n = choice(val1, val2) - yield n - if val1 == n: - val1 = None - if val2 == n: - val2 = None - except StopIteration: - # Flush any remaining values and consume the other one - it = iter2 - if val1 is not None: - yield val1 - it = iter1 - elif val2 is not None: - # might have been equality and both are empty - yield val2 - for val in it: - yield val - -class addset(abstractsmartset): - """Represent the addition of two sets - - Wrapper structure for lazily adding two structures without losing much - performance on the __contains__ method - - If the ascending attribute is set, that means the two structures are - ordered in either an ascending or descending way. 
Therefore, we can add - them maintaining the order by iterating over both at the same time - - >>> xs = baseset([0, 3, 2]) - >>> ys = baseset([5, 2, 4]) - - >>> rs = addset(xs, ys) - >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last() - (True, True, False, True, 0, 4) - >>> rs = addset(xs, baseset([])) - >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last() - (True, True, False, 0, 2) - >>> rs = addset(baseset([]), baseset([])) - >>> bool(rs), 0 in rs, rs.first(), rs.last() - (False, False, None, None) - - iterate unsorted: - >>> rs = addset(xs, ys) - >>> # (use generator because pypy could call len()) - >>> list(x for x in rs) # without _genlist - [0, 3, 2, 5, 4] - >>> assert not rs._genlist - >>> len(rs) - 5 - >>> [x for x in rs] # with _genlist - [0, 3, 2, 5, 4] - >>> assert rs._genlist - - iterate ascending: - >>> rs = addset(xs, ys, ascending=True) - >>> # (use generator because pypy could call len()) - >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist - ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) - >>> assert not rs._asclist - >>> len(rs) - 5 - >>> [x for x in rs], [x for x in rs.fastasc()] - ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) - >>> assert rs._asclist - - iterate descending: - >>> rs = addset(xs, ys, ascending=False) - >>> # (use generator because pypy could call len()) - >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist - ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) - >>> assert not rs._asclist - >>> len(rs) - 5 - >>> [x for x in rs], [x for x in rs.fastdesc()] - ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) - >>> assert rs._asclist - - iterate ascending without fastasc: - >>> rs = addset(xs, generatorset(ys), ascending=True) - >>> assert rs.fastasc is None - >>> [x for x in rs] - [0, 2, 3, 4, 5] - - iterate descending without fastdesc: - >>> rs = addset(generatorset(xs), ys, ascending=False) - >>> assert rs.fastdesc is None - >>> [x for x in rs] - [5, 4, 3, 2, 0] - """ - def __init__(self, revs1, revs2, ascending=None): - self._r1 = revs1 - self._r2 = revs2 - self._iter = None - self._ascending = ascending - self._genlist = None - self._asclist = None - - def __len__(self): - return len(self._list) - - def __nonzero__(self): - return bool(self._r1) or bool(self._r2) - - @util.propertycache - def _list(self): - if not self._genlist: - self._genlist = baseset(iter(self)) - return self._genlist - - def __iter__(self): - """Iterate over both collections without repeating elements - - If the ascending attribute is not set, iterate over the first one and - then over the second one checking for membership on the first one so we - dont yield any duplicates. - - If the ascending attribute is set, iterate over both collections at the - same time, yielding only one value at a time in the given order. 
- """ - if self._ascending is None: - if self._genlist: - return iter(self._genlist) - def arbitraryordergen(): - for r in self._r1: - yield r - inr1 = self._r1.__contains__ - for r in self._r2: - if not inr1(r): - yield r - return arbitraryordergen() - # try to use our own fast iterator if it exists - self._trysetasclist() - if self._ascending: - attr = 'fastasc' - else: - attr = 'fastdesc' - it = getattr(self, attr) - if it is not None: - return it() - # maybe half of the component supports fast - # get iterator for _r1 - iter1 = getattr(self._r1, attr) - if iter1 is None: - # let's avoid side effect (not sure it matters) - iter1 = iter(sorted(self._r1, reverse=not self._ascending)) - else: - iter1 = iter1() - # get iterator for _r2 - iter2 = getattr(self._r2, attr) - if iter2 is None: - # let's avoid side effect (not sure it matters) - iter2 = iter(sorted(self._r2, reverse=not self._ascending)) - else: - iter2 = iter2() - return _iterordered(self._ascending, iter1, iter2) - - def _trysetasclist(self): - """populate the _asclist attribute if possible and necessary""" - if self._genlist is not None and self._asclist is None: - self._asclist = sorted(self._genlist) - - @property - def fastasc(self): - self._trysetasclist() - if self._asclist is not None: - return self._asclist.__iter__ - iter1 = self._r1.fastasc - iter2 = self._r2.fastasc - if None in (iter1, iter2): - return None - return lambda: _iterordered(True, iter1(), iter2()) - - @property - def fastdesc(self): - self._trysetasclist() - if self._asclist is not None: - return self._asclist.__reversed__ - iter1 = self._r1.fastdesc - iter2 = self._r2.fastdesc - if None in (iter1, iter2): - return None - return lambda: _iterordered(False, iter1(), iter2()) - - def __contains__(self, x): - return x in self._r1 or x in self._r2 - - def sort(self, reverse=False): - """Sort the added set - - For this we use the cached list with all the generated values and if we - know they are ascending or descending we can sort them in a smart way. - """ - self._ascending = not reverse - - def isascending(self): - return self._ascending is not None and self._ascending - - def isdescending(self): - return self._ascending is not None and not self._ascending - - def istopo(self): - # not worth the trouble asserting if the two sets combined are still - # in topographical order. Use the sort() predicate to explicitly sort - # again instead. - return False - - def reverse(self): - if self._ascending is None: - self._list.reverse() - else: - self._ascending = not self._ascending - - def first(self): - for x in self: - return x - return None - - def last(self): - self.reverse() - val = self.first() - self.reverse() - return val - - def __repr__(self): - d = {None: '', False: '-', True: '+'}[self._ascending] - return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2) - -class generatorset(abstractsmartset): - """Wrap a generator for lazy iteration - - Wrapper structure for generators that provides lazy membership and can - be iterated more than once. - When asked for membership it generates values until either it finds the - requested one or has gone through all the elements in the generator - """ - def __init__(self, gen, iterasc=None): - """ - gen: a generator producing the values for the generatorset. 
- """ - self._gen = gen - self._asclist = None - self._cache = {} - self._genlist = [] - self._finished = False - self._ascending = True - if iterasc is not None: - if iterasc: - self.fastasc = self._iterator - self.__contains__ = self._asccontains - else: - self.fastdesc = self._iterator - self.__contains__ = self._desccontains - - def __nonzero__(self): - # Do not use 'for r in self' because it will enforce the iteration - # order (default ascending), possibly unrolling a whole descending - # iterator. - if self._genlist: - return True - for r in self._consumegen(): - return True - return False - - def __contains__(self, x): - if x in self._cache: - return self._cache[x] - - # Use new values only, as existing values would be cached. - for l in self._consumegen(): - if l == x: - return True - - self._cache[x] = False - return False - - def _asccontains(self, x): - """version of contains optimised for ascending generator""" - if x in self._cache: - return self._cache[x] - - # Use new values only, as existing values would be cached. - for l in self._consumegen(): - if l == x: - return True - if l > x: - break - - self._cache[x] = False - return False - - def _desccontains(self, x): - """version of contains optimised for descending generator""" - if x in self._cache: - return self._cache[x] - - # Use new values only, as existing values would be cached. - for l in self._consumegen(): - if l == x: - return True - if l < x: - break - - self._cache[x] = False - return False - - def __iter__(self): - if self._ascending: - it = self.fastasc - else: - it = self.fastdesc - if it is not None: - return it() - # we need to consume the iterator - for x in self._consumegen(): - pass - # recall the same code - return iter(self) - - def _iterator(self): - if self._finished: - return iter(self._genlist) - - # We have to use this complex iteration strategy to allow multiple - # iterations at the same time. We need to be able to catch revision - # removed from _consumegen and added to genlist in another instance. - # - # Getting rid of it would provide an about 15% speed up on this - # iteration. - genlist = self._genlist - nextrev = self._consumegen().next - _len = len # cache global lookup - def gen(): - i = 0 - while True: - if i < _len(genlist): - yield genlist[i] - else: - yield nextrev() - i += 1 - return gen() - - def _consumegen(self): - cache = self._cache - genlist = self._genlist.append - for item in self._gen: - cache[item] = True - genlist(item) - yield item - if not self._finished: - self._finished = True - asc = self._genlist[:] - asc.sort() - self._asclist = asc - self.fastasc = asc.__iter__ - self.fastdesc = asc.__reversed__ - - def __len__(self): - for x in self._consumegen(): - pass - return len(self._genlist) - - def sort(self, reverse=False): - self._ascending = not reverse - - def reverse(self): - self._ascending = not self._ascending - - def isascending(self): - return self._ascending - - def isdescending(self): - return not self._ascending - - def istopo(self): - # not worth the trouble asserting if the two sets combined are still - # in topographical order. Use the sort() predicate to explicitly sort - # again instead. 
- return False - - def first(self): - if self._ascending: - it = self.fastasc - else: - it = self.fastdesc - if it is None: - # we need to consume all and try again - for x in self._consumegen(): - pass - return self.first() - return next(it(), None) - - def last(self): - if self._ascending: - it = self.fastdesc - else: - it = self.fastasc - if it is None: - # we need to consume all and try again - for x in self._consumegen(): - pass - return self.first() - return next(it(), None) - - def __repr__(self): - d = {False: '-', True: '+'}[self._ascending] - return '<%s%s>' % (type(self).__name__, d) - -class spanset(abstractsmartset): - """Duck type for baseset class which represents a range of revisions and - can work lazily and without having all the range in memory - - Note that spanset(x, y) behave almost like xrange(x, y) except for two - notable points: - - when x < y it will be automatically descending, - - revision filtered with this repoview will be skipped. - - """ - def __init__(self, repo, start=0, end=None): - """ - start: first revision included the set - (default to 0) - end: first revision excluded (last+1) - (default to len(repo) - - Spanset will be descending if `end` < `start`. - """ - if end is None: - end = len(repo) - self._ascending = start <= end - if not self._ascending: - start, end = end + 1, start +1 - self._start = start - self._end = end - self._hiddenrevs = repo.changelog.filteredrevs - - def sort(self, reverse=False): - self._ascending = not reverse - - def reverse(self): - self._ascending = not self._ascending - - def istopo(self): - # not worth the trouble asserting if the two sets combined are still - # in topographical order. Use the sort() predicate to explicitly sort - # again instead. - return False - - def _iterfilter(self, iterrange): - s = self._hiddenrevs - for r in iterrange: - if r not in s: - yield r - - def __iter__(self): - if self._ascending: - return self.fastasc() - else: - return self.fastdesc() - - def fastasc(self): - iterrange = xrange(self._start, self._end) - if self._hiddenrevs: - return self._iterfilter(iterrange) - return iter(iterrange) - - def fastdesc(self): - iterrange = xrange(self._end - 1, self._start - 1, -1) - if self._hiddenrevs: - return self._iterfilter(iterrange) - return iter(iterrange) - - def __contains__(self, rev): - hidden = self._hiddenrevs - return ((self._start <= rev < self._end) - and not (hidden and rev in hidden)) - - def __nonzero__(self): - for r in self: - return True - return False - - def __len__(self): - if not self._hiddenrevs: - return abs(self._end - self._start) - else: - count = 0 - start = self._start - end = self._end - for rev in self._hiddenrevs: - if (end < rev <= start) or (start <= rev < end): - count += 1 - return abs(self._end - self._start) - count - - def isascending(self): - return self._ascending - - def isdescending(self): - return not self._ascending - - def first(self): - if self._ascending: - it = self.fastasc - else: - it = self.fastdesc - for x in it(): - return x - return None - - def last(self): - if self._ascending: - it = self.fastdesc - else: - it = self.fastasc - for x in it(): - return x - return None - - def __repr__(self): - d = {False: '-', True: '+'}[self._ascending] - return '<%s%s %d:%d>' % (type(self).__name__, d, - self._start, self._end - 1) - -class fullreposet(spanset): - """a set containing all revisions in the repo - - This class exists to host special optimization and magic to handle virtual - revisions such as "null". 
- """ - - def __init__(self, repo): - super(fullreposet, self).__init__(repo) - - def __and__(self, other): - """As self contains the whole repo, all of the other set should also be - in self. Therefore `self & other = other`. - - This boldly assumes the other contains valid revs only. - """ - # other not a smartset, make is so - if not util.safehasattr(other, 'isascending'): - # filter out hidden revision - # (this boldly assumes all smartset are pure) - # - # `other` was used with "&", let's assume this is a set like - # object. - other = baseset(other - self._hiddenrevs) - - other.sort(reverse=self.isdescending()) - return other - -def prettyformatset(revs): - lines = [] - rs = repr(revs) - p = 0 - while p < len(rs): - q = rs.find('<', p + 1) - if q < 0: - q = len(rs) - l = rs.count('<', 0, p) - rs.count('>', 0, p) - assert l >= 0 - lines.append((l, rs[p:q].rstrip())) - p = q - return '\n'.join(' ' * l + s for l, s in lines) - def loadpredicate(ui, extname, registrarobj): """Load revset predicates from specified registrarobj """
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/revsetlang.py Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,684 @@ +# revsetlang.py - parser, tokenizer and utility for revision set language +# +# Copyright 2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import string + +from .i18n import _ +from . import ( + error, + node, + parser, + pycompat, +) + +elements = { + # token-type: binding-strength, primary, prefix, infix, suffix + "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None), + "##": (20, None, None, ("_concat", 20), None), + "~": (18, None, None, ("ancestor", 18), None), + "^": (18, None, None, ("parent", 18), "parentpost"), + "-": (5, None, ("negate", 19), ("minus", 5), None), + "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), + "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), + ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"), + "not": (10, None, ("not", 10), None, None), + "!": (10, None, ("not", 10), None, None), + "and": (5, None, None, ("and", 5), None), + "&": (5, None, None, ("and", 5), None), + "%": (5, None, None, ("only", 5), "onlypost"), + "or": (4, None, None, ("or", 4), None), + "|": (4, None, None, ("or", 4), None), + "+": (4, None, None, ("or", 4), None), + "=": (3, None, None, ("keyvalue", 3), None), + ",": (2, None, None, ("list", 2), None), + ")": (0, None, None, None, None), + "symbol": (0, "symbol", None, None, None), + "string": (0, "string", None, None, None), + "end": (0, None, None, None, None), +} + +keywords = set(['and', 'or', 'not']) + +# default set of valid characters for the initial letter of symbols +_syminitletters = set( + string.ascii_letters + + string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256))) + +# default set of valid characters for non-initial letters of symbols +_symletters = _syminitletters | set(pycompat.sysstr('-/')) + +def tokenize(program, lookup=None, syminitletters=None, symletters=None): + ''' + Parse a revset statement into a stream of tokens + + ``syminitletters`` is the set of valid characters for the initial + letter of symbols. + + By default, character ``c`` is recognized as valid for initial + letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``. + + ``symletters`` is the set of valid characters for non-initial + letters of symbols. + + By default, character ``c`` is recognized as valid for non-initial + letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``. 
+ + Check that @ is a valid unquoted token character (issue3686): + >>> list(tokenize("@::")) + [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)] + + ''' + if syminitletters is None: + syminitletters = _syminitletters + if symletters is None: + symletters = _symletters + + if program and lookup: + # attempt to parse old-style ranges first to deal with + # things like old-tag which contain query metacharacters + parts = program.split(':', 1) + if all(lookup(sym) for sym in parts if sym): + if parts[0]: + yield ('symbol', parts[0], 0) + if len(parts) > 1: + s = len(parts[0]) + yield (':', None, s) + if parts[1]: + yield ('symbol', parts[1], s + 1) + yield ('end', None, len(program)) + return + + pos, l = 0, len(program) + while pos < l: + c = program[pos] + if c.isspace(): # skip inter-token whitespace + pass + elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully + yield ('::', None, pos) + pos += 1 # skip ahead + elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully + yield ('..', None, pos) + pos += 1 # skip ahead + elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully + yield ('##', None, pos) + pos += 1 # skip ahead + elif c in "():=,-|&+!~^%": # handle simple operators + yield (c, None, pos) + elif (c in '"\'' or c == 'r' and + program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings + if c == 'r': + pos += 1 + c = program[pos] + decode = lambda x: x + else: + decode = parser.unescapestr + pos += 1 + s = pos + while pos < l: # find closing quote + d = program[pos] + if d == '\\': # skip over escaped characters + pos += 2 + continue + if d == c: + yield ('string', decode(program[s:pos]), s) + break + pos += 1 + else: + raise error.ParseError(_("unterminated string"), s) + # gather up a symbol/keyword + elif c in syminitletters: + s = pos + pos += 1 + while pos < l: # find end of symbol + d = program[pos] + if d not in symletters: + break + if d == '.' and program[pos - 1] == '.': # special case for .. 
+ pos -= 1 + break + pos += 1 + sym = program[s:pos] + if sym in keywords: # operator keywords + yield (sym, None, s) + elif '-' in sym: + # some jerk gave us foo-bar-baz, try to check if it's a symbol + if lookup and lookup(sym): + # looks like a real symbol + yield ('symbol', sym, s) + else: + # looks like an expression + parts = sym.split('-') + for p in parts[:-1]: + if p: # possible consecutive - + yield ('symbol', p, s) + s += len(p) + yield ('-', None, pos) + s += 1 + if parts[-1]: # possible trailing - + yield ('symbol', parts[-1], s) + else: + yield ('symbol', sym, s) + pos -= 1 + else: + raise error.ParseError(_("syntax error in revset '%s'") % + program, pos) + pos += 1 + yield ('end', None, pos) + +# helpers + +_notset = object() + +def getsymbol(x): + if x and x[0] == 'symbol': + return x[1] + raise error.ParseError(_('not a symbol')) + +def getstring(x, err): + if x and (x[0] == 'string' or x[0] == 'symbol'): + return x[1] + raise error.ParseError(err) + +def getinteger(x, err, default=_notset): + if not x and default is not _notset: + return default + try: + return int(getstring(x, err)) + except ValueError: + raise error.ParseError(err) + +def getlist(x): + if not x: + return [] + if x[0] == 'list': + return list(x[1:]) + return [x] + +def getrange(x, err): + if not x: + raise error.ParseError(err) + op = x[0] + if op == 'range': + return x[1], x[2] + elif op == 'rangepre': + return None, x[1] + elif op == 'rangepost': + return x[1], None + elif op == 'rangeall': + return None, None + raise error.ParseError(err) + +def getargs(x, min, max, err): + l = getlist(x) + if len(l) < min or (max >= 0 and len(l) > max): + raise error.ParseError(err) + return l + +def getargsdict(x, funcname, keys): + return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys), + keyvaluenode='keyvalue', keynode='symbol') + +# Constants for ordering requirement, used in _analyze(): +# +# If 'define', any nested functions and operations can change the ordering of +# the entries in the set. If 'follow', any nested functions and operations +# should take the ordering specified by the first operand to the '&' operator. +# +# For instance, +# +# X & (Y | Z) +# ^ ^^^^^^^ +# | follow +# define +# +# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order +# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't. +# +# 'any' means the order doesn't matter. For instance, +# +# X & !Y +# ^ +# any +# +# 'y()' can either enforce its ordering requirement or take the ordering +# specified by 'x()' because 'not()' doesn't care the order. +# +# Transition of ordering requirement: +# +# 1. starts with 'define' +# 2. shifts to 'follow' by 'x & y' +# 3. changes back to 'define' on function call 'f(x)' or function-like +# operation 'x (f) y' because 'f' may have its own ordering requirement +# for 'x' and 'y' (e.g. 
'first(x)') +# +anyorder = 'any' # don't care the order +defineorder = 'define' # should define the order +followorder = 'follow' # must follow the current order + +# transition table for 'x & y', from the current expression 'x' to 'y' +_tofolloworder = { + anyorder: anyorder, + defineorder: followorder, + followorder: followorder, +} + +def _matchonly(revs, bases): + """ + >>> f = lambda *args: _matchonly(*map(parse, args)) + >>> f('ancestors(A)', 'not ancestors(B)') + ('list', ('symbol', 'A'), ('symbol', 'B')) + """ + if (revs is not None + and revs[0] == 'func' + and getsymbol(revs[1]) == 'ancestors' + and bases is not None + and bases[0] == 'not' + and bases[1][0] == 'func' + and getsymbol(bases[1][1]) == 'ancestors'): + return ('list', revs[2], bases[1][2]) + +def _fixops(x): + """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be + handled well by our simple top-down parser""" + if not isinstance(x, tuple): + return x + + op = x[0] + if op == 'parent': + # x^:y means (x^) : y, not x ^ (:y) + # x^: means (x^) :, not x ^ (:) + post = ('parentpost', x[1]) + if x[2][0] == 'dagrangepre': + return _fixops(('dagrange', post, x[2][1])) + elif x[2][0] == 'rangepre': + return _fixops(('range', post, x[2][1])) + elif x[2][0] == 'rangeall': + return _fixops(('rangepost', post)) + elif op == 'or': + # make number of arguments deterministic: + # x + y + z -> (or x y z) -> (or (list x y z)) + return (op, _fixops(('list',) + x[1:])) + + return (op,) + tuple(_fixops(y) for y in x[1:]) + +def _analyze(x, order): + if x is None: + return x + + op = x[0] + if op == 'minus': + return _analyze(('and', x[1], ('not', x[2])), order) + elif op == 'only': + t = ('func', ('symbol', 'only'), ('list', x[1], x[2])) + return _analyze(t, order) + elif op == 'onlypost': + return _analyze(('func', ('symbol', 'only'), x[1]), order) + elif op == 'dagrangepre': + return _analyze(('func', ('symbol', 'ancestors'), x[1]), order) + elif op == 'dagrangepost': + return _analyze(('func', ('symbol', 'descendants'), x[1]), order) + elif op == 'negate': + s = getstring(x[1], _("can't negate that")) + return _analyze(('string', '-' + s), order) + elif op in ('string', 'symbol'): + return x + elif op == 'and': + ta = _analyze(x[1], order) + tb = _analyze(x[2], _tofolloworder[order]) + return (op, ta, tb, order) + elif op == 'or': + return (op, _analyze(x[1], order), order) + elif op == 'not': + return (op, _analyze(x[1], anyorder), order) + elif op == 'rangeall': + return (op, None, order) + elif op in ('rangepre', 'rangepost', 'parentpost'): + return (op, _analyze(x[1], defineorder), order) + elif op == 'group': + return _analyze(x[1], order) + elif op in ('dagrange', 'range', 'parent', 'ancestor'): + ta = _analyze(x[1], defineorder) + tb = _analyze(x[2], defineorder) + return (op, ta, tb, order) + elif op == 'list': + return (op,) + tuple(_analyze(y, order) for y in x[1:]) + elif op == 'keyvalue': + return (op, x[1], _analyze(x[2], order)) + elif op == 'func': + f = getsymbol(x[1]) + d = defineorder + if f == 'present': + # 'present(set)' is known to return the argument set with no + # modification, so forward the current order to its argument + d = order + return (op, x[1], _analyze(x[2], d), order) + raise ValueError('invalid operator %r' % op) + +def analyze(x, order=defineorder): + """Transform raw parsed tree to evaluatable tree which can be fed to + optimize() or getset() + + All pseudo operations should be mapped to real operations or functions + defined in methods or symbols table respectively. 
+ + 'order' specifies how the current expression 'x' is ordered (see the + constants defined above.) + """ + return _analyze(x, order) + +def _optimize(x, small): + if x is None: + return 0, x + + smallbonus = 1 + if small: + smallbonus = .5 + + op = x[0] + if op in ('string', 'symbol'): + return smallbonus, x # single revisions are small + elif op == 'and': + wa, ta = _optimize(x[1], True) + wb, tb = _optimize(x[2], True) + order = x[3] + w = min(wa, wb) + + # (::x and not ::y)/(not ::y and ::x) have a fast path + tm = _matchonly(ta, tb) or _matchonly(tb, ta) + if tm: + return w, ('func', ('symbol', 'only'), tm, order) + + if tb is not None and tb[0] == 'not': + return wa, ('difference', ta, tb[1], order) + + if wa > wb: + return w, (op, tb, ta, order) + return w, (op, ta, tb, order) + elif op == 'or': + # fast path for machine-generated expression, that is likely to have + # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()' + order = x[2] + ws, ts, ss = [], [], [] + def flushss(): + if not ss: + return + if len(ss) == 1: + w, t = ss[0] + else: + s = '\0'.join(t[1] for w, t in ss) + y = ('func', ('symbol', '_list'), ('string', s), order) + w, t = _optimize(y, False) + ws.append(w) + ts.append(t) + del ss[:] + for y in getlist(x[1]): + w, t = _optimize(y, False) + if t is not None and (t[0] == 'string' or t[0] == 'symbol'): + ss.append((w, t)) + continue + flushss() + ws.append(w) + ts.append(t) + flushss() + if len(ts) == 1: + return ws[0], ts[0] # 'or' operation is fully optimized out + # we can't reorder trees by weight because it would change the order. + # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a") + # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0])) + return max(ws), (op, ('list',) + tuple(ts), order) + elif op == 'not': + # Optimize not public() to _notpublic() because we have a fast version + if x[1][:3] == ('func', ('symbol', 'public'), None): + order = x[1][3] + newsym = ('func', ('symbol', '_notpublic'), None, order) + o = _optimize(newsym, not small) + return o[0], o[1] + else: + o = _optimize(x[1], not small) + order = x[2] + return o[0], (op, o[1], order) + elif op == 'rangeall': + return smallbonus, x + elif op in ('rangepre', 'rangepost', 'parentpost'): + o = _optimize(x[1], small) + order = x[2] + return o[0], (op, o[1], order) + elif op in ('dagrange', 'range', 'parent', 'ancestor'): + wa, ta = _optimize(x[1], small) + wb, tb = _optimize(x[2], small) + order = x[3] + return wa + wb, (op, ta, tb, order) + elif op == 'list': + ws, ts = zip(*(_optimize(y, small) for y in x[1:])) + return sum(ws), (op,) + ts + elif op == 'keyvalue': + w, t = _optimize(x[2], small) + return w, (op, x[1], t) + elif op == 'func': + f = getsymbol(x[1]) + wa, ta = _optimize(x[2], small) + if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep', + 'keyword', 'outgoing', 'user', 'destination'): + w = 10 # slow + elif f in ('modifies', 'adds', 'removes'): + w = 30 # slower + elif f == "contains": + w = 100 # very slow + elif f == "ancestor": + w = 1 * smallbonus + elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'): + w = 0 + elif f == "sort": + w = 10 # assume most sorts look at changelog + else: + w = 1 + order = x[3] + return w + wa, (op, x[1], ta, order) + raise ValueError('invalid operator %r' % op) + +def optimize(tree): + """Optimize evaluatable tree + + All pseudo operations should be transformed beforehand. 
+ """ + _weight, newtree = _optimize(tree, small=True) + return newtree + +# the set of valid characters for the initial letter of symbols in +# alias declarations and definitions +_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$')) + +def _parsewith(spec, lookup=None, syminitletters=None): + """Generate a parse tree of given spec with given tokenizing options + + >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters) + ('func', ('symbol', 'foo'), ('symbol', '$1')) + >>> _parsewith('$1') + Traceback (most recent call last): + ... + ParseError: ("syntax error in revset '$1'", 0) + >>> _parsewith('foo bar') + Traceback (most recent call last): + ... + ParseError: ('invalid token', 4) + """ + p = parser.parser(elements) + tree, pos = p.parse(tokenize(spec, lookup=lookup, + syminitletters=syminitletters)) + if pos != len(spec): + raise error.ParseError(_('invalid token'), pos) + return _fixops(parser.simplifyinfixops(tree, ('list', 'or'))) + +class _aliasrules(parser.basealiasrules): + """Parsing and expansion rule set of revset aliases""" + _section = _('revset alias') + + @staticmethod + def _parse(spec): + """Parse alias declaration/definition ``spec`` + + This allows symbol names to use also ``$`` as an initial letter + (for backward compatibility), and callers of this function should + examine whether ``$`` is used also for unexpected symbols or not. + """ + return _parsewith(spec, syminitletters=_aliassyminitletters) + + @staticmethod + def _trygetfunc(tree): + if tree[0] == 'func' and tree[1][0] == 'symbol': + return tree[1][1], getlist(tree[2]) + +def expandaliases(ui, tree): + aliases = _aliasrules.buildmap(ui.configitems('revsetalias')) + tree = _aliasrules.expand(aliases, tree) + # warn about problematic (but not referred) aliases + for name, alias in sorted(aliases.iteritems()): + if alias.error and not alias.warned: + ui.warn(_('warning: %s\n') % (alias.error)) + alias.warned = True + return tree + +def foldconcat(tree): + """Fold elements to be concatenated by `##` + """ + if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): + return tree + if tree[0] == '_concat': + pending = [tree] + l = [] + while pending: + e = pending.pop() + if e[0] == '_concat': + pending.extend(reversed(e[1:])) + elif e[0] in ('string', 'symbol'): + l.append(e[1]) + else: + msg = _("\"##\" can't concatenate \"%s\" element") % (e[0]) + raise error.ParseError(msg) + return ('string', ''.join(l)) + else: + return tuple(foldconcat(t) for t in tree) + +def parse(spec, lookup=None): + return _parsewith(spec, lookup=lookup) + +def formatspec(expr, *args): + ''' + This is a convenience function for using revsets internally, and + escapes arguments appropriately. Aliases are intentionally ignored + so that intended expression behavior isn't accidentally subverted. + + Supported arguments: + + %r = revset expression, parenthesized + %d = int(arg), no quoting + %s = string(arg), escaped and single-quoted + %b = arg.branch(), escaped and single-quoted + %n = hex(arg), single-quoted + %% = a literal '%' + + Prefixing the type with 'l' specifies a parenthesized list of that type. 
+ + >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()")) + '(10 or 11):: and ((this()) or (that()))' + >>> formatspec('%d:: and not %d::', 10, 20) + '10:: and not 20::' + >>> formatspec('%ld or %ld', [], [1]) + "_list('') or 1" + >>> formatspec('keyword(%s)', 'foo\\xe9') + "keyword('foo\\\\xe9')" + >>> b = lambda: 'default' + >>> b.branch = b + >>> formatspec('branch(%b)', b) + "branch('default')" + >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd']) + "root(_list('a\\x00b\\x00c\\x00d'))" + ''' + + def quote(s): + return repr(str(s)) + + def argtype(c, arg): + if c == 'd': + return str(int(arg)) + elif c == 's': + return quote(arg) + elif c == 'r': + parse(arg) # make sure syntax errors are confined + return '(%s)' % arg + elif c == 'n': + return quote(node.hex(arg)) + elif c == 'b': + return quote(arg.branch()) + + def listexp(s, t): + l = len(s) + if l == 0: + return "_list('')" + elif l == 1: + return argtype(t, s[0]) + elif t == 'd': + return "_intlist('%s')" % "\0".join(str(int(a)) for a in s) + elif t == 's': + return "_list('%s')" % "\0".join(s) + elif t == 'n': + return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s) + elif t == 'b': + return "_list('%s')" % "\0".join(a.branch() for a in s) + + m = l // 2 + return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t)) + + ret = '' + pos = 0 + arg = 0 + while pos < len(expr): + c = expr[pos] + if c == '%': + pos += 1 + d = expr[pos] + if d == '%': + ret += d + elif d in 'dsnbr': + ret += argtype(d, args[arg]) + arg += 1 + elif d == 'l': + # a list of some type + pos += 1 + d = expr[pos] + ret += listexp(list(args[arg]), d) + arg += 1 + else: + raise error.Abort(_('unexpected revspec format character %s') + % d) + else: + ret += c + pos += 1 + + return ret + +def prettyformat(tree): + return parser.prettyformat(tree, ('string', 'symbol')) + +def depth(tree): + if isinstance(tree, tuple): + return max(map(depth, tree)) + 1 + else: + return 0 + +def funcsused(tree): + if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): + return set() + else: + funcs = set() + for s in tree[1:]: + funcs |= funcsused(s) + if tree[0] == 'func': + funcs.add(tree[1][1]) + return funcs
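Taken together, formatspec() and parse() give callers a safe way to build revset expressions from program data without quoting bugs. The snippet below simply replays the doctests shown above; it assumes this changeset's mercurial package is importable:

    # Replays the formatspec() doctests above; escaping keeps arguments from
    # being parsed as revset syntax.
    from mercurial import revsetlang

    print(revsetlang.formatspec('%d:: and not %d::', 10, 20))
    # -> 10:: and not 20::
    print(revsetlang.formatspec('root(%ls)', ['a', 'b', 'c', 'd']))
    # -> root(_list('a\x00b\x00c\x00d'))
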
--- a/mercurial/scmposix.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/scmposix.py Sat Mar 11 13:53:14 2017 -0500 @@ -40,8 +40,15 @@ def userrcpath(): if pycompat.sysplatform == 'plan9': return [encoding.environ['home'] + '/lib/hgrc'] + elif pycompat.sysplatform == 'darwin': + return [os.path.expanduser('~/.hgrc')] else: - return [os.path.expanduser('~/.hgrc')] + confighome = encoding.environ.get('XDG_CONFIG_HOME') + if confighome is None or not os.path.isabs(confighome): + confighome = os.path.expanduser('~/.config') + + return [os.path.expanduser('~/.hgrc'), + os.path.join(confighome, 'hg', 'hgrc')] def termsize(ui): try:
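
For readers who want the new lookup rule without tracing the hunk: on POSIX systems other than Plan 9 and macOS, ~/.hgrc is still consulted, and an XDG-style path is now consulted as well. A standalone restatement of that logic (simplified: os.environ stands in for mercurial's encoding.environ):

    # Standalone restatement of the userrcpath() change above.
    import os

    def userrcpath():
        confighome = os.environ.get('XDG_CONFIG_HOME')
        # a relative XDG_CONFIG_HOME is invalid per the basedir spec, so
        # fall back to the default of ~/.config
        if confighome is None or not os.path.isabs(confighome):
            confighome = os.path.expanduser('~/.config')
        return [os.path.expanduser('~/.hgrc'),
                os.path.join(confighome, 'hg', 'hgrc')]

    print(userrcpath())
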
--- a/mercurial/scmutil.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/scmutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -7,17 +7,12 @@ from __future__ import absolute_import -import contextlib import errno import glob import hashlib import os import re -import shutil import socket -import stat -import tempfile -import threading from .i18n import _ from .node import wdirrev @@ -29,9 +24,10 @@ pathutil, phases, pycompat, - revset, + revsetlang, similar, util, + vfs as vfsmod, ) if pycompat.osname == 'nt': @@ -336,455 +332,16 @@ key = s.digest() return key -class abstractvfs(object): - """Abstract base class; cannot be instantiated""" - - def __init__(self, *args, **kwargs): - '''Prevent instantiation; don't call this from subclasses.''' - raise NotImplementedError('attempted instantiating ' + str(type(self))) - - def tryread(self, path): - '''gracefully return an empty string for missing files''' - try: - return self.read(path) - except IOError as inst: - if inst.errno != errno.ENOENT: - raise - return "" - - def tryreadlines(self, path, mode='rb'): - '''gracefully return an empty array for missing files''' - try: - return self.readlines(path, mode=mode) - except IOError as inst: - if inst.errno != errno.ENOENT: - raise - return [] - - @util.propertycache - def open(self): - '''Open ``path`` file, which is relative to vfs root. - - Newly created directories are marked as "not to be indexed by - the content indexing service", if ``notindexed`` is specified - for "write" mode access. - ''' - return self.__call__ - - def read(self, path): - with self(path, 'rb') as fp: - return fp.read() - - def readlines(self, path, mode='rb'): - with self(path, mode=mode) as fp: - return fp.readlines() - - def write(self, path, data, backgroundclose=False): - with self(path, 'wb', backgroundclose=backgroundclose) as fp: - return fp.write(data) - - def writelines(self, path, data, mode='wb', notindexed=False): - with self(path, mode=mode, notindexed=notindexed) as fp: - return fp.writelines(data) - - def append(self, path, data): - with self(path, 'ab') as fp: - return fp.write(data) - - def basename(self, path): - """return base element of a path (as os.path.basename would do) - - This exists to allow handling of strange encoding if needed.""" - return os.path.basename(path) - - def chmod(self, path, mode): - return os.chmod(self.join(path), mode) - - def dirname(self, path): - """return dirname element of a path (as os.path.dirname would do) - - This exists to allow handling of strange encoding if needed.""" - return os.path.dirname(path) - - def exists(self, path=None): - return os.path.exists(self.join(path)) - - def fstat(self, fp): - return util.fstat(fp) - - def isdir(self, path=None): - return os.path.isdir(self.join(path)) - - def isfile(self, path=None): - return os.path.isfile(self.join(path)) - - def islink(self, path=None): - return os.path.islink(self.join(path)) - - def isfileorlink(self, path=None): - '''return whether path is a regular file or a symlink - - Unlike isfile, this doesn't follow symlinks.''' - try: - st = self.lstat(path) - except OSError: - return False - mode = st.st_mode - return stat.S_ISREG(mode) or stat.S_ISLNK(mode) - - def reljoin(self, *paths): - """join various elements of a path together (as os.path.join would do) - - The vfs base is not injected so that path stay relative. 
This exists - to allow handling of strange encoding if needed.""" - return os.path.join(*paths) - - def split(self, path): - """split top-most element of a path (as os.path.split would do) - - This exists to allow handling of strange encoding if needed.""" - return os.path.split(path) - - def lexists(self, path=None): - return os.path.lexists(self.join(path)) - - def lstat(self, path=None): - return os.lstat(self.join(path)) - - def listdir(self, path=None): - return os.listdir(self.join(path)) - - def makedir(self, path=None, notindexed=True): - return util.makedir(self.join(path), notindexed) - - def makedirs(self, path=None, mode=None): - return util.makedirs(self.join(path), mode) - - def makelock(self, info, path): - return util.makelock(info, self.join(path)) - - def mkdir(self, path=None): - return os.mkdir(self.join(path)) - - def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False): - fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, - dir=self.join(dir), text=text) - dname, fname = util.split(name) - if dir: - return fd, os.path.join(dir, fname) - else: - return fd, fname - - def readdir(self, path=None, stat=None, skip=None): - return osutil.listdir(self.join(path), stat, skip) - - def readlock(self, path): - return util.readlock(self.join(path)) - - def rename(self, src, dst, checkambig=False): - """Rename from src to dst - - checkambig argument is used with util.filestat, and is useful - only if destination file is guarded by any lock - (e.g. repo.lock or repo.wlock). - """ - dstpath = self.join(dst) - oldstat = checkambig and util.filestat(dstpath) - if oldstat and oldstat.stat: - ret = util.rename(self.join(src), dstpath) - newstat = util.filestat(dstpath) - if newstat.isambig(oldstat): - # stat of renamed file is ambiguous to original one - newstat.avoidambig(dstpath, oldstat) - return ret - return util.rename(self.join(src), dstpath) - - def readlink(self, path): - return os.readlink(self.join(path)) - - def removedirs(self, path=None): - """Remove a leaf directory and all empty intermediate ones - """ - return util.removedirs(self.join(path)) - - def rmtree(self, path=None, ignore_errors=False, forcibly=False): - """Remove a directory tree recursively - - If ``forcibly``, this tries to remove READ-ONLY files, too. - """ - if forcibly: - def onerror(function, path, excinfo): - if function is not os.remove: - raise - # read-only files cannot be unlinked under Windows - s = os.stat(path) - if (s.st_mode & stat.S_IWRITE) != 0: - raise - os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE) - os.remove(path) - else: - onerror = None - return shutil.rmtree(self.join(path), - ignore_errors=ignore_errors, onerror=onerror) - - def setflags(self, path, l, x): - return util.setflags(self.join(path), l, x) - - def stat(self, path=None): - return os.stat(self.join(path)) - - def unlink(self, path=None): - return util.unlink(self.join(path)) - - def unlinkpath(self, path=None, ignoremissing=False): - return util.unlinkpath(self.join(path), ignoremissing) - - def utime(self, path=None, t=None): - return os.utime(self.join(path), t) - - def walk(self, path=None, onerror=None): - """Yield (dirpath, dirs, files) tuple for each directories under path - - ``dirpath`` is relative one from the root of this vfs. This - uses ``os.sep`` as path separator, even you specify POSIX - style ``path``. - - "The root of this vfs" is represented as empty ``dirpath``. 
- """ - root = os.path.normpath(self.join(None)) - # when dirpath == root, dirpath[prefixlen:] becomes empty - # because len(dirpath) < prefixlen. - prefixlen = len(pathutil.normasprefix(root)) - for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror): - yield (dirpath[prefixlen:], dirs, files) - - @contextlib.contextmanager - def backgroundclosing(self, ui, expectedcount=-1): - """Allow files to be closed asynchronously. - - When this context manager is active, ``backgroundclose`` can be passed - to ``__call__``/``open`` to result in the file possibly being closed - asynchronously, on a background thread. - """ - # This is an arbitrary restriction and could be changed if we ever - # have a use case. - vfs = getattr(self, 'vfs', self) - if getattr(vfs, '_backgroundfilecloser', None): - raise error.Abort( - _('can only have 1 active background file closer')) - - with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc: - try: - vfs._backgroundfilecloser = bfc - yield bfc - finally: - vfs._backgroundfilecloser = None - -class vfs(abstractvfs): - '''Operate files relative to a base directory - - This class is used to hide the details of COW semantics and - remote file access from higher level code. - ''' - def __init__(self, base, audit=True, expandpath=False, realpath=False): - if expandpath: - base = util.expandpath(base) - if realpath: - base = os.path.realpath(base) - self.base = base - self.mustaudit = audit - self.createmode = None - self._trustnlink = None - - @property - def mustaudit(self): - return self._audit - - @mustaudit.setter - def mustaudit(self, onoff): - self._audit = onoff - if onoff: - self.audit = pathutil.pathauditor(self.base) - else: - self.audit = util.always - - @util.propertycache - def _cansymlink(self): - return util.checklink(self.base) - - @util.propertycache - def _chmod(self): - return util.checkexec(self.base) - - def _fixfilemode(self, name): - if self.createmode is None or not self._chmod: - return - os.chmod(name, self.createmode & 0o666) - - def __call__(self, path, mode="r", text=False, atomictemp=False, - notindexed=False, backgroundclose=False, checkambig=False): - '''Open ``path`` file, which is relative to vfs root. - - Newly created directories are marked as "not to be indexed by - the content indexing service", if ``notindexed`` is specified - for "write" mode access. - - If ``backgroundclose`` is passed, the file may be closed asynchronously. - It can only be used if the ``self.backgroundclosing()`` context manager - is active. This should only be specified if the following criteria hold: - - 1. There is a potential for writing thousands of files. Unless you - are writing thousands of files, the performance benefits of - asynchronously closing files is not realized. - 2. Files are opened exactly once for the ``backgroundclosing`` - active duration and are therefore free of race conditions between - closing a file on a background thread and reopening it. (If the - file were opened multiple times, there could be unflushed data - because the original file handle hasn't been flushed/closed yet.) - - ``checkambig`` argument is passed to atomictemplfile (valid - only for writing), and is useful only if target file is - guarded by any lock (e.g. repo.lock or repo.wlock). 
- ''' - if self._audit: - r = util.checkosfilename(path) - if r: - raise error.Abort("%s: %r" % (r, path)) - self.audit(path) - f = self.join(path) - - if not text and "b" not in mode: - mode += "b" # for that other OS - - nlink = -1 - if mode not in ('r', 'rb'): - dirname, basename = util.split(f) - # If basename is empty, then the path is malformed because it points - # to a directory. Let the posixfile() call below raise IOError. - if basename: - if atomictemp: - util.makedirs(dirname, self.createmode, notindexed) - return util.atomictempfile(f, mode, self.createmode, - checkambig=checkambig) - try: - if 'w' in mode: - util.unlink(f) - nlink = 0 - else: - # nlinks() may behave differently for files on Windows - # shares if the file is open. - with util.posixfile(f): - nlink = util.nlinks(f) - if nlink < 1: - nlink = 2 # force mktempcopy (issue1922) - except (OSError, IOError) as e: - if e.errno != errno.ENOENT: - raise - nlink = 0 - util.makedirs(dirname, self.createmode, notindexed) - if nlink > 0: - if self._trustnlink is None: - self._trustnlink = nlink > 1 or util.checknlink(f) - if nlink > 1 or not self._trustnlink: - util.rename(util.mktempcopy(f), f) - fp = util.posixfile(f, mode) - if nlink == 0: - self._fixfilemode(f) - - if checkambig: - if mode in ('r', 'rb'): - raise error.Abort(_('implementation error: mode %s is not' - ' valid for checkambig=True') % mode) - fp = checkambigatclosing(fp) - - if backgroundclose: - if not self._backgroundfilecloser: - raise error.Abort(_('backgroundclose can only be used when a ' - 'backgroundclosing context manager is active') - ) - - fp = delayclosedfile(fp, self._backgroundfilecloser) - - return fp - - def symlink(self, src, dst): - self.audit(dst) - linkname = self.join(dst) - try: - os.unlink(linkname) - except OSError: - pass - - util.makedirs(os.path.dirname(linkname), self.createmode) - - if self._cansymlink: - try: - os.symlink(src, linkname) - except OSError as err: - raise OSError(err.errno, _('could not symlink to %r: %s') % - (src, err.strerror), linkname) - else: - self.write(dst, src) - - def join(self, path, *insidef): - if path: - return os.path.join(self.base, path, *insidef) - else: - return self.base - -opener = vfs - -class auditvfs(object): - def __init__(self, vfs): - self.vfs = vfs - - @property - def mustaudit(self): - return self.vfs.mustaudit - - @mustaudit.setter - def mustaudit(self, onoff): - self.vfs.mustaudit = onoff - - @property - def options(self): - return self.vfs.options - - @options.setter - def options(self, value): - self.vfs.options = value - -class filtervfs(abstractvfs, auditvfs): - '''Wrapper vfs for filtering filenames with a function.''' - - def __init__(self, vfs, filter): - auditvfs.__init__(self, vfs) - self._filter = filter - - def __call__(self, path, *args, **kwargs): - return self.vfs(self._filter(path), *args, **kwargs) - - def join(self, path, *insidef): - if path: - return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef))) - else: - return self.vfs.join(path) - -filteropener = filtervfs - -class readonlyvfs(abstractvfs, auditvfs): - '''Wrapper vfs preventing any writing.''' - - def __init__(self, vfs): - auditvfs.__init__(self, vfs) - - def __call__(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rb'): - raise error.Abort(_('this vfs is read only')) - return self.vfs(path, mode, *args, **kw) - - def join(self, path, *insidef): - return self.vfs.join(path, *insidef) +# compatibility layer since all 'vfs' code moved to 'mercurial.vfs' +# +# This is hard to instal 
a deprecation warning here since we do not have
+# access to a 'ui' object.
+opener = vfs = vfsmod.vfs
+filteropener = filtervfs = vfsmod.filtervfs
+abstractvfs = vfsmod.abstractvfs
+readonlyvfs = vfsmod.readonlyvfs
+auditvfs = vfsmod.auditvfs
+checkambigatclosing = vfsmod.checkambigatclosing
 
 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
     '''yield every hg repository under path, always recursively.
@@ -890,7 +447,7 @@
     return repo[l.last()]
 
 def _pairspec(revspec):
-    tree = revset.parse(revspec)
+    tree = revsetlang.parse(revspec)
     return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
 
 def revpair(repo, revs):
@@ -936,7 +493,7 @@
     revision numbers.
 
     It is assumed the revsets are already formatted. If you have arguments
-    that need to be expanded in the revset, call ``revset.formatspec()``
+    that need to be expanded in the revset, call ``revsetlang.formatspec()``
     and pass the result as an element of ``specs``.
 
     Specifying a single revset is allowed.
@@ -947,10 +504,9 @@
     allspecs = []
     for spec in specs:
         if isinstance(spec, int):
-            spec = revset.formatspec('rev(%d)', spec)
+            spec = revsetlang.formatspec('rev(%d)', spec)
         allspecs.append(spec)
-    m = revset.matchany(repo.ui, allspecs, repo)
-    return m(repo)
+    return repo.anyrevs(allspecs, user=True)
 
 def meaningfulparents(repo, ctx):
     """Return list of meaningful (or all if debug) parentrevs for rev.
@@ -1325,7 +881,7 @@
         function to call the appropriate join function on 'obj'
         (an instance of the class that its member function was decorated).
         """
-        return obj.join(fname)
+        raise NotImplementedError
 
     def __call__(self, func):
         self.func = func
@@ -1409,165 +965,3 @@
     """
     # experimental config: format.generaldelta
     return ui.configbool('format', 'generaldelta', False)
-
-class closewrapbase(object):
-    """Base class of wrapper, which hooks closing
-
-    Do not instantiate outside of the vfs layer.
-    """
-    def __init__(self, fh):
-        object.__setattr__(self, '_origfh', fh)
-
-    def __getattr__(self, attr):
-        return getattr(self._origfh, attr)
-
-    def __setattr__(self, attr, value):
-        return setattr(self._origfh, attr, value)
-
-    def __delattr__(self, attr):
-        return delattr(self._origfh, attr)
-
-    def __enter__(self):
-        return self._origfh.__enter__()
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
-
-    def close(self):
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
-
-class delayclosedfile(closewrapbase):
-    """Proxy for a file object whose close is delayed.
-
-    Do not instantiate outside of the vfs layer.
-    """
-    def __init__(self, fh, closer):
-        super(delayclosedfile, self).__init__(fh)
-        object.__setattr__(self, '_closer', closer)
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        self._closer.close(self._origfh)
-
-    def close(self):
-        self._closer.close(self._origfh)
-
-class backgroundfilecloser(object):
-    """Coordinates background closing of file handles on multiple threads."""
-    def __init__(self, ui, expectedcount=-1):
-        self._running = False
-        self._entered = False
-        self._threads = []
-        self._threadexception = None
-
-        # Only Windows/NTFS has slow file closing. So only enable by default
-        # on that platform. But allow to be enabled elsewhere for testing.
-        defaultenabled = pycompat.osname == 'nt'
-        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
-
-        if not enabled:
-            return
-
-        # There is overhead to starting and stopping the background threads.
- # Don't do background processing unless the file count is large enough - # to justify it. - minfilecount = ui.configint('worker', 'backgroundcloseminfilecount', - 2048) - # FUTURE dynamically start background threads after minfilecount closes. - # (We don't currently have any callers that don't know their file count) - if expectedcount > 0 and expectedcount < minfilecount: - return - - # Windows defaults to a limit of 512 open files. A buffer of 128 - # should give us enough headway. - maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384) - threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4) - - ui.debug('starting %d threads for background file closing\n' % - threadcount) - - self._queue = util.queue(maxsize=maxqueue) - self._running = True - - for i in range(threadcount): - t = threading.Thread(target=self._worker, name='backgroundcloser') - self._threads.append(t) - t.start() - - def __enter__(self): - self._entered = True - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - self._running = False - - # Wait for threads to finish closing so open files don't linger for - # longer than lifetime of context manager. - for t in self._threads: - t.join() - - def _worker(self): - """Main routine for worker thread.""" - while True: - try: - fh = self._queue.get(block=True, timeout=0.100) - # Need to catch or the thread will terminate and - # we could orphan file descriptors. - try: - fh.close() - except Exception as e: - # Stash so can re-raise from main thread later. - self._threadexception = e - except util.empty: - if not self._running: - break - - def close(self, fh): - """Schedule a file for closing.""" - if not self._entered: - raise error.Abort(_('can only call close() when context manager ' - 'active')) - - # If a background thread encountered an exception, raise now so we fail - # fast. Otherwise we may potentially go on for minutes until the error - # is acted on. - if self._threadexception: - e = self._threadexception - self._threadexception = None - raise e - - # If we're not actively running, close synchronously. - if not self._running: - fh.close() - return - - self._queue.put(fh, block=True, timeout=None) - -class checkambigatclosing(closewrapbase): - """Proxy for a file object, to avoid ambiguity of file stat - - See also util.filestat for detail about "ambiguity of file stat". - - This proxy is useful only if the target file is guarded by any - lock (e.g. repo.lock or repo.wlock) - - Do not instantiate outside of the vfs layer. - """ - def __init__(self, fh): - super(checkambigatclosing, self).__init__(fh) - object.__setattr__(self, '_oldstat', util.filestat(fh.name)) - - def _checkambig(self): - oldstat = self._oldstat - if oldstat.stat: - newstat = util.filestat(self._origfh.name) - if newstat.isambig(oldstat): - # stat of changed file is ambiguous to original one - newstat.avoidambig(self._origfh.name, oldstat) - - def __exit__(self, exc_type, exc_value, exc_tb): - self._origfh.__exit__(exc_type, exc_value, exc_tb) - self._checkambig() - - def close(self): - self._origfh.close() - self._checkambig()
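The backgroundfilecloser being removed above (it now lives in mercurial/vfs.py) exists because closing file handles is slow on Windows/NTFS: handles are fed through a bounded queue to a small pool of worker threads. Below is a minimal standalone sketch of that pattern, using Python 3 stdlib names instead of Mercurial's util compatibility aliases; the class name and defaults are illustrative, not Mercurial's API.

import queue
import threading

class backgroundcloser(object):
    '''Close file handles on worker threads; a simplified sketch of
    the queue-plus-workers pattern used by backgroundfilecloser.'''
    def __init__(self, threadcount=4, maxqueue=384):
        self._queue = queue.Queue(maxsize=maxqueue)
        self._running = True
        self._threads = [threading.Thread(target=self._worker)
                         for _ in range(threadcount)]
        for t in self._threads:
            t.start()

    def _worker(self):
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.1)
                try:
                    fh.close()
                except Exception:
                    pass  # the real code stashes this and re-raises later
            except queue.Empty:
                if not self._running:
                    break

    def close(self, fh):
        # hand the handle off; a worker closes it asynchronously
        self._queue.put(fh, block=True, timeout=None)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False
        for t in self._threads:
            t.join()  # no handle outlives the context manager

Usage mirrors the vfs layer: enter the context manager, pass finished handles to close(), and let __exit__ join the workers.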
--- a/mercurial/similar.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/mercurial/similar.py	Sat Mar 11 13:53:14 2017 -0500
@@ -35,9 +35,13 @@
     for i, fctx in enumerate(added):
         repo.ui.progress(_('searching for exact renames'), i + len(removed),
                          total=numfiles, unit=_('files'))
-        h = hashlib.sha1(fctx.data()).digest()
+        adata = fctx.data()
+        h = hashlib.sha1(adata).digest()
         if h in hashes:
-            yield (hashes[h], fctx)
+            rfctx = hashes[h]
+            # compare actual file contents for exact identity
+            if adata == rfctx.data():
+                yield (rfctx, fctx)
 
     # Done
     repo.ui.progress(_('searching for exact renames'), None)
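The hunk above stops trusting a bare SHA-1 match and re-reads both files to confirm byte-for-byte identity before reporting an exact rename, so a hash collision can no longer produce a bogus copy record. A standalone sketch of the hash-then-verify pattern (plain dicts of name -> bytes here, not Mercurial's fctx objects):

import hashlib

def exactmatches(removed, added):
    '''Yield (oldname, newname) pairs whose contents are identical.'''
    hashes = {}
    for name, data in removed.items():
        hashes[hashlib.sha1(data).digest()] = (name, data)
    for name, data in added.items():
        match = hashes.get(hashlib.sha1(data).digest())
        # verify the actual bytes; never trust the digest alone
        if match is not None and match[1] == data:
            yield (match[0], name)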
--- a/mercurial/simplemerge.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/simplemerge.py Sat Mar 11 13:53:14 2017 -0500 @@ -24,8 +24,8 @@ from . import ( error, mdiff, - scmutil, util, + vfs as vfsmod, ) class CantReprocessAndShowBase(Exception): @@ -437,7 +437,7 @@ local = os.path.realpath(local) if not opts.get('print'): - opener = scmutil.opener(os.path.dirname(local)) + opener = vfsmod.vfs(os.path.dirname(local)) out = opener(os.path.basename(local), "w", atomictemp=True) else: out = ui.fout
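Like many files in this changeset, simplemerge now reaches the vfs class directly through mercurial.vfs rather than the old scmutil alias. A hedged sketch of the calling pattern in the hunk above (directory and file names are made up):

from mercurial import vfs as vfsmod

# a vfs is rooted at a base directory; paths handed to it are relative
opener = vfsmod.vfs('/tmp/merge-output')          # hypothetical directory
out = opener('result.txt', 'w', atomictemp=True)  # writes go to a temp file
out.write('merged text\n')
out.close()                                       # atomictemp renames into place here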
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/smartset.py Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,1054 @@ +# smartset.py - data structure for revision set +# +# Copyright 2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +from . import ( + util, +) + +def _formatsetrepr(r): + """Format an optional printable representation of a set + + ======== ================================= + type(r) example + ======== ================================= + tuple ('<not %r>', other) + str '<branch closed>' + callable lambda: '<branch %r>' % sorted(b) + object other + ======== ================================= + """ + if r is None: + return '' + elif isinstance(r, tuple): + return r[0] % r[1:] + elif isinstance(r, str): + return r + elif callable(r): + return r() + else: + return repr(r) + +class abstractsmartset(object): + + def __nonzero__(self): + """True if the smartset is not empty""" + raise NotImplementedError() + + def __contains__(self, rev): + """provide fast membership testing""" + raise NotImplementedError() + + def __iter__(self): + """iterate the set in the order it is supposed to be iterated""" + raise NotImplementedError() + + # Attributes containing a function to perform a fast iteration in a given + # direction. A smartset can have none, one, or both defined. + # + # Default value is None instead of a function returning None to avoid + # initializing an iterator just for testing if a fast method exists. + fastasc = None + fastdesc = None + + def isascending(self): + """True if the set will iterate in ascending order""" + raise NotImplementedError() + + def isdescending(self): + """True if the set will iterate in descending order""" + raise NotImplementedError() + + def istopo(self): + """True if the set will iterate in topographical order""" + raise NotImplementedError() + + def min(self): + """return the minimum element in the set""" + if self.fastasc is None: + v = min(self) + else: + for v in self.fastasc(): + break + else: + raise ValueError('arg is an empty sequence') + self.min = lambda: v + return v + + def max(self): + """return the maximum element in the set""" + if self.fastdesc is None: + return max(self) + else: + for v in self.fastdesc(): + break + else: + raise ValueError('arg is an empty sequence') + self.max = lambda: v + return v + + def first(self): + """return the first element in the set (user iteration perspective) + + Return None if the set is empty""" + raise NotImplementedError() + + def last(self): + """return the last element in the set (user iteration perspective) + + Return None if the set is empty""" + raise NotImplementedError() + + def __len__(self): + """return the length of the smartsets + + This can be expensive on smartset that could be lazy otherwise.""" + raise NotImplementedError() + + def reverse(self): + """reverse the expected iteration order""" + raise NotImplementedError() + + def sort(self, reverse=True): + """get the set to iterate in an ascending or descending order""" + raise NotImplementedError() + + def __and__(self, other): + """Returns a new object with the intersection of the two collections. 
+ + This is part of the mandatory API for smartset.""" + if isinstance(other, fullreposet): + return self + return self.filter(other.__contains__, condrepr=other, cache=False) + + def __add__(self, other): + """Returns a new object with the union of the two collections. + + This is part of the mandatory API for smartset.""" + return addset(self, other) + + def __sub__(self, other): + """Returns a new object with the substraction of the two collections. + + This is part of the mandatory API for smartset.""" + c = other.__contains__ + return self.filter(lambda r: not c(r), condrepr=('<not %r>', other), + cache=False) + + def filter(self, condition, condrepr=None, cache=True): + """Returns this smartset filtered by condition as a new smartset. + + `condition` is a callable which takes a revision number and returns a + boolean. Optional `condrepr` provides a printable representation of + the given `condition`. + + This is part of the mandatory API for smartset.""" + # builtin cannot be cached. but do not needs to + if cache and util.safehasattr(condition, 'func_code'): + condition = util.cachefunc(condition) + return filteredset(self, condition, condrepr) + +class baseset(abstractsmartset): + """Basic data structure that represents a revset and contains the basic + operation that it should be able to perform. + + Every method in this class should be implemented by any smartset class. + + This class could be constructed by an (unordered) set, or an (ordered) + list-like object. If a set is provided, it'll be sorted lazily. + + >>> x = [4, 0, 7, 6] + >>> y = [5, 6, 7, 3] + + Construct by a set: + >>> xs = baseset(set(x)) + >>> ys = baseset(set(y)) + >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] + [[0, 4, 6, 7, 3, 5], [6, 7], [0, 4]] + >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] + ['addset', 'baseset', 'baseset'] + + Construct by a list-like: + >>> xs = baseset(x) + >>> ys = baseset(i for i in y) + >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] + [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]] + >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] + ['addset', 'filteredset', 'filteredset'] + + Populate "_set" fields in the lists so set optimization may be used: + >>> [1 in xs, 3 in ys] + [False, True] + + Without sort(), results won't be changed: + >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] + [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]] + >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] + ['addset', 'filteredset', 'filteredset'] + + With sort(), set optimization could be used: + >>> xs.sort(reverse=True) + >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] + [[7, 6, 4, 0, 5, 3], [7, 6], [4, 0]] + >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] + ['addset', 'baseset', 'baseset'] + + >>> ys.sort() + >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]] + [[7, 6, 4, 0, 3, 5], [7, 6], [4, 0]] + >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]] + ['addset', 'baseset', 'baseset'] + + istopo is preserved across set operations + >>> xs = baseset(set(x), istopo=True) + >>> rs = xs & ys + >>> type(rs).__name__ + 'baseset' + >>> rs._istopo + True + """ + def __init__(self, data=(), datarepr=None, istopo=False): + """ + datarepr: a tuple of (format, obj, ...), a function or an object that + provides a printable representation of the given data. 
+ """ + self._ascending = None + self._istopo = istopo + if isinstance(data, set): + # converting set to list has a cost, do it lazily + self._set = data + # set has no order we pick one for stability purpose + self._ascending = True + else: + if not isinstance(data, list): + data = list(data) + self._list = data + self._datarepr = datarepr + + @util.propertycache + def _set(self): + return set(self._list) + + @util.propertycache + def _asclist(self): + asclist = self._list[:] + asclist.sort() + return asclist + + @util.propertycache + def _list(self): + # _list is only lazily constructed if we have _set + assert '_set' in self.__dict__ + return list(self._set) + + def __iter__(self): + if self._ascending is None: + return iter(self._list) + elif self._ascending: + return iter(self._asclist) + else: + return reversed(self._asclist) + + def fastasc(self): + return iter(self._asclist) + + def fastdesc(self): + return reversed(self._asclist) + + @util.propertycache + def __contains__(self): + return self._set.__contains__ + + def __nonzero__(self): + return bool(len(self)) + + def sort(self, reverse=False): + self._ascending = not bool(reverse) + self._istopo = False + + def reverse(self): + if self._ascending is None: + self._list.reverse() + else: + self._ascending = not self._ascending + self._istopo = False + + def __len__(self): + if '_list' in self.__dict__: + return len(self._list) + else: + return len(self._set) + + def isascending(self): + """Returns True if the collection is ascending order, False if not. + + This is part of the mandatory API for smartset.""" + if len(self) <= 1: + return True + return self._ascending is not None and self._ascending + + def isdescending(self): + """Returns True if the collection is descending order, False if not. + + This is part of the mandatory API for smartset.""" + if len(self) <= 1: + return True + return self._ascending is not None and not self._ascending + + def istopo(self): + """Is the collection is in topographical order or not. + + This is part of the mandatory API for smartset.""" + if len(self) <= 1: + return True + return self._istopo + + def first(self): + if self: + if self._ascending is None: + return self._list[0] + elif self._ascending: + return self._asclist[0] + else: + return self._asclist[-1] + return None + + def last(self): + if self: + if self._ascending is None: + return self._list[-1] + elif self._ascending: + return self._asclist[-1] + else: + return self._asclist[0] + return None + + def _fastsetop(self, other, op): + # try to use native set operations as fast paths + if (type(other) is baseset and '_set' in other.__dict__ and '_set' in + self.__dict__ and self._ascending is not None): + s = baseset(data=getattr(self._set, op)(other._set), + istopo=self._istopo) + s._ascending = self._ascending + else: + s = getattr(super(baseset, self), op)(other) + return s + + def __and__(self, other): + return self._fastsetop(other, '__and__') + + def __sub__(self, other): + return self._fastsetop(other, '__sub__') + + def __repr__(self): + d = {None: '', False: '-', True: '+'}[self._ascending] + s = _formatsetrepr(self._datarepr) + if not s: + l = self._list + # if _list has been built from a set, it might have a different + # order from one python implementation to another. + # We fallback to the sorted version for a stable output. 
+ if self._ascending is not None: + l = self._asclist + s = repr(l) + return '<%s%s %s>' % (type(self).__name__, d, s) + +class filteredset(abstractsmartset): + """Duck type for baseset class which iterates lazily over the revisions in + the subset and contains a function which tests for membership in the + revset + """ + def __init__(self, subset, condition=lambda x: True, condrepr=None): + """ + condition: a function that decide whether a revision in the subset + belongs to the revset or not. + condrepr: a tuple of (format, obj, ...), a function or an object that + provides a printable representation of the given condition. + """ + self._subset = subset + self._condition = condition + self._condrepr = condrepr + + def __contains__(self, x): + return x in self._subset and self._condition(x) + + def __iter__(self): + return self._iterfilter(self._subset) + + def _iterfilter(self, it): + cond = self._condition + for x in it: + if cond(x): + yield x + + @property + def fastasc(self): + it = self._subset.fastasc + if it is None: + return None + return lambda: self._iterfilter(it()) + + @property + def fastdesc(self): + it = self._subset.fastdesc + if it is None: + return None + return lambda: self._iterfilter(it()) + + def __nonzero__(self): + fast = None + candidates = [self.fastasc if self.isascending() else None, + self.fastdesc if self.isdescending() else None, + self.fastasc, + self.fastdesc] + for candidate in candidates: + if candidate is not None: + fast = candidate + break + + if fast is not None: + it = fast() + else: + it = self + + for r in it: + return True + return False + + def __len__(self): + # Basic implementation to be changed in future patches. + # until this gets improved, we use generator expression + # here, since list comprehensions are free to call __len__ again + # causing infinite recursion + l = baseset(r for r in self) + return len(l) + + def sort(self, reverse=False): + self._subset.sort(reverse=reverse) + + def reverse(self): + self._subset.reverse() + + def isascending(self): + return self._subset.isascending() + + def isdescending(self): + return self._subset.isdescending() + + def istopo(self): + return self._subset.istopo() + + def first(self): + for x in self: + return x + return None + + def last(self): + it = None + if self.isascending(): + it = self.fastdesc + elif self.isdescending(): + it = self.fastasc + if it is not None: + for x in it(): + return x + return None #empty case + else: + x = None + for x in self: + pass + return x + + def __repr__(self): + xs = [repr(self._subset)] + s = _formatsetrepr(self._condrepr) + if s: + xs.append(s) + return '<%s %s>' % (type(self).__name__, ', '.join(xs)) + +def _iterordered(ascending, iter1, iter2): + """produce an ordered iteration from two iterators with the same order + + The ascending is used to indicated the iteration direction. 
+ """ + choice = max + if ascending: + choice = min + + val1 = None + val2 = None + try: + # Consume both iterators in an ordered way until one is empty + while True: + if val1 is None: + val1 = next(iter1) + if val2 is None: + val2 = next(iter2) + n = choice(val1, val2) + yield n + if val1 == n: + val1 = None + if val2 == n: + val2 = None + except StopIteration: + # Flush any remaining values and consume the other one + it = iter2 + if val1 is not None: + yield val1 + it = iter1 + elif val2 is not None: + # might have been equality and both are empty + yield val2 + for val in it: + yield val + +class addset(abstractsmartset): + """Represent the addition of two sets + + Wrapper structure for lazily adding two structures without losing much + performance on the __contains__ method + + If the ascending attribute is set, that means the two structures are + ordered in either an ascending or descending way. Therefore, we can add + them maintaining the order by iterating over both at the same time + + >>> xs = baseset([0, 3, 2]) + >>> ys = baseset([5, 2, 4]) + + >>> rs = addset(xs, ys) + >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last() + (True, True, False, True, 0, 4) + >>> rs = addset(xs, baseset([])) + >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last() + (True, True, False, 0, 2) + >>> rs = addset(baseset([]), baseset([])) + >>> bool(rs), 0 in rs, rs.first(), rs.last() + (False, False, None, None) + + iterate unsorted: + >>> rs = addset(xs, ys) + >>> # (use generator because pypy could call len()) + >>> list(x for x in rs) # without _genlist + [0, 3, 2, 5, 4] + >>> assert not rs._genlist + >>> len(rs) + 5 + >>> [x for x in rs] # with _genlist + [0, 3, 2, 5, 4] + >>> assert rs._genlist + + iterate ascending: + >>> rs = addset(xs, ys, ascending=True) + >>> # (use generator because pypy could call len()) + >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist + ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) + >>> assert not rs._asclist + >>> len(rs) + 5 + >>> [x for x in rs], [x for x in rs.fastasc()] + ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5]) + >>> assert rs._asclist + + iterate descending: + >>> rs = addset(xs, ys, ascending=False) + >>> # (use generator because pypy could call len()) + >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist + ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) + >>> assert not rs._asclist + >>> len(rs) + 5 + >>> [x for x in rs], [x for x in rs.fastdesc()] + ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0]) + >>> assert rs._asclist + + iterate ascending without fastasc: + >>> rs = addset(xs, generatorset(ys), ascending=True) + >>> assert rs.fastasc is None + >>> [x for x in rs] + [0, 2, 3, 4, 5] + + iterate descending without fastdesc: + >>> rs = addset(generatorset(xs), ys, ascending=False) + >>> assert rs.fastdesc is None + >>> [x for x in rs] + [5, 4, 3, 2, 0] + """ + def __init__(self, revs1, revs2, ascending=None): + self._r1 = revs1 + self._r2 = revs2 + self._iter = None + self._ascending = ascending + self._genlist = None + self._asclist = None + + def __len__(self): + return len(self._list) + + def __nonzero__(self): + return bool(self._r1) or bool(self._r2) + + @util.propertycache + def _list(self): + if not self._genlist: + self._genlist = baseset(iter(self)) + return self._genlist + + def __iter__(self): + """Iterate over both collections without repeating elements + + If the ascending attribute is not set, iterate over the first one and + then over the second one checking for membership on the first one so we + dont yield any duplicates. 
+ + If the ascending attribute is set, iterate over both collections at the + same time, yielding only one value at a time in the given order. + """ + if self._ascending is None: + if self._genlist: + return iter(self._genlist) + def arbitraryordergen(): + for r in self._r1: + yield r + inr1 = self._r1.__contains__ + for r in self._r2: + if not inr1(r): + yield r + return arbitraryordergen() + # try to use our own fast iterator if it exists + self._trysetasclist() + if self._ascending: + attr = 'fastasc' + else: + attr = 'fastdesc' + it = getattr(self, attr) + if it is not None: + return it() + # maybe half of the component supports fast + # get iterator for _r1 + iter1 = getattr(self._r1, attr) + if iter1 is None: + # let's avoid side effect (not sure it matters) + iter1 = iter(sorted(self._r1, reverse=not self._ascending)) + else: + iter1 = iter1() + # get iterator for _r2 + iter2 = getattr(self._r2, attr) + if iter2 is None: + # let's avoid side effect (not sure it matters) + iter2 = iter(sorted(self._r2, reverse=not self._ascending)) + else: + iter2 = iter2() + return _iterordered(self._ascending, iter1, iter2) + + def _trysetasclist(self): + """populate the _asclist attribute if possible and necessary""" + if self._genlist is not None and self._asclist is None: + self._asclist = sorted(self._genlist) + + @property + def fastasc(self): + self._trysetasclist() + if self._asclist is not None: + return self._asclist.__iter__ + iter1 = self._r1.fastasc + iter2 = self._r2.fastasc + if None in (iter1, iter2): + return None + return lambda: _iterordered(True, iter1(), iter2()) + + @property + def fastdesc(self): + self._trysetasclist() + if self._asclist is not None: + return self._asclist.__reversed__ + iter1 = self._r1.fastdesc + iter2 = self._r2.fastdesc + if None in (iter1, iter2): + return None + return lambda: _iterordered(False, iter1(), iter2()) + + def __contains__(self, x): + return x in self._r1 or x in self._r2 + + def sort(self, reverse=False): + """Sort the added set + + For this we use the cached list with all the generated values and if we + know they are ascending or descending we can sort them in a smart way. + """ + self._ascending = not reverse + + def isascending(self): + return self._ascending is not None and self._ascending + + def isdescending(self): + return self._ascending is not None and not self._ascending + + def istopo(self): + # not worth the trouble asserting if the two sets combined are still + # in topographical order. Use the sort() predicate to explicitly sort + # again instead. + return False + + def reverse(self): + if self._ascending is None: + self._list.reverse() + else: + self._ascending = not self._ascending + + def first(self): + for x in self: + return x + return None + + def last(self): + self.reverse() + val = self.first() + self.reverse() + return val + + def __repr__(self): + d = {None: '', False: '-', True: '+'}[self._ascending] + return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2) + +class generatorset(abstractsmartset): + """Wrap a generator for lazy iteration + + Wrapper structure for generators that provides lazy membership and can + be iterated more than once. + When asked for membership it generates values until either it finds the + requested one or has gone through all the elements in the generator + """ + def __init__(self, gen, iterasc=None): + """ + gen: a generator producing the values for the generatorset. 
+ """ + self._gen = gen + self._asclist = None + self._cache = {} + self._genlist = [] + self._finished = False + self._ascending = True + if iterasc is not None: + if iterasc: + self.fastasc = self._iterator + self.__contains__ = self._asccontains + else: + self.fastdesc = self._iterator + self.__contains__ = self._desccontains + + def __nonzero__(self): + # Do not use 'for r in self' because it will enforce the iteration + # order (default ascending), possibly unrolling a whole descending + # iterator. + if self._genlist: + return True + for r in self._consumegen(): + return True + return False + + def __contains__(self, x): + if x in self._cache: + return self._cache[x] + + # Use new values only, as existing values would be cached. + for l in self._consumegen(): + if l == x: + return True + + self._cache[x] = False + return False + + def _asccontains(self, x): + """version of contains optimised for ascending generator""" + if x in self._cache: + return self._cache[x] + + # Use new values only, as existing values would be cached. + for l in self._consumegen(): + if l == x: + return True + if l > x: + break + + self._cache[x] = False + return False + + def _desccontains(self, x): + """version of contains optimised for descending generator""" + if x in self._cache: + return self._cache[x] + + # Use new values only, as existing values would be cached. + for l in self._consumegen(): + if l == x: + return True + if l < x: + break + + self._cache[x] = False + return False + + def __iter__(self): + if self._ascending: + it = self.fastasc + else: + it = self.fastdesc + if it is not None: + return it() + # we need to consume the iterator + for x in self._consumegen(): + pass + # recall the same code + return iter(self) + + def _iterator(self): + if self._finished: + return iter(self._genlist) + + # We have to use this complex iteration strategy to allow multiple + # iterations at the same time. We need to be able to catch revision + # removed from _consumegen and added to genlist in another instance. + # + # Getting rid of it would provide an about 15% speed up on this + # iteration. + genlist = self._genlist + nextrev = self._consumegen().next + _len = len # cache global lookup + def gen(): + i = 0 + while True: + if i < _len(genlist): + yield genlist[i] + else: + yield nextrev() + i += 1 + return gen() + + def _consumegen(self): + cache = self._cache + genlist = self._genlist.append + for item in self._gen: + cache[item] = True + genlist(item) + yield item + if not self._finished: + self._finished = True + asc = self._genlist[:] + asc.sort() + self._asclist = asc + self.fastasc = asc.__iter__ + self.fastdesc = asc.__reversed__ + + def __len__(self): + for x in self._consumegen(): + pass + return len(self._genlist) + + def sort(self, reverse=False): + self._ascending = not reverse + + def reverse(self): + self._ascending = not self._ascending + + def isascending(self): + return self._ascending + + def isdescending(self): + return not self._ascending + + def istopo(self): + # not worth the trouble asserting if the two sets combined are still + # in topographical order. Use the sort() predicate to explicitly sort + # again instead. 
+ return False + + def first(self): + if self._ascending: + it = self.fastasc + else: + it = self.fastdesc + if it is None: + # we need to consume all and try again + for x in self._consumegen(): + pass + return self.first() + return next(it(), None) + + def last(self): + if self._ascending: + it = self.fastdesc + else: + it = self.fastasc + if it is None: + # we need to consume all and try again + for x in self._consumegen(): + pass + return self.first() + return next(it(), None) + + def __repr__(self): + d = {False: '-', True: '+'}[self._ascending] + return '<%s%s>' % (type(self).__name__, d) + +class spanset(abstractsmartset): + """Duck type for baseset class which represents a range of revisions and + can work lazily and without having all the range in memory + + Note that spanset(x, y) behave almost like xrange(x, y) except for two + notable points: + - when x < y it will be automatically descending, + - revision filtered with this repoview will be skipped. + + """ + def __init__(self, repo, start=0, end=None): + """ + start: first revision included the set + (default to 0) + end: first revision excluded (last+1) + (default to len(repo) + + Spanset will be descending if `end` < `start`. + """ + if end is None: + end = len(repo) + self._ascending = start <= end + if not self._ascending: + start, end = end + 1, start +1 + self._start = start + self._end = end + self._hiddenrevs = repo.changelog.filteredrevs + + def sort(self, reverse=False): + self._ascending = not reverse + + def reverse(self): + self._ascending = not self._ascending + + def istopo(self): + # not worth the trouble asserting if the two sets combined are still + # in topographical order. Use the sort() predicate to explicitly sort + # again instead. + return False + + def _iterfilter(self, iterrange): + s = self._hiddenrevs + for r in iterrange: + if r not in s: + yield r + + def __iter__(self): + if self._ascending: + return self.fastasc() + else: + return self.fastdesc() + + def fastasc(self): + iterrange = xrange(self._start, self._end) + if self._hiddenrevs: + return self._iterfilter(iterrange) + return iter(iterrange) + + def fastdesc(self): + iterrange = xrange(self._end - 1, self._start - 1, -1) + if self._hiddenrevs: + return self._iterfilter(iterrange) + return iter(iterrange) + + def __contains__(self, rev): + hidden = self._hiddenrevs + return ((self._start <= rev < self._end) + and not (hidden and rev in hidden)) + + def __nonzero__(self): + for r in self: + return True + return False + + def __len__(self): + if not self._hiddenrevs: + return abs(self._end - self._start) + else: + count = 0 + start = self._start + end = self._end + for rev in self._hiddenrevs: + if (end < rev <= start) or (start <= rev < end): + count += 1 + return abs(self._end - self._start) - count + + def isascending(self): + return self._ascending + + def isdescending(self): + return not self._ascending + + def first(self): + if self._ascending: + it = self.fastasc + else: + it = self.fastdesc + for x in it(): + return x + return None + + def last(self): + if self._ascending: + it = self.fastdesc + else: + it = self.fastasc + for x in it(): + return x + return None + + def __repr__(self): + d = {False: '-', True: '+'}[self._ascending] + return '<%s%s %d:%d>' % (type(self).__name__, d, + self._start, self._end - 1) + +class fullreposet(spanset): + """a set containing all revisions in the repo + + This class exists to host special optimization and magic to handle virtual + revisions such as "null". 
+    """
+
+    def __init__(self, repo):
+        super(fullreposet, self).__init__(repo)
+
+    def __and__(self, other):
+        """As self contains the whole repo, all of the other set should also be
+        in self. Therefore `self & other = other`.
+
+        This boldly assumes the other contains valid revs only.
+        """
+        # other is not a smartset; make it so
+        if not util.safehasattr(other, 'isascending'):
+            # filter out hidden revision
+            # (this boldly assumes all smartset are pure)
+            #
+            # `other` was used with "&", let's assume this is a set like
+            # object.
+            other = baseset(other - self._hiddenrevs)
+
+        other.sort(reverse=self.isdescending())
+        return other
+
+def prettyformat(revs):
+    lines = []
+    rs = repr(revs)
+    p = 0
+    while p < len(rs):
+        q = rs.find('<', p + 1)
+        if q < 0:
+            q = len(rs)
+        l = rs.count('<', 0, p) - rs.count('>', 0, p)
+        assert l >= 0
+        lines.append((l, rs[p:q].rstrip()))
+        p = q
+    return '\n'.join('  ' * l + s for l, s in lines)
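The doctests above pin down the module's ordering rules; the following short session (assuming an installed Mercurial that ships mercurial.smartset) exercises the same behavior:

from __future__ import print_function
from mercurial import smartset

xs = smartset.baseset([4, 0, 7, 6])   # list-like: insertion order kept
ys = smartset.baseset([5, 6, 7, 3])

union = xs + ys                       # addset: lazy union, no duplicates
both = xs & ys                        # lazily filtered intersection

print(list(union))                    # [4, 0, 7, 6, 5, 3] (unsorted iteration)
union.sort()
print(list(union))                    # [0, 3, 4, 5, 6, 7]
print(union.first(), union.last(), 7 in both)   # 0 7 True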
--- a/mercurial/sshpeer.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/sshpeer.py Sat Mar 11 13:53:14 2017 -0500 @@ -150,7 +150,7 @@ util.shellquote("%s init %s" % (_serverquote(remotecmd), _serverquote(self.path)))) ui.debug('running %s\n' % cmd) - res = ui.system(cmd) + res = ui.system(cmd, blockedtag='sshpeer') if res != 0: self._abort(error.RepoError(_("could not create remote repo")))
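The new blockedtag argument feeds ui.timeblockedsection (added to mercurial/ui.py later in this changeset), which accumulates wall-clock milliseconds spent blocked on subprocesses and stdio. Reduced to a standalone sketch, the accounting looks like this:

from __future__ import print_function

import collections
import contextlib
import time

_blockedtimes = collections.defaultdict(int)

@contextlib.contextmanager
def timeblockedsection(key):
    # accumulate milliseconds spent in the block under key + '_blocked'
    starttime = time.time()
    try:
        yield
    finally:
        _blockedtimes[key + '_blocked'] += (time.time() - starttime) * 1000

with timeblockedsection('sshpeer'):
    time.sleep(0.05)   # stand-in for running the remote ssh command
print(dict(_blockedtimes))   # {'sshpeer_blocked': ~50.0}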
--- a/mercurial/sslutil.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/sslutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -720,7 +720,8 @@ # to load the system CA store. If we're running on Apple Python, use this # trick. if _plainapplepython(): - dummycert = os.path.join(os.path.dirname(__file__), 'dummycert.pem') + dummycert = os.path.join( + os.path.dirname(pycompat.fsencode(__file__)), 'dummycert.pem') if os.path.exists(dummycert): return dummycert @@ -814,6 +815,16 @@ if peerfingerprints[hash].lower() == fingerprint: ui.debug('%s certificate matched fingerprint %s:%s\n' % (host, hash, fmtfingerprint(fingerprint))) + if settings['legacyfingerprint']: + ui.warn(_('(SHA-1 fingerprint for %s found in legacy ' + '[hostfingerprints] section; ' + 'if you trust this fingerprint, set the ' + 'following config value in [hostsecurity] and ' + 'remove the old one from [hostfingerprints] ' + 'to upgrade to a more secure SHA-256 ' + 'fingerprint: ' + '%s.fingerprints=%s)\n') % ( + host, host, nicefingerprint)) return # Pinned fingerprint didn't match. This is a fatal error.
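The warning added above steers users from SHA-1 pins in [hostfingerprints] to SHA-256 pins in [hostsecurity]. A host fingerprint is just a digest of the server certificate in DER form; a rough standard-library sketch of computing one (the hostname is illustrative, and this performs a live TLS connection):

import hashlib
import ssl

def sha256fingerprint(host, port=443):
    '''Return a colon-separated SHA-256 fingerprint of host's certificate.'''
    pem = ssl.get_server_certificate((host, port))
    der = ssl.PEM_cert_to_DER_cert(pem)
    digest = hashlib.sha256(der).hexdigest()
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

print(sha256fingerprint('example.com'))
# the value then goes in hgrc, e.g.:
# [hostsecurity]
# example.com:fingerprints = sha256:<the value printed above>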
--- a/mercurial/statichttprepo.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/statichttprepo.py Sat Mar 11 13:53:14 2017 -0500 @@ -24,6 +24,7 @@ store, url, util, + vfs as vfsmod, ) urlerr = util.urlerr @@ -86,7 +87,7 @@ urlopener = url.opener(ui, authinfo) urlopener.add_handler(byterange.HTTPRangeHandler()) - class statichttpvfs(scmutil.abstractvfs): + class statichttpvfs(vfsmod.abstractvfs): def __init__(self, base): self.base = base @@ -121,9 +122,8 @@ u = util.url(path.rstrip('/') + "/.hg") self.path, authinfo = u.authinfo() - opener = build_opener(ui, authinfo) - self.opener = opener(self.path) - self.vfs = self.opener + vfsclass = build_opener(ui, authinfo) + self.vfs = vfsclass(self.path) self._phasedefaults = [] self.names = namespaces.namespaces() @@ -148,7 +148,7 @@ raise error.RepoError(msg) # setup store - self.store = store.store(requirements, self.path, opener) + self.store = store.store(requirements, self.path, vfsclass) self.spath = self.store.path self.svfs = self.store.opener self.sjoin = self.store.join
--- a/mercurial/statprof.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/statprof.py Sat Mar 11 13:53:14 2017 -0500 @@ -433,6 +433,7 @@ Hotpath = 3 FlameGraph = 4 Json = 5 + Chrome = 6 def display(fp=None, format=3, data=None, **kwargs): '''Print statistics, either to stdout or the given file object.''' @@ -457,10 +458,12 @@ write_to_flame(data, fp, **kwargs) elif format == DisplayFormats.Json: write_to_json(data, fp) + elif format == DisplayFormats.Chrome: + write_to_chrome(data, fp, **kwargs) else: raise Exception("Invalid display format") - if format != DisplayFormats.Json: + if format not in (DisplayFormats.Json, DisplayFormats.Chrome): print('---', file=fp) print('Sample count: %d' % len(data.samples), file=fp) print('Total time: %f seconds' % data.accumulated_time, file=fp) @@ -713,6 +716,23 @@ os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile)) print("Written to %s" % outputfile, file=fp) +_pathcache = {} +def simplifypath(path): + '''Attempt to make the path to a Python module easier to read by + removing whatever part of the Python search path it was found + on.''' + + if path in _pathcache: + return _pathcache[path] + hgpath = pycompat.fsencode(encoding.__file__).rsplit(os.sep, 2)[0] + for p in [hgpath] + sys.path: + prefix = p + os.sep + if path.startswith(prefix): + path = path[len(prefix):] + break + _pathcache[path] = path + return path + def write_to_json(data, fp): samples = [] @@ -726,6 +746,102 @@ print(json.dumps(samples), file=fp) +def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999): + samples = [] + laststack = collections.deque() + lastseen = collections.deque() + + # The Chrome tracing format allows us to use a compact stack + # representation to save space. It's fiddly but worth it. + # We maintain a bijection between stack and ID. + stack2id = {} + id2stack = [] # will eventually be rendered + + def stackid(stack): + if not stack: + return + if stack in stack2id: + return stack2id[stack] + parent = stackid(stack[1:]) + myid = len(stack2id) + stack2id[stack] = myid + id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0])) + if parent is not None: + id2stack[-1].update(parent=parent) + return myid + + def endswith(a, b): + return list(a)[-len(b):] == list(b) + + # The sampling profiler can sample multiple times without + # advancing the clock, potentially causing the Chrome trace viewer + # to render single-pixel columns that we cannot zoom in on. We + # work around this by pretending that zero-duration samples are a + # millisecond in length. + + clamp = 0.001 + + # We provide knobs that by default attempt to filter out stack + # frames that are too noisy: + # + # * A few take almost all execution time. These are usually boring + # setup functions, giving a stack that is deep but uninformative. + # + # * Numerous samples take almost no time, but introduce lots of + # noisy, oft-deep "spines" into a rendered profile. 
+ + blacklist = set() + totaltime = data.samples[-1].time - data.samples[0].time + minthreshold = totaltime * minthreshold + maxthreshold = max(totaltime * maxthreshold, clamp) + + def poplast(): + oldsid = stackid(tuple(laststack)) + oldcat, oldfunc = laststack.popleft() + oldtime, oldidx = lastseen.popleft() + duration = sample.time - oldtime + if minthreshold <= duration <= maxthreshold: + # ensure no zero-duration events + sampletime = max(oldtime + clamp, sample.time) + samples.append(dict(ph='E', name=oldfunc, cat=oldcat, sf=oldsid, + ts=sampletime*1e6, pid=0)) + else: + blacklist.add(oldidx) + + # Much fiddling to synthesize correctly(ish) nested begin/end + # events given only stack snapshots. + + for sample in data.samples: + tos = sample.stack[0] + name = tos.function + path = simplifypath(tos.path) + category = '%s:%d' % (path, tos.lineno) + stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno), + frame.function) for frame in sample.stack)) + qstack = collections.deque(stack) + if laststack == qstack: + continue + while laststack and qstack and laststack[-1] == qstack[-1]: + laststack.pop() + qstack.pop() + while laststack: + poplast() + for f in reversed(qstack): + lastseen.appendleft((sample.time, len(samples))) + laststack.appendleft(f) + path, name = f + sid = stackid(tuple(laststack)) + samples.append(dict(ph='B', name=name, cat=path, ts=sample.time*1e6, + sf=sid, pid=0)) + laststack = collections.deque(stack) + while laststack: + poplast() + events = [s[1] for s in enumerate(samples) if s[0] not in blacklist] + frames = collections.OrderedDict((str(k), v) + for (k,v) in enumerate(id2stack)) + json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1) + fp.write('\n') + def printusage(): print(""" The statprof command line allows you to inspect the last profile's results in
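write_to_chrome above emits the Chrome trace-viewer JSON shape: paired 'B'/'E' (begin/end) events with microsecond timestamps, plus a stackFrames table referenced through the sf ids. A minimal hand-built trace in the same shape, loadable at chrome://tracing (function names and times are invented):

import json

# two nested spans: main() covers 0-2ms, parse() covers 0.5-1.5ms
events = [
    dict(ph='B', name='main', cat='prog.py:1', ts=0, sf=0, pid=0),
    dict(ph='B', name='parse', cat='prog.py:10', ts=500, sf=1, pid=0),
    dict(ph='E', name='parse', cat='prog.py:10', ts=1500, sf=1, pid=0),
    dict(ph='E', name='main', cat='prog.py:1', ts=2000, sf=0, pid=0),
]   # ts is in microseconds
frames = {
    '0': dict(category='prog.py:1', name='prog.py:1 main'),
    '1': dict(category='prog.py:10', name='prog.py:10 parse', parent=0),
}

with open('trace.json', 'w') as fp:
    json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1)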
--- a/mercurial/store.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/store.py Sat Mar 11 13:53:14 2017 -0500 @@ -17,8 +17,8 @@ error, parsers, pycompat, - scmutil, util, + vfs as vfsmod, ) # This avoids a collision between a file named foo and a dir named @@ -99,12 +99,8 @@ 'the\\x07quick\\xadshot' ''' e = '_' - if pycompat.ispy3: - xchr = lambda x: bytes([x]) - asciistr = bytes(xrange(127)) - else: - xchr = chr - asciistr = map(chr, xrange(127)) + xchr = pycompat.bytechr + asciistr = list(map(xchr, range(127))) capitals = list(range(ord("A"), ord("Z") + 1)) cmap = dict((x, x) for x in asciistr) @@ -128,7 +124,7 @@ pass else: raise KeyError - return (lambda s: ''.join([cmap[c] for c in s]), + return (lambda s: ''.join([cmap[s[c:c + 1]] for c in xrange(len(s))]), lambda s: ''.join(list(decode(s)))) _encodefname, _decodefname = _buildencodefun() @@ -325,7 +321,7 @@ self.createmode = _calcmode(vfs) vfs.createmode = self.createmode self.rawvfs = vfs - self.vfs = scmutil.filtervfs(vfs, encodedir) + self.vfs = vfsmod.filtervfs(vfs, encodedir) self.opener = self.vfs def join(self, f): @@ -398,7 +394,7 @@ self.createmode = _calcmode(vfs) vfs.createmode = self.createmode self.rawvfs = vfs - self.vfs = scmutil.filtervfs(vfs, encodefilename) + self.vfs = vfsmod.filtervfs(vfs, encodefilename) self.opener = self.vfs def datafiles(self): @@ -477,9 +473,9 @@ self._load() return iter(self.entries) -class _fncachevfs(scmutil.abstractvfs, scmutil.auditvfs): +class _fncachevfs(vfsmod.abstractvfs, vfsmod.auditvfs): def __init__(self, vfs, fnc, encode): - scmutil.auditvfs.__init__(self, vfs) + vfsmod.auditvfs.__init__(self, vfs) self.fncache = fnc self.encode = encode
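The s[c:c + 1] slicing in the rewritten encoder is the standard trick for byte-wise code that must run on both Pythons: indexing a bytes object yields an int on Python 3 but a one-character str on Python 2, while a one-byte slice yields bytes on both. In short:

s = b'Ab'
# s[0] is 'A' on Python 2 but 65 on Python 3; s[0:1] is b'A' on both
chars = [s[i:i + 1] for i in range(len(s))]
assert chars == [b'A', b'b']

# pycompat.bytechr covers the reverse direction (int -> byte);
# on Python 3 it is roughly the following, while plain chr() suffices on 2:
def bytechr(i):
    return bytes([i])

assert bytechr(65) == b'A'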
--- a/mercurial/streamclone.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/streamclone.py Sat Mar 11 13:53:14 2017 -0500 @@ -8,7 +8,6 @@ from __future__ import absolute_import import struct -import time from .i18n import _ from . import ( @@ -297,7 +296,7 @@ (filecount, util.bytecount(bytecount))) handled_bytes = 0 repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes')) - start = time.time() + start = util.timer() # TODO: get rid of (potential) inconsistency # @@ -340,7 +339,7 @@ # streamclone-ed file at next access repo.invalidate(clearfilecache=True) - elapsed = time.time() - start + elapsed = util.timer() - start if elapsed <= 0: elapsed = 0.001 repo.ui.progress(_('clone'), None)
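Swapping time.time() for util.timer (here and in mercurial/tags.py below) moves performance measurements onto the best clock the platform offers. The selection amounts to something like the sketch below; the exact choice inside util may differ:

import sys
import time

if sys.version_info >= (3, 3):
    timer = time.perf_counter   # monotonic and high resolution
elif sys.platform == 'win32':
    timer = time.clock          # high resolution on Windows under Python 2
else:
    timer = time.time           # wall-clock fallback

start = timer()
# ... transfer the streamed data ...
elapsed = max(timer() - start, 0.001)   # clamp, as the hunk above does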
--- a/mercurial/subrepo.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/subrepo.py Sat Mar 11 13:53:14 2017 -0500 @@ -35,6 +35,7 @@ pycompat, scmutil, util, + vfs as vfsmod, ) hg = None @@ -542,8 +543,8 @@ """return filename iterator""" raise NotImplementedError - def filedata(self, name): - """return file data""" + def filedata(self, name, decode): + """return file data, optionally passed through repo decoders""" raise NotImplementedError def fileflags(self, name): @@ -558,7 +559,7 @@ """handle the files command for this subrepo""" return 1 - def archive(self, archiver, prefix, match=None): + def archive(self, archiver, prefix, match=None, decode=True): if match is not None: files = [f for f in self.files() if match(f)] else: @@ -572,7 +573,7 @@ mode = 'x' in flags and 0o755 or 0o644 symlink = 'l' in flags archiver.addfile(prefix + self._path + '/' + name, - mode, symlink, self.filedata(name)) + mode, symlink, self.filedata(name, decode)) self.ui.progress(_('archiving (%s)') % relpath, i + 1, unit=_('files'), total=total) self.ui.progress(_('archiving (%s)') % relpath, None) @@ -615,7 +616,7 @@ def wvfs(self): """return vfs to access the working directory of this subrepository """ - return scmutil.vfs(self._ctx.repo().wvfs.join(self._path)) + return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path)) @propertycache def _relpath(self): @@ -677,7 +678,7 @@ @propertycache def _cachestorehashvfs(self): - return scmutil.vfs(self._repo.join('cache/storehash')) + return vfsmod.vfs(self._repo.join('cache/storehash')) def _readstorehashcache(self, remotepath): '''read the store hash cache for a given remote repository''' @@ -782,7 +783,7 @@ % (inst, subrelpath(self))) @annotatesubrepoerror - def archive(self, archiver, prefix, match=None): + def archive(self, archiver, prefix, match=None, decode=True): self._get(self._state + ('hg',)) total = abstractsubrepo.archive(self, archiver, prefix, match) rev = self._state[1] @@ -790,7 +791,8 @@ for subpath in ctx.substate: s = subrepo(ctx, subpath, True) submatch = matchmod.subdirmatcher(subpath, match) - total += s.archive(archiver, prefix + self._path + '/', submatch) + total += s.archive(archiver, prefix + self._path + '/', submatch, + decode) return total @annotatesubrepoerror @@ -956,9 +958,12 @@ ctx = self._repo[rev] return ctx.manifest().keys() - def filedata(self, name): + def filedata(self, name, decode): rev = self._state[1] - return self._repo[rev][name].data() + data = self._repo[rev][name].data() + if decode: + data = self._repo.wwritedata(name, data) + return data def fileflags(self, name): rev = self._state[1] @@ -1292,7 +1297,7 @@ paths.append(name.encode('utf-8')) return paths - def filedata(self, name): + def filedata(self, name, decode): return self._svncommand(['cat'], name)[0] @@ -1410,6 +1415,10 @@ errpipe = None if self.ui.quiet: errpipe = open(os.devnull, 'w') + if self.ui._colormode and len(commands) and commands[0] == "diff": + # insert the argument in the front, + # the end of git diff arguments is used for paths + commands.insert(1, '--color') p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1, cwd=cwd, env=env, close_fds=util.closefds, stdout=subprocess.PIPE, stderr=errpipe) @@ -1772,7 +1781,7 @@ else: self.wvfs.unlink(f) - def archive(self, archiver, prefix, match=None): + def archive(self, archiver, prefix, match=None, decode=True): total = 0 source, revision = self._state if not revision:
--- a/mercurial/tags.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/tags.py Sat Mar 11 13:53:14 2017 -0500 @@ -14,7 +14,6 @@ import array import errno -import time from .node import ( bin, @@ -25,6 +24,7 @@ from . import ( encoding, error, + scmutil, util, ) @@ -278,8 +278,6 @@ If the cache is not up to date, the caller is responsible for reading tag info from each returned head. (See findglobaltags().) ''' - from . import scmutil # avoid cycle - try: cachefile = repo.vfs(_filename(repo), 'r') # force reading the file for static-http @@ -344,7 +342,7 @@ # potentially expensive search. return ([], {}, valid, None, True) - starttime = time.time() + starttime = util.timer() # Now we have to lookup the .hgtags filenode for every new head. # This is the most expensive part of finding tags, so performance @@ -359,7 +357,7 @@ fnodescache.write() - duration = time.time() - starttime + duration = util.timer() - starttime ui.log('tagscache', '%d/%d cache hits/lookups in %0.4f ' 'seconds\n',
--- a/mercurial/templatekw.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/templatekw.py Sat Mar 11 13:53:14 2017 -0500 @@ -204,6 +204,17 @@ return getrenamed +# default templates internally used for rendering of lists +defaulttempl = { + 'parent': '{rev}:{node|formatnode} ', + 'manifest': '{rev}:{node|formatnode}', + 'file_copy': '{name} ({source})', + 'envvar': '{key}={value}', + 'extra': '{key}={value|stringescape}' +} +# filecopy is preserved for compatibility reasons +defaulttempl['filecopy'] = defaulttempl['file_copy'] + # keywords are callables like: # fn(repo, ctx, templ, cache, revcache, **args) # with:
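Each entry above is a one-element template applied to every item of a list keyword ({parents}, {extras}, and so on). For the filter-free entries, Python's str.format happens to share the brace syntax, which makes the intent easy to show (an illustration only; real rendering goes through the templater):

copies = [{'name': 'b.txt', 'source': 'a.txt'},
          {'name': 'c.txt', 'source': 'a.txt'}]
print(', '.join('{name} ({source})'.format(**c) for c in copies))
# -> b.txt (a.txt), c.txt (a.txt)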
--- a/mercurial/templater.py	Tue Mar 07 13:24:24 2017 -0500
+++ b/mercurial/templater.py	Sat Mar 11 13:53:14 2017 -0500
@@ -20,6 +20,7 @@
     pycompat,
     registrar,
     revset as revsetmod,
+    revsetlang,
     templatefilters,
     templatekw,
     util,
@@ -543,6 +544,19 @@
 
     return templatefilters.fill(text, width, initindent, hangindent)
 
+@templatefunc('formatnode(node)')
+def formatnode(context, mapping, args):
+    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
+    if len(args) != 1:
+        # i18n: "formatnode" is a keyword
+        raise error.ParseError(_("formatnode expects one argument"))
+
+    ui = mapping['ui']
+    node = evalstring(context, mapping, args[0])
+    if ui.debugflag:
+        return node
+    return templatefilters.short(node)
+
 @templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])')
 def pad(context, mapping, args):
     """Pad text with a fill character."""
@@ -778,7 +792,7 @@
         if len(args) > 1:
             formatargs = [evalfuncarg(context, mapping, a)
                           for a in args[1:]]
-            revs = query(revsetmod.formatspec(raw, *formatargs))
+            revs = query(revsetlang.formatspec(raw, *formatargs))
             revs = list(revs)
         else:
             revsetcache = mapping['cache'].setdefault("revsetcache", {})
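The new formatnode template function returns the untruncated hash under --debug and the familiar 12-digit short form otherwise (via templatefilters.short). Its core logic reduces to:

def formatnode(node, debugflag=False):
    # full 40-digit hash under --debug, first 12 hex digits otherwise
    return node if debugflag else node[:12]

node = 'f1b2c3d4e5f60718293a4b5c6d7e8f9012345678'
assert formatnode(node) == 'f1b2c3d4e5f6'
assert formatnode(node, debugflag=True) == node

Since the templater treats {x|f} as f(x), the defaulttempl entries added to templatekw.py above can call it with filter syntax, as in {node|formatnode}.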
--- a/mercurial/templates/gitweb/filelog.tmpl Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/templates/gitweb/filelog.tmpl Sat Mar 11 13:53:14 2017 -0500 @@ -38,6 +38,8 @@ </table> <div class="page_nav"> +<a href="{url|urlescape}log/{symrev}/{file|urlescape}{lessvars%urlparameter}">less</a> +<a href="{url|urlescape}log/{symrev}/{file|urlescape}{morevars%urlparameter}">more</a> {nav%filenav} </div>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/txnutil.py Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,36 @@ +# txnutil.py - transaction related utilities +# +# Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import errno + +from . import ( + encoding, +) + +def mayhavepending(root): + '''return whether 'root' may have pending changes, which are + visible to this process. + ''' + return root == encoding.environ.get('HG_PENDING') + +def trypending(root, vfs, filename, **kwargs): + '''Open file to be read according to HG_PENDING environment variable + + This opens '.pending' of specified 'filename' only when HG_PENDING + is equal to 'root'. + + This returns '(fp, is_pending_opened)' tuple. + ''' + if mayhavepending(root): + try: + return (vfs('%s.pending' % filename, **kwargs), True) + except IOError as inst: + if inst.errno != errno.ENOENT: + raise + return (vfs(filename, **kwargs), False)
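trypending gives processes spawned inside a transaction (hooks, mostly) a view of the pending state. A hypothetical caller reading the bookmarks file might look like the sketch below; readbookmarks and its internals are illustrative, only the trypending signature comes from the module above:

from mercurial import txnutil

def readbookmarks(repo):
    # opens '.hg/bookmarks.pending' instead of '.hg/bookmarks' when
    # HG_PENDING names this repo root, i.e. when an in-progress
    # transaction in a parent process has written pending data
    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
    try:
        return fp.read(), pending
    finally:
        fp.close()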
--- a/mercurial/ui.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/ui.py Sat Mar 11 13:53:14 2017 -0500 @@ -7,13 +7,17 @@ from __future__ import absolute_import +import atexit +import collections import contextlib import errno import getpass import inspect import os import re +import signal import socket +import subprocess import sys import tempfile import traceback @@ -22,6 +26,7 @@ from .node import hex from . import ( + color, config, encoding, error, @@ -34,6 +39,10 @@ urlreq = util.urlreq +# for use with str.translate(None, _keepalnum), to keep just alphanumerics +_keepalnum = ''.join(c for c in map(pycompat.bytechr, range(256)) + if not c.isalnum()) + samplehgrcs = { 'user': """# example user config (see 'hg help config' for more info) @@ -42,12 +51,14 @@ # username = Jane Doe <jdoe@example.com> username = +# uncomment to colorize command output +# color = auto + [extensions] # uncomment these lines to enable some popular extensions # (see 'hg help extensions' for more info) # -# pager = -# color =""", +# pager =""", 'cloned': """# example repository config (see 'hg help config' for more info) @@ -85,15 +96,38 @@ 'global': """# example system-wide hg config (see 'hg help config' for more info) +[ui] +# uncomment to colorize command output +# color = auto + [extensions] # uncomment these lines to enable some popular extensions # (see 'hg help extensions' for more info) # # blackbox = -# color = # pager =""", } + +class httppasswordmgrdbproxy(object): + """Delays loading urllib2 until it's needed.""" + def __init__(self): + self._mgr = None + + def _get_mgr(self): + if self._mgr is None: + self._mgr = urlreq.httppasswordmgrwithdefaultrealm() + return self._mgr + + def add_password(self, *args, **kwargs): + return self._get_mgr().add_password(*args, **kwargs) + + def find_user_password(self, *args, **kwargs): + return self._get_mgr().find_user_password(*args, **kwargs) + +def _catchterm(*args): + raise error.SignalInterrupt + class ui(object): def __init__(self, src=None): """Create a fresh new ui object if no src given @@ -120,11 +154,19 @@ self.callhooks = True # Insecure server connections requested. 
self.insecureconnections = False + # Blocked time + self.logblockedtimes = False + # color mode: see mercurial/color.py for possible value + self._colormode = None + self._terminfoparams = {} + self._styles = {} if src: self.fout = src.fout self.ferr = src.ferr self.fin = src.fin + self.pageractive = src.pageractive + self._disablepager = src._disablepager self._tcfg = src._tcfg.copy() self._ucfg = src._ucfg.copy() @@ -134,18 +176,26 @@ self.environ = src.environ self.callhooks = src.callhooks self.insecureconnections = src.insecureconnections + self._colormode = src._colormode + self._terminfoparams = src._terminfoparams.copy() + self._styles = src._styles.copy() + self.fixconfig() self.httppasswordmgrdb = src.httppasswordmgrdb + self._blockedtimes = src._blockedtimes else: self.fout = util.stdout self.ferr = util.stderr self.fin = util.stdin + self.pageractive = False + self._disablepager = False # shared read-only environment self.environ = encoding.environ - self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm() + self.httppasswordmgrdb = httppasswordmgrdbproxy() + self._blockedtimes = collections.defaultdict(int) allowed = self.configlist('experimental', 'exportableenviron') if '*' in allowed: @@ -172,7 +222,17 @@ """Clear internal state that shouldn't persist across commands""" if self._progbar: self._progbar.resetstate() # reset last-print time of progress bar - self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm() + self.httppasswordmgrdb = httppasswordmgrdbproxy() + + @contextlib.contextmanager + def timeblockedsection(self, key): + # this is open-coded below - search for timeblockedsection to find them + starttime = util.timer() + try: + yield + finally: + self._blockedtimes[key + '_blocked'] += \ + (util.timer() - starttime) * 1000 def formatter(self, topic, opts): return formatter.formatter(self, topic, opts) @@ -277,6 +337,7 @@ self._reportuntrusted = self.debugflag or self.configbool("ui", "report_untrusted", True) self.tracebackflag = self.configbool('ui', 'traceback', False) + self.logblockedtimes = self.configbool('ui', 'logblockedtimes') if section in (None, 'trusted'): # update trust information @@ -402,6 +463,41 @@ % (section, name, v)) return b + def configwith(self, convert, section, name, default=None, + desc=None, untrusted=False): + """parse a configuration element with a conversion function + + >>> u = ui(); s = 'foo' + >>> u.setconfig(s, 'float1', '42') + >>> u.configwith(float, s, 'float1') + 42.0 + >>> u.setconfig(s, 'float2', '-4.25') + >>> u.configwith(float, s, 'float2') + -4.25 + >>> u.configwith(float, s, 'unknown', 7) + 7 + >>> u.setconfig(s, 'invalid', 'somevalue') + >>> u.configwith(float, s, 'invalid') + Traceback (most recent call last): + ... + ConfigError: foo.invalid is not a valid float ('somevalue') + >>> u.configwith(float, s, 'invalid', desc='womble') + Traceback (most recent call last): + ... + ConfigError: foo.invalid is not a valid womble ('somevalue') + """ + + v = self.config(section, name, None, untrusted) + if v is None: + return default + try: + return convert(v) + except ValueError: + if desc is None: + desc = convert.__name__ + raise error.ConfigError(_("%s.%s is not a valid %s ('%s')") + % (section, name, desc, v)) + def configint(self, section, name, default=None, untrusted=False): """parse a configuration element as an integer @@ -418,17 +514,11 @@ >>> u.configint(s, 'invalid') Traceback (most recent call last): ... 
- ConfigError: foo.invalid is not an integer ('somevalue') + ConfigError: foo.invalid is not a valid integer ('somevalue') """ - v = self.config(section, name, None, untrusted) - if v is None: - return default - try: - return int(v) - except ValueError: - raise error.ConfigError(_("%s.%s is not an integer ('%s')") - % (section, name, v)) + return self.configwith(int, section, name, default, 'integer', + untrusted) def configbytes(self, section, name, default=0, untrusted=False): """parse a configuration element as a quantity in bytes @@ -475,37 +565,38 @@ def _parse_plain(parts, s, offset): whitespace = False - while offset < len(s) and (s[offset].isspace() or s[offset] == ','): + while offset < len(s) and (s[offset:offset + 1].isspace() + or s[offset:offset + 1] == ','): whitespace = True offset += 1 if offset >= len(s): return None, parts, offset if whitespace: parts.append('') - if s[offset] == '"' and not parts[-1]: + if s[offset:offset + 1] == '"' and not parts[-1]: return _parse_quote, parts, offset + 1 - elif s[offset] == '"' and parts[-1][-1] == '\\': - parts[-1] = parts[-1][:-1] + s[offset] + elif s[offset:offset + 1] == '"' and parts[-1][-1] == '\\': + parts[-1] = parts[-1][:-1] + s[offset:offset + 1] return _parse_plain, parts, offset + 1 - parts[-1] += s[offset] + parts[-1] += s[offset:offset + 1] return _parse_plain, parts, offset + 1 def _parse_quote(parts, s, offset): - if offset < len(s) and s[offset] == '"': # "" + if offset < len(s) and s[offset:offset + 1] == '"': # "" parts.append('') offset += 1 - while offset < len(s) and (s[offset].isspace() or - s[offset] == ','): + while offset < len(s) and (s[offset:offset + 1].isspace() or + s[offset:offset + 1] == ','): offset += 1 return _parse_plain, parts, offset - while offset < len(s) and s[offset] != '"': - if (s[offset] == '\\' and offset + 1 < len(s) - and s[offset + 1] == '"'): + while offset < len(s) and s[offset:offset + 1] != '"': + if (s[offset:offset + 1] == '\\' and offset + 1 < len(s) + and s[offset + 1:offset + 2] == '"'): offset += 1 parts[-1] += '"' else: - parts[-1] += s[offset] + parts[-1] += s[offset:offset + 1] offset += 1 if offset >= len(s): @@ -519,11 +610,11 @@ return None, parts, offset offset += 1 - while offset < len(s) and s[offset] in [' ', ',']: + while offset < len(s) and s[offset:offset + 1] in [' ', ',']: offset += 1 if offset < len(s): - if offset + 1 == len(s) and s[offset] == '"': + if offset + 1 == len(s) and s[offset:offset + 1] == '"': parts[-1] += '"' offset += 1 else: @@ -696,55 +787,176 @@ def write(self, *args, **opts): '''write args to output - By default, this method simply writes to the buffer or stdout, - but extensions or GUI tools may override this method, - write_err(), popbuffer(), and label() to style output from - various parts of hg. + By default, this method simply writes to the buffer or stdout. + Color mode can be set on the UI class to have the output decorated + with color modifier before being written to stdout. - An optional keyword argument, "label", can be passed in. - This should be a string containing label names separated by - space. Label names take the form of "topic.type". For example, - ui.debug() issues a label of "ui.debug". + The color used is controlled by an optional keyword argument, "label". + This should be a string containing label names separated by space. + Label names take the form of "topic.type". For example, ui.debug() + issues a label of "ui.debug". When labeling output for a specific command, a label of "cmdname.type" is recommended. 
For example, status issues a label of "status.modified" for modified files.
        '''
        if self._buffers and not opts.get('prompt', False):
-            self._buffers[-1].extend(a for a in args)
+            if self._bufferapplylabels:
+                label = opts.get('label', '')
+                self._buffers[-1].extend(self.label(a, label) for a in args)
+            else:
+                self._buffers[-1].extend(args)
+        elif self._colormode == 'win32':
+            # windows color printing is its own can of worms, defer to
+            # the color module and that is it.
+            color.win32print(self, self._write, *args, **opts)
         else:
-            self._progclear()
-            for a in args:
+            msgs = args
+            if self._colormode is not None:
+                label = opts.get('label', '')
+                msgs = [self.label(a, label) for a in args]
+            self._write(*msgs, **opts)
+
+    def _write(self, *msgs, **opts):
+        self._progclear()
+        # opencode timeblockedsection because this is a critical path
+        starttime = util.timer()
+        try:
+            for a in msgs:
                 self.fout.write(a)
+        finally:
+            self._blockedtimes['stdio_blocked'] += \
+                (util.timer() - starttime) * 1000

     def write_err(self, *args, **opts):
         self._progclear()
+        if self._bufferstates and self._bufferstates[-1][0]:
+            self.write(*args, **opts)
+        elif self._colormode == 'win32':
+            # windows color printing is its own can of worms, defer to
+            # the color module and that is it.
+            color.win32print(self, self._write_err, *args, **opts)
+        else:
+            msgs = args
+            if self._colormode is not None:
+                label = opts.get('label', '')
+                msgs = [self.label(a, label) for a in args]
+            self._write_err(*msgs, **opts)
+
+    def _write_err(self, *msgs, **opts):
         try:
-            if self._bufferstates and self._bufferstates[-1][0]:
-                return self.write(*args, **opts)
-            if not getattr(self.fout, 'closed', False):
-                self.fout.flush()
-            for a in args:
-                self.ferr.write(a)
-            # stderr may be buffered under win32 when redirected to files,
-            # including stdout.
-            if not getattr(self.ferr, 'closed', False):
-                self.ferr.flush()
+            with self.timeblockedsection('stdio'):
+                if not getattr(self.fout, 'closed', False):
+                    self.fout.flush()
+                for a in msgs:
+                    self.ferr.write(a)
+                # stderr may be buffered under win32 when redirected to files,
+                # including stdout.
+                if not getattr(self.ferr, 'closed', False):
+                    self.ferr.flush()
         except IOError as inst:
             if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                 raise

     def flush(self):
-        try: self.fout.flush()
-        except (IOError, ValueError): pass
-        try: self.ferr.flush()
-        except (IOError, ValueError): pass
+        # opencode timeblockedsection because this is a critical path
+        starttime = util.timer()
+        try:
+            try: self.fout.flush()
+            except (IOError, ValueError): pass
+            try: self.ferr.flush()
+            except (IOError, ValueError): pass
+        finally:
+            self._blockedtimes['stdio_blocked'] += \
+                (util.timer() - starttime) * 1000

     def _isatty(self, fh):
         if self.configbool('ui', 'nontty', False):
             return False
         return util.isatty(fh)

+    def disablepager(self):
+        self._disablepager = True
+
+    def pager(self, command):
+        """Start a pager for subsequent command output.
+
+        Commands which produce a long stream of output should call
+        this function to activate the user's preferred pagination
+        mechanism (which may be no pager). Calling this function
+        precludes any future use of interactive functionality, such as
+        prompting the user or activating curses.
+
+        Args:
+          command: The full, non-aliased name of the command. That is, "log"
+              not "history", "summary" not "summ", etc.
+ """ + if (self._disablepager + or self.pageractive + or command in self.configlist('pager', 'ignore') + or not self.configbool('pager', 'enable', True) + or not self.configbool('pager', 'attend-' + command, True) + # TODO: if we want to allow HGPLAINEXCEPT=pager, + # formatted() will need some adjustment. + or not self.formatted() + or self.plain() + # TODO: expose debugger-enabled on the UI object + or '--debugger' in sys.argv): + # We only want to paginate if the ui appears to be + # interactive, the user didn't say HGPLAIN or + # HGPLAINEXCEPT=pager, and the user didn't specify --debug. + return + + # TODO: add a "system defaults" config section so this default + # of more(1) can be easily replaced with a global + # configuration file. For example, on OS X the sane default is + # less(1), not more(1), and on debian it's + # sensible-pager(1). We should probably also give the system + # default editor command similar treatment. + envpager = encoding.environ.get('PAGER', 'more') + pagercmd = self.config('pager', 'pager', envpager) + if not pagercmd: + return + + self.debug('starting pager for command %r\n' % command) + self.pageractive = True + # Preserve the formatted-ness of the UI. This is important + # because we mess with stdout, which might confuse + # auto-detection of things being formatted. + self.setconfig('ui', 'formatted', self.formatted(), 'pager') + self.setconfig('ui', 'interactive', False, 'pager') + if util.safehasattr(signal, "SIGPIPE"): + signal.signal(signal.SIGPIPE, _catchterm) + self._runpager(pagercmd) + + def _runpager(self, command): + """Actually start the pager and set up file descriptors. + + This is separate in part so that extensions (like chg) can + override how a pager is invoked. + """ + pager = subprocess.Popen(command, shell=True, bufsize=-1, + close_fds=util.closefds, stdin=subprocess.PIPE, + stdout=util.stdout, stderr=util.stderr) + + # back up original file descriptors + stdoutfd = os.dup(util.stdout.fileno()) + stderrfd = os.dup(util.stderr.fileno()) + + os.dup2(pager.stdin.fileno(), util.stdout.fileno()) + if self._isatty(util.stderr): + os.dup2(pager.stdin.fileno(), util.stderr.fileno()) + + @atexit.register + def killpager(): + if util.safehasattr(signal, "SIGINT"): + signal.signal(signal.SIGINT, signal.SIG_IGN) + # restore original fds, closing pager.stdin copies in the process + os.dup2(stdoutfd, util.stdout.fileno()) + os.dup2(stderrfd, util.stderr.fileno()) + pager.stdin.close() + pager.wait() + def interface(self, feature): """what interface to use for interactive console features? @@ -900,7 +1112,8 @@ sys.stdout = self.fout # prompt ' ' must exist; otherwise readline may delete entire line # - http://bugs.python.org/issue12833 - line = raw_input(' ') + with self.timeblockedsection('stdio'): + line = raw_input(' ') sys.stdin = oldin sys.stdout = oldout @@ -980,13 +1193,14 @@ self.write_err(self.label(prompt or _('password: '), 'ui.prompt')) # disable getpass() only if explicitly specified. it's still valid # to interact with tty even if fin is not a tty. - if self.configbool('ui', 'nontty'): - l = self.fin.readline() - if not l: - raise EOFError - return l.rstrip('\n') - else: - return getpass.getpass('') + with self.timeblockedsection('stdio'): + if self.configbool('ui', 'nontty'): + l = self.fin.readline() + if not l: + raise EOFError + return l.rstrip('\n') + else: + return getpass.getpass('') except EOFError: raise error.ResponseExpected() def status(self, *msg, **opts): @@ -995,14 +1209,14 @@ This adds an output label of "ui.status". 
''' if not self.quiet: - opts['label'] = opts.get('label', '') + ' ui.status' + opts[r'label'] = opts.get(r'label', '') + ' ui.status' self.write(*msg, **opts) def warn(self, *msg, **opts): '''write warning message to output (stderr) This adds an output label of "ui.warning". ''' - opts['label'] = opts.get('label', '') + ' ui.warning' + opts[r'label'] = opts.get(r'label', '') + ' ui.warning' self.write_err(*msg, **opts) def note(self, *msg, **opts): '''write note to output (if ui.verbose is True) @@ -1010,7 +1224,7 @@ This adds an output label of "ui.note". ''' if self.verbose: - opts['label'] = opts.get('label', '') + ' ui.note' + opts[r'label'] = opts.get(r'label', '') + ' ui.note' self.write(*msg, **opts) def debug(self, *msg, **opts): '''write debug message to output (if ui.debugflag is True) @@ -1018,7 +1232,7 @@ This adds an output label of "ui.debug". ''' if self.debugflag: - opts['label'] = opts.get('label', '') + ' ui.debug' + opts[r'label'] = opts.get(r'label', '') + ' ui.debug' self.write(*msg, **opts) def edit(self, text, user, extra=None, editform=None, pending=None, @@ -1038,7 +1252,7 @@ suffix=extra['suffix'], text=True, dir=rdir) try: - f = os.fdopen(fd, "w") + f = os.fdopen(fd, pycompat.sysstr("w")) f.write(text) f.close() @@ -1058,7 +1272,8 @@ self.system("%s \"%s\"" % (editor, name), environ=environ, - onerr=error.Abort, errprefix=_("edit failed")) + onerr=error.Abort, errprefix=_("edit failed"), + blockedtag='editor') f = open(name) t = f.read() @@ -1068,15 +1283,33 @@ return t - def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None): + def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None, + blockedtag=None): '''execute shell command with appropriate output stream. command output will be redirected if fout is not stdout. + + if command fails and onerr is None, return status, else raise onerr + object as exception. ''' + if blockedtag is None: + blockedtag = 'unknown_system_' + cmd.translate(None, _keepalnum) out = self.fout if any(s[1] for s in self._bufferstates): out = self - return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr, - errprefix=errprefix, out=out) + with self.timeblockedsection(blockedtag): + rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out) + if rc and onerr: + errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]), + util.explainexit(rc)[0]) + if errprefix: + errmsg = '%s: %s' % (errprefix, errmsg) + raise onerr(errmsg) + return rc + + def _runsystem(self, cmd, environ, cwd, out): + """actually execute the given shell command (can be overridden by + extensions like chg)""" + return util.system(cmd, environ=environ, cwd=cwd, out=out) def traceback(self, exc=None, force=False): '''print exception traceback if traceback printing enabled or forced. @@ -1099,7 +1332,11 @@ ''.join(exconly)) else: output = traceback.format_exception(exc[0], exc[1], exc[2]) - self.write_err(''.join(output)) + data = r''.join(output) + if pycompat.ispy3: + enc = pycompat.sysstr(encoding.encoding) + data = data.encode(enc, errors=r'replace') + self.write_err(data) return self.tracebackflag or force def geteditor(self): @@ -1180,13 +1417,15 @@ def label(self, msg, label): '''style msg based on supplied label - Like ui.write(), this just returns msg unchanged, but extensions - and GUI tools can override it to allow styling output without - writing it. + If some color mode is enabled, this will add the necessary control + characters to apply such color. 
In addition, 'debug' color mode adds + markup showing which label affects a piece of text. ui.write(s, 'label') is equivalent to ui.write(ui.label(s, 'label')). ''' + if self._colormode is not None: + return color.colorlabel(self, msg, label) return msg def develwarn(self, msg, stacklevel=1, config=None):
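The blocked-time bookkeeping threaded through ui.py above follows one pattern: measure wall time around a blocking operation with util.timer and accumulate milliseconds into _blockedtimes under a '<tag>_blocked' key, reported when ui.logblockedtimes is set. A minimal self-contained sketch of that pattern, using time.time() in place of util.timer and a hypothetical class rather than Mercurial's ui:

    import collections
    import contextlib
    import time

    class blockedtimer(object):
        """Accumulate milliseconds spent blocked, keyed by tag."""
        def __init__(self):
            self._blockedtimes = collections.defaultdict(int)

        @contextlib.contextmanager
        def timeblockedsection(self, key):
            starttime = time.time()
            try:
                yield
            finally:
                # charge elapsed wall time to "<key>_blocked", in ms
                self._blockedtimes[key + '_blocked'] += \
                    (time.time() - starttime) * 1000

    t = blockedtimer()
    with t.timeblockedsection('stdio'):
        time.sleep(0.01)           # stands in for a blocking write
    print(dict(t._blockedtimes))   # e.g. {'stdio_blocked': 10.2}

ui._write() and ui.flush() open-code the same accounting rather than entering the context manager because they sit on hot output paths.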
--- a/mercurial/unionrepo.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/unionrepo.py Sat Mar 11 13:53:14 2017 -0500 @@ -27,8 +27,8 @@ pathutil, pycompat, revlog, - scmutil, util, + vfs as vfsmod, ) class unionrevlog(revlog.revlog): @@ -39,7 +39,7 @@ # # To differentiate a rev in the second revlog from a rev in the revlog, # we check revision against repotiprev. - opener = scmutil.readonlyvfs(opener) + opener = vfsmod.readonlyvfs(opener) revlog.revlog.__init__(self, opener, indexfile) self.revlog2 = revlog2
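unionrevlog still guards its opener with a read-only wrapper; only the wrapper's home moved from scmutil to the new mercurial/vfs.py. The guard amounts to a __call__ that rejects any mode other than 'r'/'rb', as in this toy stand-in (readonlyproxy is hypothetical; the real class is vfs.readonlyvfs, shown later in this changeset):

    class readonlyproxy(object):
        """Toy read-only wrapper in the spirit of vfs.readonlyvfs."""
        def __init__(self, opener):
            self._opener = opener

        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise RuntimeError('this vfs is read only')
            return self._opener(path, mode, *args, **kw)

    ro = readonlyproxy(open)
    try:
        ro('somefile', 'wb')    # any write mode is refused up front
    except RuntimeError as inst:
        print(inst)             # this vfs is read only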
--- a/mercurial/util.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/util.py Sat Mar 11 13:53:14 2017 -0500 @@ -63,9 +63,21 @@ urlreq = pycompat.urlreq xmlrpclib = pycompat.xmlrpclib +def isatty(fp): + try: + return fp.isatty() + except AttributeError: + return False + +# glibc determines buffering on first write to stdout - if we replace a TTY +# destined stdout with a pipe destined stdout (e.g. pager), we want line +# buffering +if isatty(stdout): + stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1) + if pycompat.osname == 'nt': from . import windows as platform - stdout = platform.winstdout(pycompat.stdout) + stdout = platform.winstdout(stdout) else: from . import posix as platform @@ -797,7 +809,7 @@ inname, outname = None, None try: infd, inname = tempfile.mkstemp(prefix='hg-filter-in-') - fp = os.fdopen(infd, 'wb') + fp = os.fdopen(infd, pycompat.sysstr('wb')) fp.write(s) fp.close() outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-') @@ -943,10 +955,7 @@ # executable version (py2exe) doesn't support __file__ datapath = os.path.dirname(pycompat.sysexecutable) else: - datapath = os.path.dirname(__file__) - -if not isinstance(datapath, bytes): - datapath = pycompat.fsencode(datapath) + datapath = os.path.dirname(pycompat.fsencode(__file__)) i18n.setdatapath(datapath) @@ -968,8 +977,9 @@ _sethgexecutable(encoding.environ['EXECUTABLEPATH']) else: _sethgexecutable(pycompat.sysexecutable) - elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg': - _sethgexecutable(mainmod.__file__) + elif (os.path.basename( + pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'): + _sethgexecutable(pycompat.fsencode(mainmod.__file__)) else: exe = findexe('hg') or os.path.basename(sys.argv[0]) _sethgexecutable(exe) @@ -999,20 +1009,16 @@ env['HG'] = hgexecutable() return env -def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None): +def system(cmd, environ=None, cwd=None, out=None): '''enhanced shell command execution. run with environment maybe modified, maybe in different dir. - if command fails and onerr is None, return status, else raise onerr - object as exception. - if out is specified, it is assumed to be a file-like object that has a write() method. stdout and stderr will be redirected to out.''' try: stdout.flush() except Exception: pass - origcmd = cmd cmd = quotecommand(cmd) if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2 and sys.version_info[1] < 7): @@ -1036,12 +1042,6 @@ rc = proc.returncode if pycompat.sysplatform == 'OpenVMS' and rc & 1: rc = 0 - if rc and onerr: - errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]), - explainexit(rc)[0]) - if errprefix: - errmsg = '%s: %s' % (errprefix, errmsg) - raise onerr(errmsg) return rc def checksignature(func): @@ -1056,6 +1056,11 @@ return check +# Hardlinks are problematic on CIFS, do not allow hardlinks +# until we find a way to work around it cleanly (issue4546). +# This is a variable so extensions can opt-in to using them. 
+allowhardlinks = False + def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False): '''copy a file, preserving mode and optionally other stat info like atime/mtime @@ -1072,9 +1077,7 @@ if checkambig: oldstat = checkambig and filestat(dest) unlink(dest) - # hardlinks are problematic on CIFS, quietly ignore this flag - # until we find a way to work around it cleanly (issue4546) - if False and hardlink: + if allowhardlinks and hardlink: try: oslink(src, dest) return @@ -1191,8 +1194,13 @@ if pycompat.osname == 'nt': checkosfilename = checkwinfilename + timer = time.clock else: checkosfilename = platform.checkosfilename + timer = time.time + +if safehasattr(time, "perf_counter"): + timer = time.perf_counter def makelock(info, pathname): try: @@ -2750,12 +2758,6 @@ u.user = u.passwd = None return str(u) -def isatty(fp): - try: - return fp.isatty() - except AttributeError: - return False - timecount = unitcountfn( (1, 1e3, _('%.0f s')), (100, 1, _('%.1f s')), @@ -2786,13 +2788,13 @@ ''' def wrapper(*args, **kwargs): - start = time.time() + start = timer() indent = 2 _timenesting[0] += indent try: return func(*args, **kwargs) finally: - elapsed = time.time() - start + elapsed = timer() - start _timenesting[0] -= indent stderr.write('%s%s: %s\n' % (' ' * _timenesting[0], func.__name__,
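util.py now picks the best available clock once at import time; both the @timed decorator and the new blocked-time accounting in ui.py measure with whichever was chosen. The preference chain is equivalent to this sketch (the diff assigns in the opposite order, with the same net winner):

    import os
    import time

    # Prefer time.perf_counter where it exists (Python 3.3+); otherwise
    # time.clock on Windows (wall clock there) and time.time elsewhere.
    if hasattr(time, 'perf_counter'):
        timer = time.perf_counter
    elif os.name == 'nt':
        timer = time.clock
    else:
        timer = time.time

    start = timer()
    sum(range(1000000))                     # some measured work
    print('%f seconds' % (timer() - start))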
--- a/mercurial/verify.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/verify.py Sat Mar 11 13:53:14 2017 -0500 @@ -18,6 +18,7 @@ from . import ( error, revlog, + scmutil, util, ) @@ -32,21 +33,13 @@ f = f.replace('//', '/') return f -def _validpath(repo, path): - """Returns False if a path should NOT be treated as part of a repo. - - For all in-core cases, this returns True, as we have no way for a - path to be mentioned in the history but not actually be - relevant. For narrow clones, this is important because many - filelogs will be missing, and changelog entries may mention - modified files that are outside the narrow scope. - """ - return True - class verifier(object): - def __init__(self, repo): + # The match argument is always None in hg core, but e.g. the narrowhg + # extension will pass in a matcher here. + def __init__(self, repo, match=None): self.repo = repo.unfiltered() self.ui = repo.ui + self.match = match or scmutil.matchall(repo) self.badrevs = set() self.errors = 0 self.warnings = 0 @@ -170,6 +163,7 @@ def _verifychangelog(self): ui = self.ui repo = self.repo + match = self.match cl = repo.changelog ui.status(_("checking changesets\n")) @@ -189,7 +183,7 @@ mflinkrevs.setdefault(changes[0], []).append(i) self.refersmf = True for f in changes[3]: - if _validpath(repo, f): + if match(f): filelinkrevs.setdefault(_normpath(f), []).append(i) except Exception as inst: self.refersmf = True @@ -201,6 +195,7 @@ progress=None): repo = self.repo ui = self.ui + match = self.match mfl = self.repo.manifestlog mf = mfl._revlog.dirlog(dir) @@ -243,12 +238,14 @@ elif f == "/dev/null": # ignore this in very old repos continue fullpath = dir + _normpath(f) - if not _validpath(repo, fullpath): - continue if fl == 't': + if not match.visitdir(fullpath): + continue subdirnodes.setdefault(fullpath + '/', {}).setdefault( fn, []).append(lr) else: + if not match(fullpath): + continue filenodes.setdefault(fullpath, {}).setdefault(fn, lr) except Exception as inst: self.exc(lr, _("reading delta %s") % short(n), inst, label)
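The matcher contract verify now depends on is small: calling the matcher with a file path asks "is this file in scope?", and visitdir() asks "could anything under this directory match?", which lets tree-manifest traversal prune whole subtrees. A toy prefix matcher showing both entry points (prefixmatcher is hypothetical; core hg passes scmutil.matchall(repo), narrowhg a real matcher):

    class prefixmatcher(object):
        """Toy matcher: accept only paths under one directory prefix."""
        def __init__(self, prefix):
            self._prefix = prefix

        def __call__(self, path):       # file check, as in match(fullpath)
            return path.startswith(self._prefix + '/')

        def visitdir(self, dirpath):    # pruning, as in match.visitdir()
            return (dirpath == self._prefix
                    or dirpath.startswith(self._prefix + '/')
                    or self._prefix.startswith(dirpath + '/'))

    m = prefixmatcher('lib')
    print(m('lib/util.py'))      # True: inside the narrow set
    print(m('doc/readme.txt'))   # False: skipped, like a missing filelog
    print(m.visitdir('doc'))     # False: the whole subtree is pruned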
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/vfs.py Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,636 @@ +# vfs.py - Mercurial 'vfs' classes +# +# Copyright Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +from __future__ import absolute_import + +import contextlib +import errno +import os +import shutil +import stat +import tempfile +import threading + +from .i18n import _ +from . import ( + error, + osutil, + pathutil, + pycompat, + util, +) + +class abstractvfs(object): + """Abstract base class; cannot be instantiated""" + + def __init__(self, *args, **kwargs): + '''Prevent instantiation; don't call this from subclasses.''' + raise NotImplementedError('attempted instantiating ' + str(type(self))) + + def tryread(self, path): + '''gracefully return an empty string for missing files''' + try: + return self.read(path) + except IOError as inst: + if inst.errno != errno.ENOENT: + raise + return "" + + def tryreadlines(self, path, mode='rb'): + '''gracefully return an empty array for missing files''' + try: + return self.readlines(path, mode=mode) + except IOError as inst: + if inst.errno != errno.ENOENT: + raise + return [] + + @util.propertycache + def open(self): + '''Open ``path`` file, which is relative to vfs root. + + Newly created directories are marked as "not to be indexed by + the content indexing service", if ``notindexed`` is specified + for "write" mode access. + ''' + return self.__call__ + + def read(self, path): + with self(path, 'rb') as fp: + return fp.read() + + def readlines(self, path, mode='rb'): + with self(path, mode=mode) as fp: + return fp.readlines() + + def write(self, path, data, backgroundclose=False): + with self(path, 'wb', backgroundclose=backgroundclose) as fp: + return fp.write(data) + + def writelines(self, path, data, mode='wb', notindexed=False): + with self(path, mode=mode, notindexed=notindexed) as fp: + return fp.writelines(data) + + def append(self, path, data): + with self(path, 'ab') as fp: + return fp.write(data) + + def basename(self, path): + """return base element of a path (as os.path.basename would do) + + This exists to allow handling of strange encoding if needed.""" + return os.path.basename(path) + + def chmod(self, path, mode): + return os.chmod(self.join(path), mode) + + def dirname(self, path): + """return dirname element of a path (as os.path.dirname would do) + + This exists to allow handling of strange encoding if needed.""" + return os.path.dirname(path) + + def exists(self, path=None): + return os.path.exists(self.join(path)) + + def fstat(self, fp): + return util.fstat(fp) + + def isdir(self, path=None): + return os.path.isdir(self.join(path)) + + def isfile(self, path=None): + return os.path.isfile(self.join(path)) + + def islink(self, path=None): + return os.path.islink(self.join(path)) + + def isfileorlink(self, path=None): + '''return whether path is a regular file or a symlink + + Unlike isfile, this doesn't follow symlinks.''' + try: + st = self.lstat(path) + except OSError: + return False + mode = st.st_mode + return stat.S_ISREG(mode) or stat.S_ISLNK(mode) + + def reljoin(self, *paths): + """join various elements of a path together (as os.path.join would do) + + The vfs base is not injected so that path stay relative. 
This exists
+        to allow handling of strange encoding if needed."""
+        return os.path.join(*paths)
+
+    def split(self, path):
+        """split top-most element of a path (as os.path.split would do)
+
+        This exists to allow handling of strange encoding if needed."""
+        return os.path.split(path)
+
+    def lexists(self, path=None):
+        return os.path.lexists(self.join(path))
+
+    def lstat(self, path=None):
+        return os.lstat(self.join(path))
+
+    def listdir(self, path=None):
+        return os.listdir(self.join(path))
+
+    def makedir(self, path=None, notindexed=True):
+        return util.makedir(self.join(path), notindexed)
+
+    def makedirs(self, path=None, mode=None):
+        return util.makedirs(self.join(path), mode)
+
+    def makelock(self, info, path):
+        return util.makelock(info, self.join(path))
+
+    def mkdir(self, path=None):
+        return os.mkdir(self.join(path))
+
+    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
+        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
+                                    dir=self.join(dir), text=text)
+        dname, fname = util.split(name)
+        if dir:
+            return fd, os.path.join(dir, fname)
+        else:
+            return fd, fname
+
+    def readdir(self, path=None, stat=None, skip=None):
+        return osutil.listdir(self.join(path), stat, skip)
+
+    def readlock(self, path):
+        return util.readlock(self.join(path))
+
+    def rename(self, src, dst, checkambig=False):
+        """Rename from src to dst
+
+        checkambig argument is used with util.filestat, and is useful
+        only if destination file is guarded by any lock
+        (e.g. repo.lock or repo.wlock).
+        """
+        dstpath = self.join(dst)
+        oldstat = checkambig and util.filestat(dstpath)
+        if oldstat and oldstat.stat:
+            ret = util.rename(self.join(src), dstpath)
+            newstat = util.filestat(dstpath)
+            if newstat.isambig(oldstat):
+                # stat of renamed file is ambiguous to original one
+                newstat.avoidambig(dstpath, oldstat)
+            return ret
+        return util.rename(self.join(src), dstpath)
+
+    def readlink(self, path):
+        return os.readlink(self.join(path))
+
+    def removedirs(self, path=None):
+        """Remove a leaf directory and all empty intermediate ones
+        """
+        return util.removedirs(self.join(path))
+
+    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
+        """Remove a directory tree recursively
+
+        If ``forcibly``, this tries to remove READ-ONLY files, too.
+        """
+        if forcibly:
+            def onerror(function, path, excinfo):
+                if function is not os.remove:
+                    raise
+                # read-only files cannot be unlinked under Windows
+                s = os.stat(path)
+                if (s.st_mode & stat.S_IWRITE) != 0:
+                    raise
+                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
+                os.remove(path)
+        else:
+            onerror = None
+        return shutil.rmtree(self.join(path),
+                             ignore_errors=ignore_errors, onerror=onerror)
+
+    def setflags(self, path, l, x):
+        return util.setflags(self.join(path), l, x)
+
+    def stat(self, path=None):
+        return os.stat(self.join(path))
+
+    def unlink(self, path=None):
+        return util.unlink(self.join(path))
+
+    def unlinkpath(self, path=None, ignoremissing=False):
+        return util.unlinkpath(self.join(path), ignoremissing)
+
+    def utime(self, path=None, t=None):
+        return os.utime(self.join(path), t)
+
+    def walk(self, path=None, onerror=None):
+        """Yield (dirpath, dirs, files) tuple for each directory under path
+
+        ``dirpath`` is relative to the root of this vfs. This
+        uses ``os.sep`` as path separator, even if you specify POSIX
+        style ``path``.
+
+        "The root of this vfs" is represented as empty ``dirpath``.
+ """ + root = os.path.normpath(self.join(None)) + # when dirpath == root, dirpath[prefixlen:] becomes empty + # because len(dirpath) < prefixlen. + prefixlen = len(pathutil.normasprefix(root)) + for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror): + yield (dirpath[prefixlen:], dirs, files) + + @contextlib.contextmanager + def backgroundclosing(self, ui, expectedcount=-1): + """Allow files to be closed asynchronously. + + When this context manager is active, ``backgroundclose`` can be passed + to ``__call__``/``open`` to result in the file possibly being closed + asynchronously, on a background thread. + """ + # This is an arbitrary restriction and could be changed if we ever + # have a use case. + vfs = getattr(self, 'vfs', self) + if getattr(vfs, '_backgroundfilecloser', None): + raise error.Abort( + _('can only have 1 active background file closer')) + + with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc: + try: + vfs._backgroundfilecloser = bfc + yield bfc + finally: + vfs._backgroundfilecloser = None + +class vfs(abstractvfs): + '''Operate files relative to a base directory + + This class is used to hide the details of COW semantics and + remote file access from higher level code. + ''' + def __init__(self, base, audit=True, expandpath=False, realpath=False): + if expandpath: + base = util.expandpath(base) + if realpath: + base = os.path.realpath(base) + self.base = base + self.mustaudit = audit + self.createmode = None + self._trustnlink = None + + @property + def mustaudit(self): + return self._audit + + @mustaudit.setter + def mustaudit(self, onoff): + self._audit = onoff + if onoff: + self.audit = pathutil.pathauditor(self.base) + else: + self.audit = util.always + + @util.propertycache + def _cansymlink(self): + return util.checklink(self.base) + + @util.propertycache + def _chmod(self): + return util.checkexec(self.base) + + def _fixfilemode(self, name): + if self.createmode is None or not self._chmod: + return + os.chmod(name, self.createmode & 0o666) + + def __call__(self, path, mode="r", text=False, atomictemp=False, + notindexed=False, backgroundclose=False, checkambig=False): + '''Open ``path`` file, which is relative to vfs root. + + Newly created directories are marked as "not to be indexed by + the content indexing service", if ``notindexed`` is specified + for "write" mode access. + + If ``backgroundclose`` is passed, the file may be closed asynchronously. + It can only be used if the ``self.backgroundclosing()`` context manager + is active. This should only be specified if the following criteria hold: + + 1. There is a potential for writing thousands of files. Unless you + are writing thousands of files, the performance benefits of + asynchronously closing files is not realized. + 2. Files are opened exactly once for the ``backgroundclosing`` + active duration and are therefore free of race conditions between + closing a file on a background thread and reopening it. (If the + file were opened multiple times, there could be unflushed data + because the original file handle hasn't been flushed/closed yet.) + + ``checkambig`` argument is passed to atomictemplfile (valid + only for writing), and is useful only if target file is + guarded by any lock (e.g. repo.lock or repo.wlock). 
+ ''' + if self._audit: + r = util.checkosfilename(path) + if r: + raise error.Abort("%s: %r" % (r, path)) + self.audit(path) + f = self.join(path) + + if not text and "b" not in mode: + mode += "b" # for that other OS + + nlink = -1 + if mode not in ('r', 'rb'): + dirname, basename = util.split(f) + # If basename is empty, then the path is malformed because it points + # to a directory. Let the posixfile() call below raise IOError. + if basename: + if atomictemp: + util.makedirs(dirname, self.createmode, notindexed) + return util.atomictempfile(f, mode, self.createmode, + checkambig=checkambig) + try: + if 'w' in mode: + util.unlink(f) + nlink = 0 + else: + # nlinks() may behave differently for files on Windows + # shares if the file is open. + with util.posixfile(f): + nlink = util.nlinks(f) + if nlink < 1: + nlink = 2 # force mktempcopy (issue1922) + except (OSError, IOError) as e: + if e.errno != errno.ENOENT: + raise + nlink = 0 + util.makedirs(dirname, self.createmode, notindexed) + if nlink > 0: + if self._trustnlink is None: + self._trustnlink = nlink > 1 or util.checknlink(f) + if nlink > 1 or not self._trustnlink: + util.rename(util.mktempcopy(f), f) + fp = util.posixfile(f, mode) + if nlink == 0: + self._fixfilemode(f) + + if checkambig: + if mode in ('r', 'rb'): + raise error.Abort(_('implementation error: mode %s is not' + ' valid for checkambig=True') % mode) + fp = checkambigatclosing(fp) + + if backgroundclose: + if not self._backgroundfilecloser: + raise error.Abort(_('backgroundclose can only be used when a ' + 'backgroundclosing context manager is active') + ) + + fp = delayclosedfile(fp, self._backgroundfilecloser) + + return fp + + def symlink(self, src, dst): + self.audit(dst) + linkname = self.join(dst) + try: + os.unlink(linkname) + except OSError: + pass + + util.makedirs(os.path.dirname(linkname), self.createmode) + + if self._cansymlink: + try: + os.symlink(src, linkname) + except OSError as err: + raise OSError(err.errno, _('could not symlink to %r: %s') % + (src, err.strerror), linkname) + else: + self.write(dst, src) + + def join(self, path, *insidef): + if path: + return os.path.join(self.base, path, *insidef) + else: + return self.base + +opener = vfs + +class auditvfs(object): + def __init__(self, vfs): + self.vfs = vfs + + @property + def mustaudit(self): + return self.vfs.mustaudit + + @mustaudit.setter + def mustaudit(self, onoff): + self.vfs.mustaudit = onoff + + @property + def options(self): + return self.vfs.options + + @options.setter + def options(self, value): + self.vfs.options = value + +class filtervfs(abstractvfs, auditvfs): + '''Wrapper vfs for filtering filenames with a function.''' + + def __init__(self, vfs, filter): + auditvfs.__init__(self, vfs) + self._filter = filter + + def __call__(self, path, *args, **kwargs): + return self.vfs(self._filter(path), *args, **kwargs) + + def join(self, path, *insidef): + if path: + return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef))) + else: + return self.vfs.join(path) + +filteropener = filtervfs + +class readonlyvfs(abstractvfs, auditvfs): + '''Wrapper vfs preventing any writing.''' + + def __init__(self, vfs): + auditvfs.__init__(self, vfs) + + def __call__(self, path, mode='r', *args, **kw): + if mode not in ('r', 'rb'): + raise error.Abort(_('this vfs is read only')) + return self.vfs(path, mode, *args, **kw) + + def join(self, path, *insidef): + return self.vfs.join(path, *insidef) + +class closewrapbase(object): + """Base class of wrapper, which hooks closing + + Do not 
instantiate outside of the vfs layer. + """ + def __init__(self, fh): + object.__setattr__(self, '_origfh', fh) + + def __getattr__(self, attr): + return getattr(self._origfh, attr) + + def __setattr__(self, attr, value): + return setattr(self._origfh, attr, value) + + def __delattr__(self, attr): + return delattr(self._origfh, attr) + + def __enter__(self): + return self._origfh.__enter__() + + def __exit__(self, exc_type, exc_value, exc_tb): + raise NotImplementedError('attempted instantiating ' + str(type(self))) + + def close(self): + raise NotImplementedError('attempted instantiating ' + str(type(self))) + +class delayclosedfile(closewrapbase): + """Proxy for a file object whose close is delayed. + + Do not instantiate outside of the vfs layer. + """ + def __init__(self, fh, closer): + super(delayclosedfile, self).__init__(fh) + object.__setattr__(self, '_closer', closer) + + def __exit__(self, exc_type, exc_value, exc_tb): + self._closer.close(self._origfh) + + def close(self): + self._closer.close(self._origfh) + +class backgroundfilecloser(object): + """Coordinates background closing of file handles on multiple threads.""" + def __init__(self, ui, expectedcount=-1): + self._running = False + self._entered = False + self._threads = [] + self._threadexception = None + + # Only Windows/NTFS has slow file closing. So only enable by default + # on that platform. But allow to be enabled elsewhere for testing. + defaultenabled = pycompat.osname == 'nt' + enabled = ui.configbool('worker', 'backgroundclose', defaultenabled) + + if not enabled: + return + + # There is overhead to starting and stopping the background threads. + # Don't do background processing unless the file count is large enough + # to justify it. + minfilecount = ui.configint('worker', 'backgroundcloseminfilecount', + 2048) + # FUTURE dynamically start background threads after minfilecount closes. + # (We don't currently have any callers that don't know their file count) + if expectedcount > 0 and expectedcount < minfilecount: + return + + # Windows defaults to a limit of 512 open files. A buffer of 128 + # should give us enough headway. + maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384) + threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4) + + ui.debug('starting %d threads for background file closing\n' % + threadcount) + + self._queue = util.queue(maxsize=maxqueue) + self._running = True + + for i in range(threadcount): + t = threading.Thread(target=self._worker, name='backgroundcloser') + self._threads.append(t) + t.start() + + def __enter__(self): + self._entered = True + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._running = False + + # Wait for threads to finish closing so open files don't linger for + # longer than lifetime of context manager. + for t in self._threads: + t.join() + + def _worker(self): + """Main routine for worker thread.""" + while True: + try: + fh = self._queue.get(block=True, timeout=0.100) + # Need to catch or the thread will terminate and + # we could orphan file descriptors. + try: + fh.close() + except Exception as e: + # Stash so can re-raise from main thread later. + self._threadexception = e + except util.empty: + if not self._running: + break + + def close(self, fh): + """Schedule a file for closing.""" + if not self._entered: + raise error.Abort(_('can only call close() when context manager ' + 'active')) + + # If a background thread encountered an exception, raise now so we fail + # fast. 
Otherwise we may potentially go on for minutes until the error + # is acted on. + if self._threadexception: + e = self._threadexception + self._threadexception = None + raise e + + # If we're not actively running, close synchronously. + if not self._running: + fh.close() + return + + self._queue.put(fh, block=True, timeout=None) + +class checkambigatclosing(closewrapbase): + """Proxy for a file object, to avoid ambiguity of file stat + + See also util.filestat for detail about "ambiguity of file stat". + + This proxy is useful only if the target file is guarded by any + lock (e.g. repo.lock or repo.wlock) + + Do not instantiate outside of the vfs layer. + """ + def __init__(self, fh): + super(checkambigatclosing, self).__init__(fh) + object.__setattr__(self, '_oldstat', util.filestat(fh.name)) + + def _checkambig(self): + oldstat = self._oldstat + if oldstat.stat: + newstat = util.filestat(self._origfh.name) + if newstat.isambig(oldstat): + # stat of changed file is ambiguous to original one + newstat.avoidambig(self._origfh.name, oldstat) + + def __exit__(self, exc_type, exc_value, exc_tb): + self._origfh.__exit__(exc_type, exc_value, exc_tb) + self._checkambig() + + def close(self): + self._origfh.close() + self._checkambig()
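Everything in the new vfs.py funnels through two operations: join() to anchor a relative path under self.base, and __call__() to open it. The convenience helpers (read, write, tryread, ...) are thin wrappers over those two, which is what makes wrapper classes like filtervfs and readonlyvfs cheap to write. A stripped-down sketch of that core contract (minivfs is illustrative only, not mercurial.vfs itself):

    import errno
    import os
    import tempfile

    class minivfs(object):
        """Toy vfs: resolve paths relative to a base directory."""
        def __init__(self, base):
            self.base = base

        def join(self, path):
            return os.path.join(self.base, path) if path else self.base

        def __call__(self, path, mode='rb'):
            if 'w' in mode or 'a' in mode:
                # like vfs.__call__, create missing parent directories
                d = os.path.dirname(self.join(path))
                if d and not os.path.isdir(d):
                    os.makedirs(d)
            return open(self.join(path), mode)

        def write(self, path, data):
            with self(path, 'wb') as fp:
                return fp.write(data)

        def tryread(self, path):
            # gracefully return an empty string for missing files
            try:
                with self(path, 'rb') as fp:
                    return fp.read()
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                return b''

    v = minivfs(tempfile.mkdtemp())
    v.write('store/data.txt', b'hello')
    print(v.tryread('store/data.txt'))   # b'hello'
    print(v.tryread('missing'))          # b'' rather than an exception

The real class adds path auditing, atomic tempfiles, hardlink handling and the backgroundclosing machinery on top of this skeleton.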
--- a/mercurial/wireproto.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/wireproto.py Sat Mar 11 13:53:14 2017 -0500 @@ -26,6 +26,7 @@ exchange, peer, pushkey as pushkeymod, + pycompat, streamclone, util, ) @@ -735,7 +736,7 @@ depending on the request. e.g. you could advertise URLs for the closest data center given the client's IP address. """ - return repo.opener.tryread('clonebundles.manifest') + return repo.vfs.tryread('clonebundles.manifest') wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey', 'known', 'getbundle', 'unbundlehash', 'batch'] @@ -839,7 +840,6 @@ raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint) - #chunks = exchange.getbundlechunks(repo, 'serve', **opts) try: chunks = exchange.getbundlechunks(repo, 'serve', **opts) except error.Abort as exc: @@ -961,7 +961,7 @@ # write bundle data to temporary file because it can be big fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') - fp = os.fdopen(fd, 'wb+') + fp = os.fdopen(fd, pycompat.sysstr('wb+')) r = 0 try: proto.getfile(fp)
--- a/mercurial/worker.py Tue Mar 07 13:24:24 2017 -0500 +++ b/mercurial/worker.py Sat Mar 11 13:53:14 2017 -0500 @@ -164,7 +164,7 @@ os._exit(0) pids.add(pid) os.close(wfd) - fp = os.fdopen(rfd, 'rb', 0) + fp = os.fdopen(rfd, pycompat.sysstr('rb'), 0) def cleanup(): signal.signal(signal.SIGINT, oldhandler) waitforworkers()
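The os.fdopen(fd, pycompat.sysstr('wb')) changes here and in ui.py, util.py and wireproto.py all address the same Python 3 issue: Mercurial's Python 3 source transformer rewrites string literals to bytes, but os.fdopen() requires a native str mode. A sketch of the conversion (this sysstr is a stand-in for pycompat.sysstr):

    import os
    import sys
    import tempfile

    def sysstr(s):
        # bytes literal in, native str out on Python 3; no-op on Python 2
        if sys.version_info[0] >= 3 and isinstance(s, bytes):
            return s.decode('ascii')
        return s

    fd, name = tempfile.mkstemp(prefix='hg-demo-')
    fp = os.fdopen(fd, sysstr(b'wb'))    # mode is str on both majors
    fp.write(b'payload')
    fp.close()
    os.unlink(name)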
--- a/setup.py Tue Mar 07 13:24:24 2017 -0500 +++ b/setup.py Sat Mar 11 13:53:14 2017 -0500 @@ -63,7 +63,10 @@ import shutil import tempfile from distutils import log -if 'FORCE_SETUPTOOLS' in os.environ: +# We have issues with setuptools on some platforms and builders. Until +# those are resolved, setuptools is opt-in except for platforms where +# we don't have issues. +if os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ: from setuptools import setup else: from distutils.core import setup
--- a/tests/dumbhttp.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/dumbhttp.py Sat Mar 11 13:53:14 2017 -0500 @@ -7,7 +7,9 @@ """ import optparse +import os import signal +import socket import sys from mercurial import ( @@ -18,11 +20,17 @@ httpserver = util.httpserver OptionParser = optparse.OptionParser +if os.environ.get('HGIPV6', '0') == '1': + class simplehttpserver(httpserver.httpserver): + address_family = socket.AF_INET6 +else: + simplehttpserver = httpserver.httpserver + class simplehttpservice(object): def __init__(self, host, port): self.address = (host, port) def init(self): - self.httpd = httpserver.httpserver( + self.httpd = simplehttpserver( self.address, httpserver.simplehttprequesthandler) def run(self): self.httpd.serve_forever()
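Binding the dumb test server to IPv6 only needs the server class's address_family attribute changed; run-tests.py exports HGIPV6 so helper scripts can make the same choice it made. Roughly (standard-library class names here, not Mercurial's util.httpserver wrappers):

    import os
    import socket
    try:                        # Python 3
        from http.server import HTTPServer
    except ImportError:         # Python 2
        from BaseHTTPServer import HTTPServer

    if os.environ.get('HGIPV6', '0') == '1':
        class simplehttpserver(HTTPServer):
            # bind to ::1 instead of 127.0.0.1
            address_family = socket.AF_INET6
    else:
        simplehttpserver = HTTPServer

    # simplehttpserver((host, port), handler) now binds whichever
    # address family the test harness selected.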
--- a/tests/dummyssh Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/dummyssh Sat Mar 11 13:53:14 2017 -0500 @@ -10,7 +10,7 @@ if sys.argv[1] != "user@dummy": sys.exit(-1) -os.environ["SSH_CLIENT"] = "127.0.0.1 1 2" +os.environ["SSH_CLIENT"] = "%s 1 2" % os.environ.get('LOCALIP', '127.0.0.1') log = open("dummylog", "ab") log.write("Got arguments")
--- a/tests/run-tests.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/run-tests.py Sat Mar 11 13:53:14 2017 -0500 @@ -112,18 +112,51 @@ # For Windows support wifexited = getattr(os, "WIFEXITED", lambda x: False) -def checkportisavailable(port): - """return true if a port seems free to bind on localhost""" +# Whether to use IPv6 +def checksocketfamily(name, port=20058): + """return true if we can listen on localhost using family=name + + name should be either 'AF_INET', or 'AF_INET6'. + port being used is okay - EADDRINUSE is considered as successful. + """ + family = getattr(socket, name, None) + if family is None: + return False try: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s = socket.socket(family, socket.SOCK_STREAM) s.bind(('localhost', port)) s.close() return True except socket.error as exc: - if not exc.errno == errno.EADDRINUSE: + if exc.errno == errno.EADDRINUSE: + return True + elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT): + return False + else: raise + else: return False +# useipv6 will be set by parseargs +useipv6 = None + +def checkportisavailable(port): + """return true if a port seems free to bind on localhost""" + if useipv6: + family = socket.AF_INET6 + else: + family = socket.AF_INET + try: + s = socket.socket(family, socket.SOCK_STREAM) + s.bind(('localhost', port)) + s.close() + return True + except socket.error as exc: + if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL, + errno.EPROTONOSUPPORT): + raise + return False + closefds = os.name == 'posix' def Popen4(cmd, wd, timeout, env=None): processlock.acquire() @@ -269,6 +302,8 @@ help="install and use chg wrapper in place of hg") parser.add_option("--with-chg", metavar="CHG", help="use specified chg wrapper in place of hg") + parser.add_option("--ipv6", action="store_true", + help="prefer IPv6 to IPv4 for network related tests") parser.add_option("-3", "--py3k-warnings", action="store_true", help="enable Py3k warnings on Python 2.6+") # This option should be deleted once test-check-py3-compat.t and other @@ -338,6 +373,14 @@ parser.error('--chg does not work when --with-hg is specified ' '(use --with-chg instead)') + global useipv6 + if options.ipv6: + useipv6 = checksocketfamily('AF_INET6') + else: + # only use IPv6 if IPv4 is unavailable and IPv6 is available + useipv6 = ((not checksocketfamily('AF_INET')) + and checksocketfamily('AF_INET6')) + options.anycoverage = options.cover or options.annotate or options.htmlcov if options.anycoverage: try: @@ -506,7 +549,8 @@ timeout=defaults['timeout'], startport=defaults['port'], extraconfigopts=None, py3kwarnings=False, shell=None, hgcommand=None, - slowtimeout=defaults['slowtimeout'], usechg=False): + slowtimeout=defaults['slowtimeout'], usechg=False, + useipv6=False): """Create a test from parameters. path is the full path to the file defining the test. 
@@ -554,6 +598,7 @@ self._shell = _bytespath(shell) self._hgcommand = hgcommand or b'hg' self._usechg = usechg + self._useipv6 = useipv6 self._aborted = False self._daemonpids = [] @@ -802,6 +847,7 @@ self._portmap(2), (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$', br'\1 (glob)'), + (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'), ] r.append((self._escapepath(self._testtmp), b'$TESTTMP')) @@ -817,6 +863,12 @@ else: return re.escape(p) + def _localip(self): + if self._useipv6: + return b'::1' + else: + return b'127.0.0.1' + def _getenv(self): """Obtain environment variables to use during test execution.""" def defineport(i): @@ -839,6 +891,11 @@ env["HGUSER"] = "test" env["HGENCODING"] = "ascii" env["HGENCODINGMODE"] = "strict" + env['HGIPV6'] = str(int(self._useipv6)) + + # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw + # IP addresses. + env['LOCALIP'] = self._localip() # Reset some environment variables to well-known values so that # the tests produce repeatable output. @@ -849,6 +906,7 @@ env['TERM'] = 'xterm' for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' + + 'HGPLAIN HGPLAINEXCEPT ' + 'NO_PROXY CHGDEBUG').split(): if k in env: del env[k] @@ -881,6 +939,9 @@ hgrc.write(b'[largefiles]\n') hgrc.write(b'usercache = %s\n' % (os.path.join(self._testtmp, b'.cache/largefiles'))) + hgrc.write(b'[web]\n') + hgrc.write(b'address = localhost\n') + hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii')) for opt in self._extraconfigopts: section, key = opt.split('.', 1) @@ -2288,7 +2349,8 @@ py3kwarnings=self.options.py3k_warnings, shell=self.options.shell, hgcommand=self._hgcommand, - usechg=bool(self.options.with_chg or self.options.chg)) + usechg=bool(self.options.with_chg or self.options.chg), + useipv6=useipv6) t.should_reload = True return t
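checksocketfamily() deliberately counts EADDRINUSE as success: the probe only asks whether the kernel understands the address family, and a port already being listened on proves that it does. The selection logic then prefers IPv4 unless it is unavailable or --ipv6 was given. A standalone version of the probe and the default decision:

    import errno
    import socket

    def checksocketfamily(name, port=20058):
        """Return True if we can listen on localhost with family=name."""
        family = getattr(socket, name, None)
        if family is None:
            return False
        try:
            s = socket.socket(family, socket.SOCK_STREAM)
            s.bind(('localhost', port))
            s.close()
            return True
        except socket.error as exc:
            if exc.errno == errno.EADDRINUSE:
                return True      # someone is listening: family works
            if exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
                return False     # family unsupported on this host
            raise

    # the same default run-tests.py applies when --ipv6 is not passed
    useipv6 = (not checksocketfamily('AF_INET')
               and checksocketfamily('AF_INET6'))
    print(useipv6)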
--- a/tests/test-archive.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-archive.t Sat Mar 11 13:53:14 2017 -0500 @@ -99,7 +99,7 @@ > except AttributeError: > stdout = sys.stdout > try: - > f = util.urlreq.urlopen('http://127.0.0.1:%s/?%s' + > f = util.urlreq.urlopen('http://$LOCALIP:%s/?%s' > % (os.environ['HGPORT'], requeststr)) > stdout.write(f.read()) > except util.urlerr.httperror as e:
--- a/tests/test-basic.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-basic.t Sat Mar 11 13:53:14 2017 -0500 @@ -11,6 +11,8 @@ ui.interactive=False ui.mergemarkers=detailed ui.promptecho=True + web.address=localhost + web\.ipv6=(?:True|False) (re) $ hg init t $ cd t
--- a/tests/test-bdiff.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-bdiff.py Sat Mar 11 13:53:14 2017 -0500 @@ -3,8 +3,6 @@ import struct import unittest -import silenttestrunner - from mercurial import ( bdiff, mpatch, @@ -148,4 +146,5 @@ ['a\n', diffreplace(2, 10, 'a\na\na\na\n', '')]) if __name__ == '__main__': + import silenttestrunner silenttestrunner.main(__name__)
--- a/tests/test-bookmarks.t   Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-bookmarks.t   Sat Mar 11 13:53:14 2017 -0500
@@ -1,4 +1,5 @@
-  $ hg init
+  $ hg init repo
+  $ cd repo
 
 no bookmarks
 
@@ -630,7 +631,7 @@
    Z                         2:db815d6d32e6
    x  y                      2:db815d6d32e6
   $ hg -R ../cloned-bookmarks-manual-update-with-divergence pull
-  pulling from $TESTTMP
+  pulling from $TESTTMP/repo (glob)
   searching for changes
   adding changesets
   adding manifests
@@ -895,3 +896,58 @@
   $ touch $TESTTMP/unpause
 
   $ cd ..
+
+check whether HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+(emulate a transaction running concurrently by copying
+.hg/bookmarks.pending for use in a subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/bookmarks.pending .hg/bookmarks.pending.saved
+  > exit 1 # to avoid adding new bookmark for subsequent tests
+  > EOF
+
+  $ hg init unrelated
+  $ cd unrelated
+  $ echo a > a
+  $ hg add a
+  $ hg commit -m '#0'
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" bookmarks INVISIBLE
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/bookmarks.pending.saved .hg/bookmarks.pending
+
+(check visible bookmarks while transaction running in repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo "@repo"
+  > hg -R $TESTTMP/repo bookmarks
+  > echo "@unrelated"
+  > hg -R $TESTTMP/unrelated bookmarks
+  > exit 1 # to avoid adding new bookmark for subsequent tests
+  > EOF
+
+  $ cd ../repo
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" bookmarks NEW
+  @repo
+   * NEW                       6:81dcce76aa0b
+     X2                        1:925d80f479bb
+     Y                         4:125c9a1d6df6
+     Z                         5:5fb12f0f2d51
+     Z@1                       1:925d80f479bb
+     Z@2                       4:125c9a1d6df6
+     foo                       3:9ba5f110a0b3
+     foo@1                     0:f7b1eb17ad24
+     foo@2                     2:db815d6d32e6
+     four                      3:9ba5f110a0b3
+     should-end-on-two         2:db815d6d32e6
+     x  y                      2:db815d6d32e6
+  @unrelated
+  no bookmarks set
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
--- a/tests/test-branches.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-branches.t Sat Mar 11 13:53:14 2017 -0500 @@ -519,6 +519,12 @@ $ hg branches --closed -T '{if(closed, "{branch}\n")}' c + $ hg branches -T '{word(0, branch)}: {desc|firstline}\n' + b: reopen branch with a change + a: Adding d branch + a: Adding b branch head 2 + default: Adding root node + Tests of revision branch name caching We rev branch cache is updated automatically. In these tests we use a trick to
--- a/tests/test-bundle2-exchange.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-bundle2-exchange.t Sat Mar 11 13:53:14 2017 -0500 @@ -340,7 +340,7 @@ remote: lock: free remote: wlock: free remote: postclose-tip:5fddd98957c8 draft book_5fdd - remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:127.0.0.1 (glob) + remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:$LOCALIP (glob) updating bookmark book_5fdd pre-close-tip:02de42196ebe draft book_02de postclose-tip:02de42196ebe draft book_02de @@ -394,7 +394,7 @@ remote: lock: free remote: wlock: free remote: postclose-tip:32af7686d403 public book_32af - remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:http:127.0.0.1: (glob) + remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:http:*: (glob) updating bookmark book_32af pre-close-tip:02de42196ebe draft book_02de postclose-tip:02de42196ebe draft book_02de
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-check-help.t Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,25 @@ +#require test-repo + + $ . "$TESTDIR/helpers-testrepo.sh" + + $ cat <<'EOF' > scanhelptopics.py + > from __future__ import absolute_import, print_function + > import re + > import sys + > topics = set() + > topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`') + > for fname in sys.argv: + > with open(fname) as f: + > topics.update(m.group(1) for m in topicre.finditer(f.read())) + > for s in sorted(topics): + > print(s) + > EOF + + $ cd "$TESTDIR"/.. + +Check if ":hg:`help TOPIC`" is valid: +(use "xargs -n1 -t" to see which help commands are executed) + + $ hg files 'glob:{hgext,mercurial}/**/*.py' \ + > | xargs python "$TESTTMP/scanhelptopics.py" \ + > | xargs -n1 hg help > /dev/null
--- a/tests/test-check-py3-compat.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-check-py3-compat.t Sat Mar 11 13:53:14 2017 -0500 @@ -7,7 +7,6 @@ contrib/python-zstandard/setup.py not using absolute_import contrib/python-zstandard/setup_zstd.py not using absolute_import contrib/python-zstandard/tests/common.py not using absolute_import - contrib/python-zstandard/tests/test_cffi.py not using absolute_import contrib/python-zstandard/tests/test_compressor.py not using absolute_import contrib/python-zstandard/tests/test_data_structures.py not using absolute_import contrib/python-zstandard/tests/test_decompressor.py not using absolute_import
--- a/tests/test-check-pyflakes.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-check-pyflakes.t Sat Mar 11 13:53:14 2017 -0500 @@ -7,9 +7,8 @@ (skipping binary file random-seed) $ hg locate 'set:**.py or grep("^#!.*python")' -X hgext/fsmonitor/pywatchman \ - > -X mercurial/pycompat.py \ + > -X mercurial/pycompat.py -X contrib/python-zstandard \ > 2>/dev/null \ > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py" - contrib/python-zstandard/tests/test_data_structures.py:107: local variable 'size' is assigned to but never used tests/filterpyflakes.py:39: undefined name 'undefinedname'
--- a/tests/test-chg.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-chg.t Sat Mar 11 13:53:14 2017 -0500 @@ -32,6 +32,46 @@ $ cd .. +editor +------ + + $ cat >> pushbuffer.py <<EOF + > def reposetup(ui, repo): + > repo.ui.pushbuffer(subproc=True) + > EOF + + $ chg init editor + $ cd editor + +by default, system() should be redirected to the client: + + $ touch foo + $ CHGDEBUG= HGEDITOR=cat chg ci -Am channeled --edit 2>&1 \ + > | egrep "HG:|run 'cat" + chg: debug: run 'cat "*"' at '$TESTTMP/editor' (glob) + HG: Enter commit message. Lines beginning with 'HG:' are removed. + HG: Leave message empty to abort commit. + HG: -- + HG: user: test + HG: branch 'default' + HG: added foo + +but no redirection should be made if output is captured: + + $ touch bar + $ CHGDEBUG= HGEDITOR=cat chg ci -Am bufferred --edit \ + > --config extensions.pushbuffer="$TESTTMP/pushbuffer.py" 2>&1 \ + > | egrep "HG:|run 'cat" + [1] + +check that commit commands succeeded: + + $ hg log -T '{rev}:{desc}\n' + 1:bufferred + 0:channeled + + $ cd .. + pager -----
--- a/tests/test-clone-uncompressed.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-clone-uncompressed.t Sat Mar 11 13:53:14 2017 -0500 @@ -60,12 +60,12 @@ $ cat > delayer.py <<EOF > import time - > from mercurial import extensions, scmutil + > from mercurial import extensions, vfs > def __call__(orig, self, path, *args, **kwargs): > if path == 'data/f1.i': > time.sleep(2) > return orig(self, path, *args, **kwargs) - > extensions.wrapfunction(scmutil.vfs, '__call__', __call__) + > extensions.wrapfunction(vfs.vfs, '__call__', __call__) > EOF prepare repo with small and big file to cover both code paths in emitrevlogdata
--- a/tests/test-clone.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-clone.t Sat Mar 11 13:53:14 2017 -0500 @@ -579,11 +579,11 @@ No remote source #if windows - $ hg clone http://127.0.0.1:3121/a b + $ hg clone http://$LOCALIP:3121/a b abort: error: * (glob) [255] #else - $ hg clone http://127.0.0.1:3121/a b + $ hg clone http://$LOCALIP:3121/a b abort: error: *refused* (glob) [255] #endif
--- a/tests/test-commandserver.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-commandserver.t Sat Mar 11 13:53:14 2017 -0500 @@ -199,6 +199,8 @@ ui.usehttp2=true (?) ui.foo=bar ui.nontty=true + web.address=localhost + web\.ipv6=(?:True|False) (re) *** runcommand init foo *** runcommand -R foo showconfig ui defaults defaults.backout=-d "0 0"
--- a/tests/test-completion.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-completion.t Sat Mar 11 13:53:14 2017 -0500 @@ -73,6 +73,7 @@ debugbuilddag debugbundle debugcheckstate + debugcolor debugcommands debugcomplete debugconfig @@ -129,6 +130,7 @@ Show the global options $ hg debugcomplete --options | sort + --color --config --cwd --debug @@ -138,6 +140,7 @@ --help --hidden --noninteractive + --pager --profile --quiet --repository @@ -157,6 +160,7 @@ --address --certificate --cmdserver + --color --config --cwd --daemon @@ -171,6 +175,7 @@ --ipv6 --name --noninteractive + --pager --pid-file --port --prefix @@ -223,7 +228,7 @@ serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, copies, print0, rev, change, include, exclude, subrepos, template summary: remote - update: clean, check, date, rev, tool + update: clean, check, merge, date, rev, tool addremove: similarity, subrepos, include, exclude, dry-run archive: no-decode, prefix, rev, type, subrepos, include, exclude backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user @@ -240,6 +245,7 @@ debugbuilddag: mergeable-file, overwritten-file, new-file debugbundle: all, spec debugcheckstate: + debugcolor: style debugcommands: debugcomplete: options debugcreatestreamclonebundle:
--- a/tests/test-config.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-config.t Sat Mar 11 13:53:14 2017 -0500 @@ -58,12 +58,12 @@ [ { "name": "Section.KeY", - "source": "*.hgrc:16", (glob) + "source": "*.hgrc:*", (glob) "value": "Case Sensitive" }, { "name": "Section.key", - "source": "*.hgrc:17", (glob) + "source": "*.hgrc:*", (glob) "value": "lower case" } ] @@ -71,7 +71,7 @@ [ { "name": "Section.KeY", - "source": "*.hgrc:16", (glob) + "source": "*.hgrc:*", (glob) "value": "Case Sensitive" } ] @@ -158,3 +158,9 @@ $ hg showconfig paths paths.foo:suboption=~/foo paths.foo=$TESTTMP/foo + +edit failure + + $ HGEDITOR=false hg config --edit + abort: edit failed: false exited with status 1 + [255]
--- a/tests/test-context.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-context.py Sat Mar 11 13:53:14 2017 -0500 @@ -59,7 +59,7 @@ # test performing a diff on a memctx for d in ctxb.diff(ctxa, git=True): - print(d) + print(d, end='') # test safeness and correctness of "ctx.status()" print('= checking context.status():')
--- a/tests/test-context.py.out Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-context.py.out Sat Mar 11 13:53:14 2017 -0500 @@ -4,13 +4,11 @@ UTF-8 : Grüezi! <status modified=['foo'], added=[], removed=[], deleted=[], unknown=[], ignored=[], clean=[]> diff --git a/foo b/foo - --- a/foo +++ b/foo @@ -1,1 +1,2 @@ foo +bar - = checking context.status(): == checking workingctx.status: wctx._status=<status modified=['bar-m'], added=['bar-a'], removed=['bar-r'], deleted=[], unknown=[], ignored=[], clean=[]>
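These two hunks go together: ctx.diff() yields hunk text that already ends in a newline, so the bare print() call was emitting the extra blank lines that the .out update removes. With the module's existing print_function import, the fix is simply:

    for d in ctxb.diff(ctxa, git=True):
        print(d, end='')  # each hunk carries its own trailing newline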
--- a/tests/test-contrib-perf.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-contrib-perf.t Sat Mar 11 13:53:14 2017 -0500 @@ -109,6 +109,7 @@ perfvolatilesets benchmark the computation of various volatile set perfwalk (no help text available) + perfwrite microbenchmark ui.write (use 'hg help -v perfstatusext' to show built-in aliases and global options) $ hg perfaddremove
--- a/tests/test-convert-git.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-convert-git.t Sat Mar 11 13:53:14 2017 -0500 @@ -330,7 +330,7 @@ input validation $ hg convert --config convert.git.similarity=foo --datesort git-repo2 fullrepo - abort: convert.git.similarity is not an integer ('foo') + abort: convert.git.similarity is not a valid integer ('foo') [255] $ hg convert --config convert.git.similarity=-1 --datesort git-repo2 fullrepo abort: similarity must be between 0 and 100
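The reworded abort matches Mercurial's generic typed-config error, suggesting the convert extension now reads the setting through the standard accessor instead of casting by hand. A hedged sketch of that accessor (the default shown is illustrative):

    # aborts with "convert.git.similarity is not a valid integer ('foo')"
    # when the configured value cannot be parsed as an int
    similarity = ui.configint('convert', 'git.similarity', default=50)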
--- a/tests/test-diff-color.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-diff-color.t Sat Mar 11 13:53:14 2017 -0500 @@ -1,10 +1,10 @@ Setup $ cat <<EOF >> $HGRCPATH + > [ui] + > color = always > [color] > mode = ansi - > [extensions] - > color = > EOF $ hg init repo $ cd repo @@ -35,7 +35,7 @@ default context - $ hg diff --nodates --color=always + $ hg diff --nodates \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc) \x1b[0;31;1m--- a/a\x1b[0m (esc) \x1b[0;32;1m+++ b/a\x1b[0m (esc) @@ -51,7 +51,7 @@ --unified=2 - $ hg diff --nodates -U 2 --color=always + $ hg diff --nodates -U 2 \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc) \x1b[0;31;1m--- a/a\x1b[0m (esc) \x1b[0;32;1m+++ b/a\x1b[0m (esc) @@ -65,10 +65,11 @@ diffstat - $ hg diff --stat --color=always + $ hg diff --stat a | 2 \x1b[0;32m+\x1b[0m\x1b[0;31m-\x1b[0m (esc) 1 files changed, 1 insertions(+), 1 deletions(-) $ cat <<EOF >> $HGRCPATH + > [extensions] > record = > [ui] > interactive = true @@ -81,7 +82,7 @@ record $ chmod +x a - $ hg record --color=always -m moda a <<EOF + $ hg record -m moda a <<EOF > y > y > EOF @@ -111,7 +112,7 @@ qrecord - $ hg qrecord --color=always -m moda patch <<EOF + $ hg qrecord -m moda patch <<EOF > y > y > EOF @@ -151,7 +152,7 @@ $ echo aa >> a $ echo bb >> sub/b - $ hg diff --color=always -S + $ hg diff -S \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc) \x1b[0;31;1m--- a/a\x1b[0m (esc) \x1b[0;32;1m+++ b/a\x1b[0m (esc) @@ -176,7 +177,7 @@ > mid tab > all tabs > EOF - $ hg diff --nodates --color=always + $ hg diff --nodates \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc) \x1b[0;31;1m--- a/a\x1b[0m (esc) \x1b[0;32;1m+++ b/a\x1b[0m (esc) @@ -192,7 +193,7 @@ \x1b[0;32m+\x1b[0m \x1b[0;32mall\x1b[0m \x1b[0;32mtabs\x1b[0m\x1b[0;1;41m \x1b[0m (esc) $ echo "[color]" >> $HGRCPATH $ echo "diff.tab = bold magenta" >> $HGRCPATH - $ hg diff --nodates --color=always + $ hg diff --nodates \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc) \x1b[0;31;1m--- a/a\x1b[0m (esc) \x1b[0;32;1m+++ b/a\x1b[0m (esc)
--- a/tests/test-doctest.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-doctest.py Sat Mar 11 13:53:14 2017 -0500 @@ -28,7 +28,8 @@ testmod('mercurial.patch') testmod('mercurial.pathutil') testmod('mercurial.parser') -testmod('mercurial.revset') +testmod('mercurial.revsetlang') +testmod('mercurial.smartset') testmod('mercurial.store') testmod('mercurial.subrepo') testmod('mercurial.templatefilters')
--- a/tests/test-eol.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-eol.t Sat Mar 11 13:53:14 2017 -0500 @@ -470,6 +470,22 @@ > EOF $ hg commit -m 'consistent' + $ hg init subrepo + $ hg -R subrepo pull -qu . + $ echo "subrepo = subrepo" > .hgsub + $ hg ci -Am "add subrepo" + adding .hgeol + adding .hgsub + $ hg archive -S ../archive + $ find ../archive/* | sort + ../archive/a.txt + ../archive/subrepo + ../archive/subrepo/a.txt + $ cat ../archive/a.txt ../archive/subrepo/a.txt + first\r (esc) + second\r (esc) + first\r (esc) + second\r (esc) Test trailing newline
--- a/tests/test-extension.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-extension.t Sat Mar 11 13:53:14 2017 -0500 @@ -532,6 +532,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -543,6 +545,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) @@ -567,6 +571,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -578,6 +584,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) @@ -845,6 +853,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -856,6 +866,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) Make sure that single '-v' option shows help and built-ins only for 'dodo' command $ hg help -v dodo @@ -878,6 +890,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -889,6 +903,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) In case when extension name doesn't match any of its commands, help message should ask for '-v' to get list of built-in aliases @@ -949,6 +965,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -960,6 +978,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) $ hg help -v -e dudu dudu extension - @@ -981,6 +1001,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -992,6 +1014,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) Disabled extension commands: @@ 
-1089,6 +1113,14 @@ intro=never # never include an introduction message intro=always # always include an introduction message + You can specify a template for flags to be added in subject prefixes. Flags + specified by --flag option are exported as "{flags}" keyword: + + [patchbomb] + flagtemplate = "{separate(' ', + ifeq(branch, 'default', '', branch|upper), + flags)}" + You can set patchbomb to always ask for confirmation by setting "patchbomb.confirm" to true.
--- a/tests/test-filecache.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-filecache.py Sat Mar 11 13:53:14 2017 -0500 @@ -10,24 +10,30 @@ from mercurial import ( extensions, hg, - scmutil, + localrepo, ui as uimod, util, + vfs as vfsmod, ) -filecache = scmutil.filecache - class fakerepo(object): def __init__(self): self._filecache = {} - def join(self, p): - return p + class fakevfs(object): + + def join(self, p): + return p + + vfs = fakevfs() + + def unfiltered(self): + return self def sjoin(self, p): return p - @filecache('x', 'y') + @localrepo.repofilecache('x', 'y') def cached(self): print('creating') return 'string from function' @@ -73,7 +79,7 @@ # atomic replace file, size doesn't change # hopefully st_mtime doesn't change as well so this doesn't use the cache # because of inode change - f = scmutil.opener('.')('x', 'w', atomictemp=True) + f = vfsmod.vfs('.')('x', 'w', atomictemp=True) f.write('b') f.close() @@ -97,7 +103,7 @@ # should recreate the object repo.cached - f = scmutil.opener('.')('y', 'w', atomictemp=True) + f = vfsmod.vfs('.')('y', 'w', atomictemp=True) f.write('B') f.close() @@ -105,10 +111,10 @@ print("* file y changed inode") repo.cached - f = scmutil.opener('.')('x', 'w', atomictemp=True) + f = vfsmod.vfs('.')('x', 'w', atomictemp=True) f.write('c') f.close() - f = scmutil.opener('.')('y', 'w', atomictemp=True) + f = vfsmod.vfs('.')('y', 'w', atomictemp=True) f.write('C') f.close() @@ -200,12 +206,12 @@ # st_mtime is advanced multiple times as expected for i in xrange(repetition): # explicit closing - fp = scmutil.checkambigatclosing(open(filename, 'a')) + fp = vfsmod.checkambigatclosing(open(filename, 'a')) fp.write('FOO') fp.close() # implicit closing by "with" statement - with scmutil.checkambigatclosing(open(filename, 'a')) as fp: + with vfsmod.checkambigatclosing(open(filename, 'a')) as fp: fp.write('BAR') newstat = os.stat(filename)
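The fakerepo stub grows a vfs attribute and an unfiltered() method because localrepo.repofilecache resolves its paths through repo.vfs and always operates on the unfiltered repository. A hedged reconstruction of the relevant parts of that descriptor:

    from mercurial import scmutil

    class repofilecache(scmutil.filecache):
        # hedged reconstruction: __set__/__delete__ redirect the same way
        def __get__(self, repo, type=None):
            return super(repofilecache, self).__get__(repo.unfiltered(), type)

        def join(self, obj, fname):
            return obj.vfs.join(fname)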
--- a/tests/test-fileset.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-fileset.t Sat Mar 11 13:53:14 2017 -0500 @@ -88,6 +88,35 @@ $ fileset 'copied()' c1 +Test files status in different revisions + + $ hg status -m + M b2 + $ fileset -r0 'revs("wdir()", modified())' --traceback + b2 + $ hg status -a + A c1 + $ fileset -r0 'revs("wdir()", added())' + c1 + $ hg status --change 0 -a + A a1 + A a2 + A b1 + A b2 + $ hg status -mru + M b2 + R a2 + ? c3 + $ fileset -r0 'added() and revs("wdir()", modified() or removed() or unknown())' + b2 + a2 + $ fileset -r0 'added() or revs("wdir()", added())' + a1 + a2 + b1 + b2 + c1 + Test files properties >>> file('bin', 'wb').write('\0a') @@ -367,3 +396,226 @@ $ fileset 'existingcaller()' 2>&1 | tail -1 AssertionError: unexpected existing() invocation + +Test 'revs(...)' +================ + +small reminder of the repository state + + $ hg log -G + @ changeset: 4:160936123545 + | tag: tip + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: subrepo + | + o changeset: 3:9d594e11b8c9 + |\ parent: 2:55b05bdebf36 + | | parent: 1:830839835f98 + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: merge + | | + | o changeset: 2:55b05bdebf36 + | | parent: 0:8a9576c51c1f + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: diverging + | | + o | changeset: 1:830839835f98 + |/ user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: manychanges + | + o changeset: 0:8a9576c51c1f + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: addfiles + + $ hg status --change 0 + A a1 + A a2 + A b1 + A b2 + $ hg status --change 1 + M b2 + A 1k + A 2k + A b2link + A bin + A c1 + A con.xml + R a2 + $ hg status --change 2 + M b2 + $ hg status --change 3 + M b2 + A 1k + A 2k + A b2link + A bin + A c1 + A con.xml + R a2 + $ hg status --change 4 + A .hgsub + A .hgsubstate + $ hg status + A dos + A mac + A mixed + R con.xml + ! a1 + ? b2.orig + ? c3 + ? 
unknown + +Test files at -r0 should be filtered by files at wdir +----------------------------------------------------- + + $ fileset -r0 '* and revs("wdir()", *)' + a1 + b1 + b2 + +Test that "revs()" work at all +------------------------------ + + $ fileset "revs('2', modified())" + b2 + +Test that "revs()" work for file missing in the working copy/current context +---------------------------------------------------------------------------- + +(a2 not in working copy) + + $ fileset "revs('0', added())" + a1 + a2 + b1 + b2 + +(none of the file exist in "0") + + $ fileset -r 0 "revs('4', added())" + .hgsub + .hgsubstate + +Call with empty revset +-------------------------- + + $ fileset "revs('2-2', modified())" + +Call with revset matching multiple revs +--------------------------------------- + + $ fileset "revs('0+4', added())" + a1 + a2 + b1 + b2 + .hgsub + .hgsubstate + +overlapping set + + $ fileset "revs('1+2', modified())" + b2 + +test 'status(...)' +================= + +Simple case +----------- + + $ fileset "status(3, 4, added())" + .hgsub + .hgsubstate + +use rev to restrict matched file +----------------------------------------- + + $ hg status --removed --rev 0 --rev 1 + R a2 + $ fileset "status(0, 1, removed())" + a2 + $ fileset "* and status(0, 1, removed())" + $ fileset -r 4 "status(0, 1, removed())" + a2 + $ fileset -r 4 "* and status(0, 1, removed())" + $ fileset "revs('4', * and status(0, 1, removed()))" + $ fileset "revs('0', * and status(0, 1, removed()))" + a2 + +check wdir() +------------ + + $ hg status --removed --rev 4 + R con.xml + $ fileset "status(4, 'wdir()', removed())" + con.xml + + $ hg status --removed --rev 2 + R a2 + $ fileset "status('2', 'wdir()', removed())" + a2 + +test backward status +-------------------- + + $ hg status --removed --rev 0 --rev 4 + R a2 + $ hg status --added --rev 4 --rev 0 + A a2 + $ fileset "status(4, 0, added())" + a2 + +test cross branch status +------------------------ + + $ hg status --added --rev 1 --rev 2 + A a2 + $ fileset "status(1, 2, added())" + a2 + +test with multi revs revset +--------------------------- + $ hg status --added --rev 0:1 --rev 3:4 + A .hgsub + A .hgsubstate + A 1k + A 2k + A b2link + A bin + A c1 + A con.xml + $ fileset "status('0:1', '3:4', added())" + .hgsub + .hgsubstate + 1k + 2k + b2link + bin + c1 + con.xml + +tests with empty value +---------------------- + +Fully empty revset + + $ fileset "status('', '4', added())" + hg: parse error: first argument to status must be a revision + [255] + $ fileset "status('2', '', added())" + hg: parse error: second argument to status must be a revision + [255] + +Empty revset will error at the revset layer + + $ fileset "status(' ', '4', added())" + hg: parse error at 1: not a prefix: end + [255] + $ fileset "status('2', ' ', added())" + hg: parse error at 1: not a prefix: end + [255]
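In prose: revs(REVS, FILESET) evaluates FILESET in each revision selected by REVS and unions the results, while status(REV1, REV2, FILESET) evaluates FILESET against the status computed between the two revisions, which is what lets a removed file like a2 match even though it no longer exists in the working copy. Assuming the long-standing getfileset() helper, the same predicates can be driven from Python (repo is assumed to be an open localrepo):

    from mercurial import fileset

    # paths removed between revisions 0 and 1, evaluated from Python
    files = fileset.getfileset(repo['.'], 'status(0, 1, removed())')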
--- a/tests/test-gendoc-ro.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-gendoc-ro.t Sat Mar 11 13:53:14 2017 -0500 @@ -1,4 +1,9 @@ #require docutils gettext +Error: the current ro localization has some rst defects exposed by +moving pager to core. These two warnings about references are expected +until the localization is corrected. $ $TESTDIR/check-gendoc ro checking for parse errors + gendoc.txt:58: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string. + gendoc.txt:58: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string.
--- a/tests/test-globalopts.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-globalopts.t Sat Mar 11 13:53:14 2017 -0500 @@ -340,6 +340,7 @@ additional help topics: + color Colorizing Outputs config Configuration Files dates Date Formats diffs Diff Formats @@ -351,6 +352,7 @@ hgweb Configuring hgweb internals Technical implementation topics merge-tools Merge Tools + pager Pager Support patterns File Name Patterns phases Working with Phases revisions Specifying Revisions @@ -421,6 +423,7 @@ additional help topics: + color Colorizing Outputs config Configuration Files dates Date Formats diffs Diff Formats @@ -432,6 +435,7 @@ hgweb Configuring hgweb internals Technical implementation topics merge-tools Merge Tools + pager Pager Support patterns File Name Patterns phases Working with Phases revisions Specifying Revisions
--- a/tests/test-glog.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-glog.t Sat Mar 11 13:53:14 2017 -0500 @@ -82,18 +82,18 @@ > } $ cat > printrevset.py <<EOF - > from mercurial import extensions, revset, commands, cmdutil + > from mercurial import extensions, revsetlang, commands, cmdutil > > def uisetup(ui): > def printrevset(orig, ui, repo, *pats, **opts): > if opts.get('print_revset'): > expr = cmdutil.getgraphlogrevs(repo, pats, opts)[1] > if expr: - > tree = revset.parse(expr) + > tree = revsetlang.parse(expr) > else: > tree = [] > ui.write('%r\n' % (opts.get('rev', []),)) - > ui.write(revset.prettyformat(tree) + '\n') + > ui.write(revsetlang.prettyformat(tree) + '\n') > return 0 > return orig(ui, repo, *pats, **opts) > entry = extensions.wrapcommand(commands.table, 'log', printrevset)
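printrevset.py follows the split of the old revset module: the language-level helpers (parse, prettyformat, and friends) now live in revsetlang, matching the test-doctest.py update above, while the lazy set classes moved to smartset. A standalone sketch of the renamed helpers (the expression is illustrative):

    from mercurial import revsetlang

    tree = revsetlang.parse('ancestors(tip) and branch(default)')
    print(revsetlang.prettyformat(tree))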
--- a/tests/test-help.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-help.t Sat Mar 11 13:53:14 2017 -0500 @@ -102,6 +102,7 @@ additional help topics: + color Colorizing Outputs config Configuration Files dates Date Formats diffs Diff Formats @@ -113,6 +114,7 @@ hgweb Configuring hgweb internals Technical implementation topics merge-tools Merge Tools + pager Pager Support patterns File Name Patterns phases Working with Phases revisions Specifying Revisions @@ -177,6 +179,7 @@ additional help topics: + color Colorizing Outputs config Configuration Files dates Date Formats diffs Diff Formats @@ -188,6 +191,7 @@ hgweb Configuring hgweb internals Technical implementation topics merge-tools Merge Tools + pager Pager Support patterns File Name Patterns phases Working with Phases revisions Specifying Revisions @@ -248,7 +252,6 @@ censor erase file content at a given revision churn command to display statistics about repository history clonebundles advertise pre-generated bundles to seed clones - color colorize output from some commands convert import revisions from foreign VCS repositories into Mercurial eol automatically manage newlines in repository files @@ -262,7 +265,6 @@ largefiles track large binary files mq manage a stack of patches notify hooks for sending email push notifications - pager browse command output with an external pager patchbomb command to send changesets as (a series of) patch emails purge command to delete untracked files from the working directory @@ -315,6 +317,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -326,6 +330,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) (use 'hg help' for the full list of commands) @@ -411,6 +417,8 @@ all prompts -q --quiet suppress output -v --verbose enable additional output + --color TYPE when to colorize (boolean, always, auto, never, or + debug) --config CONFIG [+] set/override config option (use 'section.name=value') --debug enable debugging output --debugger start debugger @@ -422,6 +430,8 @@ --version output version information and exit -h --help display help and exit --hidden consider hidden changesets + --pager TYPE when to paginate (boolean, always, auto, or never) + (default: auto) Test the textwidth config option @@ -678,6 +688,7 @@ > ('', 'newline', '', 'line1\nline2')], > 'hg nohelp', > norepo=True) + > @command('debugoptADV', [('', 'aopt', None, 'option is (ADVANCED)')]) > @command('debugoptDEP', [('', 'dopt', None, 'option is (DEPRECATED)')]) > @command('debugoptEXP', [('', 'eopt', None, 'option is (EXPERIMENTAL)')]) > def nohelp(ui, *args, **kwargs): @@ -816,6 +827,7 @@ additional help topics: + color Colorizing Outputs config Configuration Files dates Date Formats diffs Diff Formats @@ -827,6 +839,7 @@ hgweb Configuring hgweb internals Technical implementation topics merge-tools Merge Tools + pager Pager Support patterns File Name Patterns phases Working with Phases revisions Specifying Revisions @@ -853,6 +866,7 @@ debugbundle lists the contents of a bundle debugcheckstate validate the correctness of the current dirstate + debugcolor show available color, effects or style debugcommands list all available commands and options 
debugcomplete @@ -889,6 +903,7 @@ complete "names" - tags, open branch names, bookmark names debugobsolete create arbitrary obsolete marker + debugoptADV (no help text available) debugoptDEP (no help text available) debugoptEXP (no help text available) debugpathcomplete @@ -925,6 +940,7 @@ """"""""""""""""""""""""""""""" bundles Bundles + censor Censor changegroups Changegroups requirements Repository Requirements revlogs Revision Logs @@ -937,37 +953,51 @@ """""""""""" Changegroups are representations of repository revlog data, specifically - the changelog, manifest, and filelogs. + the changelog data, root/flat manifest data, treemanifest data, and + filelogs. There are 3 versions of changegroups: "1", "2", and "3". From a high- level, versions "1" and "2" are almost exactly the same, with the only - difference being a header on entries in the changeset segment. Version "3" - adds support for exchanging treemanifests and includes revlog flags in the - delta header. - - Changegroups consists of 3 logical segments: + difference being an additional item in the *delta header*. Version "3" + adds support for revlog flags in the *delta header* and optionally + exchanging treemanifests (enabled by setting an option on the + "changegroup" part in the bundle2). + + Changegroups when not exchanging treemanifests consist of 3 logical + segments: +---------------------------------+ | | | | | changeset | manifest | filelogs | | | | | + | | | | +---------------------------------+ + When exchanging treemanifests, there are 4 logical segments: + + +-------------------------------------------------+ + | | | | | + | changeset | root | treemanifests | filelogs | + | | manifest | | | + | | | | | + +-------------------------------------------------+ + The principle building block of each segment is a *chunk*. A *chunk* is a framed piece of data: +---------------------------------------+ | | | | length | data | - | (32 bits) | <length> bytes | + | (4 bytes) | (<length - 4> bytes) | | | | +---------------------------------------+ - Each chunk starts with a 32-bit big-endian signed integer indicating the - length of the raw data that follows. - - There is a special case chunk that has 0 length ("0x00000000"). We call - this an *empty chunk*. + All integers are big-endian signed integers. Each chunk starts with a + 32-bit integer indicating the length of the entire chunk (including the + length field itself). + + There is a special case chunk that has a value of 0 for the length + ("0x00000000"). We call this an *empty chunk*. Delta Groups ============ @@ -981,26 +1011,27 @@ +------------------------------------------------------------------------+ | | | | | | | chunk0 length | chunk0 data | chunk1 length | chunk1 data | 0x0 | - | (32 bits) | (various) | (32 bits) | (various) | (32 bits) | + | (4 bytes) | (various) | (4 bytes) | (various) | (4 bytes) | | | | | | | - +------------------------------------------------------------+-----------+ + +------------------------------------------------------------------------+ Each *chunk*'s data consists of the following: - +-----------------------------------------+ - | | | | - | delta header | mdiff header | delta | - | (various) | (12 bytes) | (various) | - | | | | - +-----------------------------------------+ - - The *length* field is the byte length of the remaining 3 logical pieces of - data. The *delta* is a diff from an existing entry in the changelog. 
+ +---------------------------------------+ + | | | + | delta header | delta data | + | (various by version) | (various) | + | | | + +---------------------------------------+ + + The *delta data* is a series of *delta*s that describe a diff from an + existing entry (either that the recipient already has, or previously + specified in the bundlei/changegroup). The *delta header* is different between versions "1", "2", and "3" of the changegroup format. - Version 1: + Version 1 (headerlen=80): +------------------------------------------------------+ | | | | | @@ -1009,7 +1040,7 @@ | | | | | +------------------------------------------------------+ - Version 2: + Version 2 (headerlen=100): +------------------------------------------------------------------+ | | | | | | @@ -1018,30 +1049,36 @@ | | | | | | +------------------------------------------------------------------+ - Version 3: + Version 3 (headerlen=102): +------------------------------------------------------------------------------+ | | | | | | | - | node | p1 node | p2 node | base node | link node | flags | + | node | p1 node | p2 node | base node | link node | flags | | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | | | | | | | | +------------------------------------------------------------------------------+ - The *mdiff header* consists of 3 32-bit big-endian signed integers - describing offsets at which to apply the following delta content: - - +-------------------------------------+ - | | | | - | offset | old length | new length | - | (32 bits) | (32 bits) | (32 bits) | - | | | | - +-------------------------------------+ + The *delta data* consists of "chunklen - 4 - headerlen" bytes, which + contain a series of *delta*s, densely packed (no separators). These deltas + describe a diff from an existing entry (either that the recipient already + has, or previously specified in the bundle/changegroup). The format is + described more fully in "hg help internals.bdiff", but briefly: + + +---------------------------------------------------------------+ + | | | | | + | start offset | end offset | new length | content | + | (4 bytes) | (4 bytes) | (4 bytes) | (<new length> bytes) | + | | | | | + +---------------------------------------------------------------+ + + Please note that the length field in the delta data does *not* include + itself. In version 1, the delta is always applied against the previous node from the changegroup or the first parent if this is the first entry in the changegroup. - In version 2, the delta base node is encoded in the entry in the + In version 2 and up, the delta base node is encoded in the entry in the changegroup. This allows the delta to be expressed against any parent, which can result in smaller deltas and more efficient encoding of data. @@ -1049,46 +1086,61 @@ ================= The *changeset segment* consists of a single *delta group* holding - changelog data. It is followed by an *empty chunk* to denote the boundary - to the *manifests segment*. + changelog data. The *empty chunk* at the end of the *delta group* denotes + the boundary to the *manifest segment*. Manifest Segment ================ The *manifest segment* consists of a single *delta group* holding manifest - data. It is followed by an *empty chunk* to denote the boundary to the - *filelogs segment*. + data. If treemanifests are in use, it contains only the manifest for the + root directory of the repository. Otherwise, it contains the entire + manifest data. 
The *empty chunk* at the end of the *delta group* denotes + the boundary to the next segment (either the *treemanifests segment* or + the *filelogs segment*, depending on version and the request options). + + Treemanifests Segment + --------------------- + + The *treemanifests segment* only exists in changegroup version "3", and + only if the 'treemanifest' param is part of the bundle2 changegroup part + (it is not possible to use changegroup version 3 outside of bundle2). + Aside from the filenames in the *treemanifests segment* containing a + trailing "/" character, it behaves identically to the *filelogs segment* + (see below). The final sub-segment is followed by an *empty chunk* + (logically, a sub-segment with filename size 0). This denotes the boundary + to the *filelogs segment*. Filelogs Segment ================ - The *filelogs* segment consists of multiple sub-segments, each + The *filelogs segment* consists of multiple sub-segments, each corresponding to an individual file whose data is being described: - +--------------------------------------+ - | | | | | - | filelog0 | filelog1 | filelog2 | ... | - | | | | | - +--------------------------------------+ - - In version "3" of the changegroup format, filelogs may include directory - logs when treemanifests are in use. directory logs are identified by - having a trailing '/' on their filename (see below). - - The final filelog sub-segment is followed by an *empty chunk* to denote - the end of the segment and the overall changegroup. + +--------------------------------------------------+ + | | | | | | + | filelog0 | filelog1 | filelog2 | ... | 0x0 | + | | | | | (4 bytes) | + | | | | | | + +--------------------------------------------------+ + + The final filelog sub-segment is followed by an *empty chunk* (logically, + a sub-segment with filename size 0). This denotes the end of the segment + and of the overall changegroup. Each filelog sub-segment consists of the following: - +------------------------------------------+ - | | | | - | filename size | filename | delta group | - | (32 bits) | (various) | (various) | - | | | | - +------------------------------------------+ + +------------------------------------------------------+ + | | | | + | filename length | filename | delta group | + | (4 bytes) | (<length - 4> bytes) | (various) | + | | | | + +------------------------------------------------------+ That is, a *chunk* consisting of the filename (not terminated or padded) - followed by N chunks constituting the *delta group* for this file. + followed by N chunks constituting the *delta group* for this file. The + *empty chunk* at the end of each *delta group* denotes the boundary to the + next filelog sub-segment. 
Test list of commands with command with no help text @@ -1102,7 +1154,15 @@ (use 'hg help -v helpext' to show built-in aliases and global options) -test deprecated and experimental options are hidden in command help +test advanced, deprecated and experimental options are hidden in command help + $ hg help debugoptADV + hg debugoptADV + + (no help text available) + + options: + + (some details hidden, use --verbose to show complete help) $ hg help debugoptDEP hg debugoptDEP @@ -1121,7 +1181,9 @@ (some details hidden, use --verbose to show complete help) -test deprecated and experimental options is shown with -v +test advanced, deprecated and experimental options are shown with -v + $ hg help -v debugoptADV | grep aopt + --aopt option is (ADVANCED) $ hg help -v debugoptDEP | grep dopt --dopt option is (DEPRECATED) $ hg help -v debugoptEXP | grep eopt @@ -1547,11 +1609,11 @@ "default:pushurl" should be used instead. $ hg help glossary.mcguffin - abort: help section not found + abort: help section not found: glossary.mcguffin [255] $ hg help glossary.mc.guffin - abort: help section not found + abort: help section not found: glossary.mc.guffin [255] $ hg help template.files @@ -1792,7 +1854,7 @@ $ hg serve -R "$TESTTMP/test" -n test -p $HGPORT -d --pid-file=hg.pid $ cat hg.pid >> $DAEMON_PIDS - $ get-with-headers.py 127.0.0.1:$HGPORT "help" + $ get-with-headers.py $LOCALIP:$HGPORT "help" 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -1837,6 +1899,13 @@ <tr><td colspan="2"><h2><a name="topics" href="#topics">Topics</a></h2></td></tr> <tr><td> + <a href="/help/color"> + color + </a> + </td><td> + Colorizing Outputs + </td></tr> + <tr><td> <a href="/help/config"> config </a> @@ -1914,6 +1983,13 @@ Merge Tools </td></tr> <tr><td> + <a href="/help/pager"> + pager + </a> + </td><td> + Pager Support + </td></tr> + <tr><td> <a href="/help/patterns"> patterns </a> @@ -2361,7 +2437,7 @@ </html> - $ get-with-headers.py 127.0.0.1:$HGPORT "help/add" + $ get-with-headers.py $LOCALIP:$HGPORT "help/add" 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -2491,6 +2567,9 @@ <td>--verbose</td> <td>enable additional output</td></tr> <tr><td></td> + <td>--color TYPE</td> + <td>when to colorize (boolean, always, auto, never, or debug)</td></tr> + <tr><td></td> <td>--config CONFIG [+]</td> <td>set/override config option (use 'section.name=value')</td></tr> <tr><td></td> @@ -2523,6 +2602,9 @@ <tr><td></td> <td>--hidden</td> <td>consider hidden changesets</td></tr> + <tr><td></td> + <td>--pager TYPE</td> + <td>when to paginate (boolean, always, auto, or never) (default: auto)</td></tr> </table> </div> @@ -2535,7 +2617,7 @@ </html> - $ get-with-headers.py 127.0.0.1:$HGPORT "help/remove" + $ get-with-headers.py $LOCALIP:$HGPORT "help/remove" 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -2686,6 +2768,9 @@ <td>--verbose</td> <td>enable additional output</td></tr> <tr><td></td> + <td>--color TYPE</td> + <td>when to colorize (boolean, always, auto, never, or debug)</td></tr> + <tr><td></td> <td>--config CONFIG [+]</td> <td>set/override config option (use 'section.name=value')</td></tr> <tr><td></td> @@ -2718,6 +2803,9 @@ <tr><td></td> <td>--hidden</td> <td>consider hidden changesets</td></tr> + <tr><td></td> + <td>--pager TYPE</td> + <td>when to paginate (boolean, always, auto, or never) 
(default: auto)</td></tr> </table> </div> @@ -2730,7 +2818,7 @@ </html> - $ get-with-headers.py 127.0.0.1:$HGPORT "help/dates" + $ get-with-headers.py $LOCALIP:$HGPORT "help/dates" 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -2837,7 +2925,7 @@ Sub-topic indexes rendered properly - $ get-with-headers.py 127.0.0.1:$HGPORT "help/internals" + $ get-with-headers.py $LOCALIP:$HGPORT "help/internals" 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -2889,6 +2977,13 @@ Bundles </td></tr> <tr><td> + <a href="/help/internals.censor"> + censor + </a> + </td><td> + Censor + </td></tr> + <tr><td> <a href="/help/internals.changegroups"> changegroups </a> @@ -2933,7 +3028,7 @@ Sub-topic topics rendered properly - $ get-with-headers.py 127.0.0.1:$HGPORT "help/internals.changegroups" + $ get-with-headers.py $LOCALIP:$HGPORT "help/internals.changegroups" 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -2980,26 +3075,41 @@ <h1>Changegroups</h1> <p> Changegroups are representations of repository revlog data, specifically - the changelog, manifest, and filelogs. + the changelog data, root/flat manifest data, treemanifest data, and + filelogs. </p> <p> There are 3 versions of changegroups: "1", "2", and "3". From a - high-level, versions "1" and "2" are almost exactly the same, with - the only difference being a header on entries in the changeset - segment. Version "3" adds support for exchanging treemanifests and - includes revlog flags in the delta header. + high-level, versions "1" and "2" are almost exactly the same, with the + only difference being an additional item in the *delta header*. Version + "3" adds support for revlog flags in the *delta header* and optionally + exchanging treemanifests (enabled by setting an option on the + "changegroup" part in the bundle2). </p> <p> - Changegroups consists of 3 logical segments: + Changegroups when not exchanging treemanifests consist of 3 logical + segments: </p> <pre> +---------------------------------+ | | | | | changeset | manifest | filelogs | | | | | + | | | | +---------------------------------+ </pre> <p> + When exchanging treemanifests, there are 4 logical segments: + </p> + <pre> + +-------------------------------------------------+ + | | | | | + | changeset | root | treemanifests | filelogs | + | | manifest | | | + | | | | | + +-------------------------------------------------+ + </pre> + <p> The principle building block of each segment is a *chunk*. A *chunk* is a framed piece of data: </p> @@ -3007,17 +3117,18 @@ +---------------------------------------+ | | | | length | data | - | (32 bits) | <length> bytes | + | (4 bytes) | (<length - 4> bytes) | | | | +---------------------------------------+ </pre> <p> - Each chunk starts with a 32-bit big-endian signed integer indicating - the length of the raw data that follows. + All integers are big-endian signed integers. Each chunk starts with a 32-bit + integer indicating the length of the entire chunk (including the length field + itself). </p> <p> - There is a special case chunk that has 0 length ("0x00000000"). We - call this an *empty chunk*. + There is a special case chunk that has a value of 0 for the length + ("0x00000000"). We call this an *empty chunk*. 
</p> <h2>Delta Groups</h2> <p> @@ -3032,31 +3143,32 @@ +------------------------------------------------------------------------+ | | | | | | | chunk0 length | chunk0 data | chunk1 length | chunk1 data | 0x0 | - | (32 bits) | (various) | (32 bits) | (various) | (32 bits) | + | (4 bytes) | (various) | (4 bytes) | (various) | (4 bytes) | | | | | | | - +------------------------------------------------------------+-----------+ + +------------------------------------------------------------------------+ </pre> <p> Each *chunk*'s data consists of the following: </p> <pre> - +-----------------------------------------+ - | | | | - | delta header | mdiff header | delta | - | (various) | (12 bytes) | (various) | - | | | | - +-----------------------------------------+ + +---------------------------------------+ + | | | + | delta header | delta data | + | (various by version) | (various) | + | | | + +---------------------------------------+ </pre> <p> - The *length* field is the byte length of the remaining 3 logical pieces - of data. The *delta* is a diff from an existing entry in the changelog. + The *delta data* is a series of *delta*s that describe a diff from an existing + entry (either that the recipient already has, or previously specified in the + bundlei/changegroup). </p> <p> The *delta header* is different between versions "1", "2", and "3" of the changegroup format. </p> <p> - Version 1: + Version 1 (headerlen=80): </p> <pre> +------------------------------------------------------+ @@ -3067,7 +3179,7 @@ +------------------------------------------------------+ </pre> <p> - Version 2: + Version 2 (headerlen=100): </p> <pre> +------------------------------------------------------------------+ @@ -3078,85 +3190,104 @@ +------------------------------------------------------------------+ </pre> <p> - Version 3: + Version 3 (headerlen=102): </p> <pre> +------------------------------------------------------------------------------+ | | | | | | | - | node | p1 node | p2 node | base node | link node | flags | + | node | p1 node | p2 node | base node | link node | flags | | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | | | | | | | | +------------------------------------------------------------------------------+ </pre> <p> - The *mdiff header* consists of 3 32-bit big-endian signed integers - describing offsets at which to apply the following delta content: + The *delta data* consists of "chunklen - 4 - headerlen" bytes, which contain a + series of *delta*s, densely packed (no separators). These deltas describe a diff + from an existing entry (either that the recipient already has, or previously + specified in the bundle/changegroup). The format is described more fully in + "hg help internals.bdiff", but briefly: </p> <pre> - +-------------------------------------+ - | | | | - | offset | old length | new length | - | (32 bits) | (32 bits) | (32 bits) | - | | | | - +-------------------------------------+ + +---------------------------------------------------------------+ + | | | | | + | start offset | end offset | new length | content | + | (4 bytes) | (4 bytes) | (4 bytes) | (<new length> bytes) | + | | | | | + +---------------------------------------------------------------+ </pre> <p> + Please note that the length field in the delta data does *not* include itself. + </p> + <p> In version 1, the delta is always applied against the previous node from the changegroup or the first parent if this is the first entry in the changegroup. 
</p> <p> - In version 2, the delta base node is encoded in the entry in the + In version 2 and up, the delta base node is encoded in the entry in the changegroup. This allows the delta to be expressed against any parent, which can result in smaller deltas and more efficient encoding of data. </p> <h2>Changeset Segment</h2> <p> The *changeset segment* consists of a single *delta group* holding - changelog data. It is followed by an *empty chunk* to denote the - boundary to the *manifests segment*. + changelog data. The *empty chunk* at the end of the *delta group* denotes + the boundary to the *manifest segment*. </p> <h2>Manifest Segment</h2> <p> - The *manifest segment* consists of a single *delta group* holding - manifest data. It is followed by an *empty chunk* to denote the boundary - to the *filelogs segment*. + The *manifest segment* consists of a single *delta group* holding manifest + data. If treemanifests are in use, it contains only the manifest for the + root directory of the repository. Otherwise, it contains the entire + manifest data. The *empty chunk* at the end of the *delta group* denotes + the boundary to the next segment (either the *treemanifests segment* or the + *filelogs segment*, depending on version and the request options). + </p> + <h3>Treemanifests Segment</h3> + <p> + The *treemanifests segment* only exists in changegroup version "3", and + only if the 'treemanifest' param is part of the bundle2 changegroup part + (it is not possible to use changegroup version 3 outside of bundle2). + Aside from the filenames in the *treemanifests segment* containing a + trailing "/" character, it behaves identically to the *filelogs segment* + (see below). The final sub-segment is followed by an *empty chunk* (logically, + a sub-segment with filename size 0). This denotes the boundary to the + *filelogs segment*. </p> <h2>Filelogs Segment</h2> <p> - The *filelogs* segment consists of multiple sub-segments, each + The *filelogs segment* consists of multiple sub-segments, each corresponding to an individual file whose data is being described: </p> <pre> - +--------------------------------------+ - | | | | | - | filelog0 | filelog1 | filelog2 | ... | - | | | | | - +--------------------------------------+ + +--------------------------------------------------+ + | | | | | | + | filelog0 | filelog1 | filelog2 | ... | 0x0 | + | | | | | (4 bytes) | + | | | | | | + +--------------------------------------------------+ </pre> <p> - In version "3" of the changegroup format, filelogs may include - directory logs when treemanifests are in use. directory logs are - identified by having a trailing '/' on their filename (see below). - </p> - <p> - The final filelog sub-segment is followed by an *empty chunk* to denote - the end of the segment and the overall changegroup. + The final filelog sub-segment is followed by an *empty chunk* (logically, + a sub-segment with filename size 0). This denotes the end of the segment + and of the overall changegroup. 
</p> <p> Each filelog sub-segment consists of the following: </p> <pre> - +------------------------------------------+ - | | | | - | filename size | filename | delta group | - | (32 bits) | (various) | (various) | - | | | | - +------------------------------------------+ + +------------------------------------------------------+ + | | | | + | filename length | filename | delta group | + | (4 bytes) | (<length - 4> bytes) | (various) | + | | | | + +------------------------------------------------------+ </pre> <p> That is, a *chunk* consisting of the filename (not terminated or padded) - followed by N chunks constituting the *delta group* for this file. + followed by N chunks constituting the *delta group* for this file. The + *empty chunk* at the end of each *delta group* denotes the boundary to the + next filelog sub-segment. </p> </div>
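The rewritten help text pins the framing down exactly: every chunk opens with a 4-byte big-endian signed length that counts the length field itself, and a zero length is the *empty chunk* that terminates a delta group or segment. A minimal sketch of just that framing, assuming data is a byte string positioned at the start of a delta group; interpreting each payload's version-specific delta header (80, 100, or 102 bytes, as documented above) is left out:

    import struct

    def iterchunks(data, offset=0):
        # walk one delta group, yielding each chunk's payload
        while True:
            (length,) = struct.unpack('>i', data[offset:offset + 4])
            if length == 0:  # the *empty chunk*: end of this group
                return
            yield data[offset + 4:offset + length]  # delta header + delta data
            offset += length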
--- a/tests/test-hgweb-commands.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-hgweb-commands.t Sat Mar 11 13:53:14 2017 -0500 @@ -58,7 +58,7 @@ Logs and changes - $ get-with-headers.py 127.0.0.1:$HGPORT 'log/?style=atom' + $ get-with-headers.py $LOCALIP:$HGPORT 'log/?style=atom' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -244,7 +244,7 @@ </entry> </feed> - $ get-with-headers.py 127.0.0.1:$HGPORT 'log/?style=rss' + $ get-with-headers.py $LOCALIP:$HGPORT 'log/?style=rss' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -422,7 +422,7 @@ </channel> </rss> (no-eol) - $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/?style=atom' + $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/?style=atom' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -522,7 +522,7 @@ </entry> </feed> - $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/?style=rss' + $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/?style=rss' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -618,7 +618,7 @@ </channel> </rss> (no-eol) - $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/foo/?style=atom' + $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/foo/?style=atom' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -673,7 +673,7 @@ </entry> </feed> - $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/foo/?style=rss' + $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/foo/?style=rss' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -694,7 +694,7 @@ </channel> </rss> - $ get-with-headers.py 127.0.0.1:$HGPORT 'shortlog/' + $ get-with-headers.py $LOCALIP:$HGPORT 'shortlog/' 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -834,7 +834,7 @@ </body> </html> - $ get-with-headers.py 127.0.0.1:$HGPORT 'rev/0/' + $ get-with-headers.py $LOCALIP:$HGPORT 'rev/0/' 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -965,7 +965,7 @@ </body> </html> - $ get-with-headers.py 127.0.0.1:$HGPORT 'rev/1/?style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'rev/1/?style=raw' 200 Script output follows @@ -982,7 +982,7 @@ @@ -0,0 +1,1 @@ +2ef0ac749a14e4f57a5a822464a0902c6f7f448f 1.0 - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=base' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=base' 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -1071,12 +1071,12 @@ </body> </html> - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=stable&style=raw' | grep 'revision:' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=stable&style=raw' | grep 'revision:' revision: 2 Search with revset syntax - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=tip^&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=tip^&style=raw' 200 Script output follows @@ -1093,7 +1093,7 @@ branch: stable - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(all(),2)^&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(all(),2)^&style=raw' 200 Script output follows @@ -1117,7 +1117,7 @@ branch: default - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(all(,2)^&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(all(,2)^&style=raw' 200 Script output follows @@ -1127,7 +1127,7 @@ # Mode literal keyword search - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(al(),2)^&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 
'log?rev=last(al(),2)^&style=raw' 200 Script output follows @@ -1137,7 +1137,7 @@ # Mode literal keyword search - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=bookmark(anotherthing)&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=bookmark(anotherthing)&style=raw' 200 Script output follows @@ -1155,7 +1155,7 @@ bookmark: anotherthing - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=bookmark(abc)&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=bookmark(abc)&style=raw' 200 Script output follows @@ -1165,7 +1165,7 @@ # Mode literal keyword search - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=deadbeef:&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=deadbeef:&style=raw' 200 Script output follows @@ -1176,7 +1176,7 @@ - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=user("test")&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=user("test")&style=raw' 200 Script output follows @@ -1217,7 +1217,7 @@ bookmark: anotherthing - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=user("re:test")&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=user("re:test")&style=raw' 200 Script output follows @@ -1230,11 +1230,11 @@ File-related - $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/foo/?style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/foo/?style=raw' 200 Script output follows foo - $ get-with-headers.py 127.0.0.1:$HGPORT 'annotate/1/foo/?style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'annotate/1/foo/?style=raw' 200 Script output follows @@ -1243,7 +1243,7 @@ - $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/?style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/?style=raw' 200 Script output follows @@ -1259,7 +1259,7 @@ $ hg parents --template "{node|short}\n" -r 1 foo 2ef0ac749a14 - $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/foo' + $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/foo' 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -1354,7 +1354,7 @@ </body> </html> - $ get-with-headers.py 127.0.0.1:$HGPORT 'filediff/0/foo/?style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'filediff/0/foo/?style=raw' 200 Script output follows @@ -1368,7 +1368,7 @@ - $ get-with-headers.py 127.0.0.1:$HGPORT 'filediff/1/foo/?style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'filediff/1/foo/?style=raw' 200 Script output follows @@ -1384,7 +1384,7 @@ $ hg parents --template "{node|short}\n" -r 2 foo 2ef0ac749a14 - $ get-with-headers.py 127.0.0.1:$HGPORT 'file/2/foo' + $ get-with-headers.py $LOCALIP:$HGPORT 'file/2/foo' 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -1483,23 +1483,23 @@ Overviews - $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-tags' + $ get-with-headers.py $LOCALIP:$HGPORT 'raw-tags' 200 Script output follows tip cad8025a2e87f88c06259790adfa15acb4080123 1.0 2ef0ac749a14e4f57a5a822464a0902c6f7f448f - $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-branches' + $ get-with-headers.py $LOCALIP:$HGPORT 'raw-branches' 200 Script output follows unstable cad8025a2e87f88c06259790adfa15acb4080123 open stable 1d22e65f027e5a0609357e7d8e7508cd2ba5d2fe inactive default a4f92ed23982be056b9852de5dfe873eaac7f0de inactive - $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-bookmarks' + $ get-with-headers.py $LOCALIP:$HGPORT 'raw-bookmarks' 200 Script output follows something cad8025a2e87f88c06259790adfa15acb4080123 anotherthing 2ef0ac749a14e4f57a5a822464a0902c6f7f448f - $ 
get-with-headers.py 127.0.0.1:$HGPORT 'summary/?style=gitweb' + $ get-with-headers.py $LOCALIP:$HGPORT 'summary/?style=gitweb' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -1697,7 +1697,7 @@ </body> </html> - $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/?style=gitweb' + $ get-with-headers.py $LOCALIP:$HGPORT 'graph/?style=gitweb' 200 Script output follows <?xml version="1.0" encoding="ascii"?> @@ -1843,7 +1843,7 @@ raw graph - $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/?style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'graph/?style=raw' 200 Script output follows @@ -1893,28 +1893,28 @@ capabilities - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities'; echo + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'; echo 200 Script output follows lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=*zlib (glob) heads - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=heads' + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=heads' 200 Script output follows cad8025a2e87f88c06259790adfa15acb4080123 branches - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000' + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000' 200 Script output follows 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 changegroup - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000' + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000' 200 Script output follows x\x9c\xbd\x94MHTQ\x14\xc7'+\x9d\xc66\x81\x89P\xc1\xa3\x14\xcct\xba\xef\xbe\xfb\xde\xbb\xcfr0\xb3"\x02\x11[%\x98\xdcO\xa7\xd2\x19\x98y\xd2\x07h"\x96\xa0e\xda\xa6lUY-\xca\x08\xa2\x82\x16\x96\xd1\xa2\xf0#\xc8\x95\x1b\xdd$!m*"\xc8\x82\xea\xbe\x9c\x01\x85\xc9\x996\x1d\xf8\xc1\xe3~\x9d\xff9\xef\x7f\xaf\xcf\xe7\xbb\x19\xfc4\xec^\xcb\x9b\xfbz\xa6\xbe\xb3\x90_\xef/\x8d\x9e\xad\xbe\xe4\xcb0\xd2\xec\xad\x12X:\xc8\x12\x12\xd9:\x95\xba \x1cG\xb7$\xc5\xc44\x1c(\x1d\x03\x03\xdb\x84\x0cK#\xe0\x8a\xb8\x1b\x00\x1a\x08p\xb2SF\xa3\x01\x8f\x00%q\xa1Ny{k!8\xe5t>[{\xe2j\xddl\xc3\xcf\xee\xd0\xddW\x9ff3U\x9djobj\xbb\x87E\x88\x05l\x001\x12\x18\x13\xc6 \xb7(\xe3\x02a\x80\x81\xcel.u\x9b\x1b\x8c\x91\x80Z\x0c\x15\x15 (esc) @@ -1925,14 +1925,14 @@ stream_out - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=stream_out' + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out' 200 Script output follows 1 failing unbundle, requires POST request - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=unbundle' + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=unbundle' 405 push requires POST request 0 @@ -1941,7 +1941,7 @@ Static files - $ get-with-headers.py 127.0.0.1:$HGPORT 'static/style.css' + $ get-with-headers.py $LOCALIP:$HGPORT 'static/style.css' 200 Script output follows a { text-decoration:none; } @@ -2077,7 +2077,7 @@ > --cwd .. 
-R `pwd` $ cat hg.pid >> $DAEMON_PIDS - $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=adds("foo")&style=raw' + $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=adds("foo")&style=raw' 200 Script output follows @@ -2110,7 +2110,7 @@ Graph json escape of multibyte character - $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/' > out + $ get-with-headers.py $LOCALIP:$HGPORT 'graph/' > out >>> from __future__ import print_function >>> for line in open("out"): ... if line.startswith("var data ="): @@ -2121,14 +2121,14 @@ (plain version to check the format) - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo 200 Script output follows lookup changegroupsubset branchmap pushkey known (spread version to check the content) - $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n'; echo + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n'; echo 200 Script output @@ -2194,23 +2194,23 @@ Test paging - $ get-with-headers.py 127.0.0.1:$HGPORT \ + $ get-with-headers.py $LOCALIP:$HGPORT \ > 'graph/?style=raw' | grep changeset changeset: aed2d9c1d0e7 changeset: b60a39a85a01 - $ get-with-headers.py 127.0.0.1:$HGPORT \ + $ get-with-headers.py $LOCALIP:$HGPORT \ > 'graph/?style=raw&revcount=3' | grep changeset changeset: aed2d9c1d0e7 changeset: b60a39a85a01 changeset: ada793dcc118 - $ get-with-headers.py 127.0.0.1:$HGPORT \ + $ get-with-headers.py $LOCALIP:$HGPORT \ > 'graph/e06180cbfb0?style=raw&revcount=3' | grep changeset changeset: e06180cbfb0c changeset: b4e73ffab476 - $ get-with-headers.py 127.0.0.1:$HGPORT \ + $ get-with-headers.py $LOCALIP:$HGPORT \ > 'graph/b4e73ffab47?style=raw&revcount=3' | grep changeset changeset: b4e73ffab476
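
The hgweb tests above all talk to the server through the tests/get-with-headers.py helper, now pointed at $LOCALIP instead of a hardcoded 127.0.0.1 so the suite still passes when the loopback address differs; the test runner is expected to export LOCALIP. A minimal Python sketch of the request pattern — not the helper's actual code — looks like this:

    import http.client
    import os

    def get(path):
        # LOCALIP/HGPORT are assumed to come from the test runner's
        # environment; the fallbacks here are for illustration only
        host = os.environ.get('LOCALIP', '127.0.0.1')
        port = int(os.environ.get('HGPORT', '8000'))
        conn = http.client.HTTPConnection(host, port)
        conn.request('GET', path)
        resp = conn.getresponse()
        # the .t expectations match this line, e.g. "200 Script output follows"
        print(resp.status, resp.reason)
        body = resp.read()
        conn.close()
        return body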
--- a/tests/test-hgweb-descend-empties.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hgweb-descend-empties.t Sat Mar 11 13:53:14 2017 -0500
@@ -29,7 +29,7 @@
 manifest with descending (paper)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file'
   200 Script output follows
 
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -147,7 +147,7 @@
 manifest with descending (coal)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=coal'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=coal'
   200 Script output follows
 
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -266,7 +266,7 @@
 manifest with descending (monoblue)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=monoblue'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=monoblue'
   200 Script output follows
 
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
@@ -379,7 +379,7 @@
 manifest with descending (gitweb)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=gitweb'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=gitweb'
   200 Script output follows
 
   <?xml version="1.0" encoding="ascii"?>
@@ -482,7 +482,7 @@
 manifest with descending (spartan)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=spartan'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=spartan'
   200 Script output follows
 
   <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
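
Each template style serves the same manifest page, so the five requests above differ only in the style parameter. Reusing the hypothetical get() sketch from earlier, the whole sweep collapses to:

    for style in ('paper', 'coal', 'monoblue', 'gitweb', 'spartan'):
        get('/file?style=%s' % style)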
--- a/tests/test-hgweb-json.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hgweb-json.t Sat Mar 11 13:53:14 2017 -0500
@@ -1549,6 +1549,10 @@
     ],
     "topics": [
       {
+        "summary": "Colorizing Outputs",
+        "topic": "color"
+      },
+      {
         "summary": "Configuration Files",
         "topic": "config"
       },
@@ -1593,6 +1597,10 @@
         "topic": "merge-tools"
       },
       {
+        "summary": "Pager Support",
+        "topic": "pager"
+      },
+      {
         "summary": "File Name Patterns",
         "topic": "patterns"
       },
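
The two added entries reflect color and pager becoming built-in help topics, so they now appear in the JSON help index. A hedged sketch of an equivalent check, assuming the JSON-styled help index can be fetched as help?style=json and reusing the get() sketch above:

    import json

    data = json.loads(get('/help?style=json').decode('ascii'))
    topics = {t['topic'] for t in data['topics']}
    # both new built-in topics should be listed
    assert {'color', 'pager'} <= topics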
--- a/tests/test-hgweb-no-path-info.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hgweb-no-path-info.t Sat Mar 11 13:53:14 2017 -0500
@@ -49,7 +49,7 @@
   >     'REQUEST_METHOD': 'GET',
   >     'PATH_INFO': '/',
   >     'SCRIPT_NAME': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
@@ -79,16 +79,16 @@
   <?xml version="1.0" encoding="ascii"?>
   <feed xmlns="http://www.w3.org/2005/Atom">
    <!-- Changelog -->
-   <id>http://127.0.0.1:$HGPORT/</id> (glob)
-   <link rel="self" href="http://127.0.0.1:$HGPORT/atom-log"/> (glob)
-   <link rel="alternate" href="http://127.0.0.1:$HGPORT/"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/</id> (glob)
+   <link rel="self" href="http://$LOCALIP:$HGPORT/atom-log"/> (glob)
+   <link rel="alternate" href="http://$LOCALIP:$HGPORT/"/> (glob)
    <title>repo Changelog</title>
    <updated>1970-01-01T00:00:00+00:00</updated>
 
    <entry>
    <title>[default] test</title>
-   <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
-   <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
+   <link href="http://$LOCALIP:$HGPORT/rev/61c9426e69fe"/> (glob)
    <author>
     <name>test</name>
     <email>test</email>
--- a/tests/test-hgweb-no-request-uri.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hgweb-no-request-uri.t Sat Mar 11 13:53:14 2017 -0500
@@ -48,7 +48,7 @@
   >     'wsgi.run_once': False,
   >     'REQUEST_METHOD': 'GET',
   >     'SCRIPT_NAME': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
@@ -90,16 +90,16 @@
   <?xml version="1.0" encoding="ascii"?>
   <feed xmlns="http://www.w3.org/2005/Atom">
    <!-- Changelog -->
-   <id>http://127.0.0.1:$HGPORT/</id> (glob)
-   <link rel="self" href="http://127.0.0.1:$HGPORT/atom-log"/> (glob)
-   <link rel="alternate" href="http://127.0.0.1:$HGPORT/"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/</id> (glob)
+   <link rel="self" href="http://$LOCALIP:$HGPORT/atom-log"/> (glob)
+   <link rel="alternate" href="http://$LOCALIP:$HGPORT/"/> (glob)
    <title>repo Changelog</title>
    <updated>1970-01-01T00:00:00+00:00</updated>
 
    <entry>
    <title>[default] test</title>
-   <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
-   <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
+   <link href="http://$LOCALIP:$HGPORT/rev/61c9426e69fe"/> (glob)
    <author>
     <name>test</name>
     <email>test</email>
--- a/tests/test-hgweb-non-interactive.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hgweb-non-interactive.t Sat Mar 11 13:53:14 2017 -0500
@@ -60,7 +60,7 @@
   >     'SCRIPT_NAME': '',
   >     'PATH_INFO': '',
   >     'QUERY_STRING': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
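
The three tests above share one pattern: they build a WSGI environ by hand and call hgweb directly, with no socket involved, which is why SERVER_NAME must also come from $LOCALIP now. A self-contained sketch of that pattern (generic WSGI, not the tests' literal code):

    import io
    import os
    import sys

    def call_wsgi(application, path='/'):
        # 'application' is any WSGI callable, e.g. mercurial.hgweb.hgweb(repo)
        environ = {
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': 'http',
            'wsgi.input': io.BytesIO(),
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': False,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'REQUEST_METHOD': 'GET',
            'PATH_INFO': path,
            'SCRIPT_NAME': '',
            # taken from the environment, as the updated tests now do
            'SERVER_NAME': os.environ.get('LOCALIP', '127.0.0.1'),
            'SERVER_PORT': os.environ.get('HGPORT', '8000'),
            'SERVER_PROTOCOL': 'HTTP/1.0',
        }
        status = []
        def start_response(st, headers):
            status.append(st)
        body = b''.join(application(environ, start_response))
        return status[0], body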
--- a/tests/test-hgweb-raw.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hgweb-raw.t Sat Mar 11 13:53:14 2017 -0500
@@ -32,7 +32,7 @@
   It is very boring to read, but computers don't care about things like that.
 
   $ cat access.log error.log
-  127.0.0.1 - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
 
   $ rm access.log error.log
   $ hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid \
@@ -53,6 +53,6 @@
   It is very boring to read, but computers don't care about things like that.
 
   $ cat access.log error.log
-  127.0.0.1 - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
 
   $ cd ..
--- a/tests/test-hgweb-symrev.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-hgweb-symrev.t Sat Mar 11 13:53:14 2017 -0500 @@ -37,7 +37,7 @@ (De)referencing symbolic revisions (paper) - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=paper' | egrep $REVLINKS <li><a href="/graph/tip?style=paper">graph</a></li> <li><a href="/rev/tip?style=paper">changeset</a></li> <li><a href="/file/tip?style=paper">browse</a></li> @@ -52,7 +52,7 @@ <a href="/shortlog/tip?revcount=120&style=paper">more</a> | rev 2: <a href="/shortlog/43c799df6e75?style=paper">(0)</a> <a href="/shortlog/tip?style=paper">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=paper' | egrep $REVLINKS <li><a href="/shortlog/tip?style=paper">log</a></li> <li><a href="/rev/tip?style=paper">changeset</a></li> <li><a href="/file/tip?style=paper">browse</a></li> @@ -63,7 +63,7 @@ <a href="/graph/tip?revcount=120&style=paper">more</a> | rev 2: <a href="/graph/43c799df6e75?style=paper">(0)</a> <a href="/graph/tip?style=paper">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=paper' | egrep $REVLINKS <li><a href="/shortlog/tip?style=paper">log</a></li> <li><a href="/graph/tip?style=paper">graph</a></li> <li><a href="/rev/tip?style=paper">changeset</a></li> @@ -74,24 +74,24 @@ <a href="/file/tip/dir/?style=paper"> <a href="/file/tip/foo?style=paper"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=paper' | egrep $REVLINKS <a href="/shortlog/default?style=paper" class="open"> <a href="/shortlog/9d8c40cba617?style=paper" class="open"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=paper' | egrep $REVLINKS <a href="/rev/tip?style=paper"> <a href="/rev/9d8c40cba617?style=paper"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=paper' | egrep $REVLINKS <a href="/rev/xyzzy?style=paper"> <a href="/rev/a7c1559b7bba?style=paper"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=paper&rev=all()' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=paper&rev=all()' | egrep $REVLINKS <a href="/rev/9d8c40cba617?style=paper">third</a> <a href="/rev/a7c1559b7bba?style=paper">second</a> <a href="/rev/43c799df6e75?style=paper">first</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=paper' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=paper">log</a></li> <li><a href="/graph/xyzzy?style=paper">graph</a></li> <li><a href="/raw-rev/xyzzy?style=paper">raw</a></li> @@ -102,7 +102,7 @@ <td class="author"> <a href="/rev/9d8c40cba617?style=paper">9d8c40cba617</a></td> <td class="files"><a href="/file/a7c1559b7bba/foo?style=paper">foo</a> </td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=paper' | egrep 
$REVLINKS <li><a href="/graph/xyzzy?style=paper">graph</a></li> <li><a href="/rev/xyzzy?style=paper">changeset</a></li> <li><a href="/file/xyzzy?style=paper">browse</a></li> @@ -116,7 +116,7 @@ <a href="/shortlog/xyzzy?revcount=120&style=paper">more</a> | rev 1: <a href="/shortlog/43c799df6e75?style=paper">(0)</a> <a href="/shortlog/tip?style=paper">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=paper' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=paper">log</a></li> <li><a href="/rev/xyzzy?style=paper">changeset</a></li> <li><a href="/file/xyzzy?style=paper">browse</a></li> @@ -127,7 +127,7 @@ <a href="/graph/xyzzy?revcount=120&style=paper">more</a> | rev 1: <a href="/graph/43c799df6e75?style=paper">(0)</a> <a href="/graph/tip?style=paper">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=paper' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=paper">log</a></li> <li><a href="/graph/xyzzy?style=paper">graph</a></li> <li><a href="/rev/xyzzy?style=paper">changeset</a></li> @@ -138,7 +138,7 @@ <a href="/file/xyzzy/dir/?style=paper"> <a href="/file/xyzzy/foo?style=paper"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=paper' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=paper">log</a></li> <li><a href="/graph/xyzzy?style=paper">graph</a></li> <li><a href="/rev/xyzzy?style=paper">changeset</a></li> @@ -153,7 +153,7 @@ <td class="author"><a href="/file/43c799df6e75/foo?style=paper">43c799df6e75</a> </td> <td class="author"><a href="/file/9d8c40cba617/foo?style=paper">9d8c40cba617</a> </td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=paper' | egrep $REVLINKS href="/atom-log/tip/foo" title="Atom feed for test:foo" /> href="/rss-log/tip/foo" title="RSS feed for test:foo" /> <li><a href="/shortlog/xyzzy?style=paper">log</a></li> @@ -176,7 +176,7 @@ <a href="/log/xyzzy/foo?revcount=120&style=paper">more</a> | <a href="/log/43c799df6e75/foo?style=paper">(0)</a> <a href="/log/tip/foo?style=paper">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=paper' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=paper">log</a></li> <li><a href="/graph/xyzzy?style=paper">graph</a></li> <li><a href="/rev/xyzzy?style=paper">changeset</a></li> @@ -200,7 +200,7 @@ <a href="/diff/a7c1559b7bba/foo?style=paper">diff</a> <a href="/rev/a7c1559b7bba?style=paper">changeset</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=paper' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=paper">log</a></li> <li><a href="/graph/xyzzy?style=paper">graph</a></li> <li><a href="/rev/xyzzy?style=paper">changeset</a></li> @@ -215,7 +215,7 @@ <td><a href="/file/43c799df6e75/foo?style=paper">43c799df6e75</a> </td> <td><a href="/file/9d8c40cba617/foo?style=paper">9d8c40cba617</a> </td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 
'comparison/xyzzy/foo?style=paper' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=paper' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=paper">log</a></li> <li><a href="/graph/xyzzy?style=paper">graph</a></li> <li><a href="/rev/xyzzy?style=paper">changeset</a></li> @@ -232,7 +232,7 @@ (De)referencing symbolic revisions (coal) - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=coal' | egrep $REVLINKS <li><a href="/graph/tip?style=coal">graph</a></li> <li><a href="/rev/tip?style=coal">changeset</a></li> <li><a href="/file/tip?style=coal">browse</a></li> @@ -247,7 +247,7 @@ <a href="/shortlog/tip?revcount=120&style=coal">more</a> | rev 2: <a href="/shortlog/43c799df6e75?style=coal">(0)</a> <a href="/shortlog/tip?style=coal">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=coal' | egrep $REVLINKS <li><a href="/shortlog/tip?style=coal">log</a></li> <li><a href="/rev/tip?style=coal">changeset</a></li> <li><a href="/file/tip?style=coal">browse</a></li> @@ -258,7 +258,7 @@ <a href="/graph/tip?revcount=120&style=coal">more</a> | rev 2: <a href="/graph/43c799df6e75?style=coal">(0)</a> <a href="/graph/tip?style=coal">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=coal' | egrep $REVLINKS <li><a href="/shortlog/tip?style=coal">log</a></li> <li><a href="/graph/tip?style=coal">graph</a></li> <li><a href="/rev/tip?style=coal">changeset</a></li> @@ -269,24 +269,24 @@ <a href="/file/tip/dir/?style=coal"> <a href="/file/tip/foo?style=coal"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=coal' | egrep $REVLINKS <a href="/shortlog/default?style=coal" class="open"> <a href="/shortlog/9d8c40cba617?style=coal" class="open"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=coal' | egrep $REVLINKS <a href="/rev/tip?style=coal"> <a href="/rev/9d8c40cba617?style=coal"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=coal' | egrep $REVLINKS <a href="/rev/xyzzy?style=coal"> <a href="/rev/a7c1559b7bba?style=coal"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=coal&rev=all()' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=coal&rev=all()' | egrep $REVLINKS <a href="/rev/9d8c40cba617?style=coal">third</a> <a href="/rev/a7c1559b7bba?style=coal">second</a> <a href="/rev/43c799df6e75?style=coal">first</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=coal' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=coal">log</a></li> <li><a href="/graph/xyzzy?style=coal">graph</a></li> <li><a href="/raw-rev/xyzzy?style=coal">raw</a></li> @@ -297,7 +297,7 @@ <td class="author"> <a href="/rev/9d8c40cba617?style=coal">9d8c40cba617</a></td> <td class="files"><a href="/file/a7c1559b7bba/foo?style=coal">foo</a> </td> - $ 
"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=coal' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=coal">graph</a></li> <li><a href="/rev/xyzzy?style=coal">changeset</a></li> <li><a href="/file/xyzzy?style=coal">browse</a></li> @@ -311,7 +311,7 @@ <a href="/shortlog/xyzzy?revcount=120&style=coal">more</a> | rev 1: <a href="/shortlog/43c799df6e75?style=coal">(0)</a> <a href="/shortlog/tip?style=coal">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=coal' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=coal">log</a></li> <li><a href="/rev/xyzzy?style=coal">changeset</a></li> <li><a href="/file/xyzzy?style=coal">browse</a></li> @@ -322,7 +322,7 @@ <a href="/graph/xyzzy?revcount=120&style=coal">more</a> | rev 1: <a href="/graph/43c799df6e75?style=coal">(0)</a> <a href="/graph/tip?style=coal">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=coal' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=coal">log</a></li> <li><a href="/graph/xyzzy?style=coal">graph</a></li> <li><a href="/rev/xyzzy?style=coal">changeset</a></li> @@ -333,7 +333,7 @@ <a href="/file/xyzzy/dir/?style=coal"> <a href="/file/xyzzy/foo?style=coal"> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=coal' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=coal">log</a></li> <li><a href="/graph/xyzzy?style=coal">graph</a></li> <li><a href="/rev/xyzzy?style=coal">changeset</a></li> @@ -348,7 +348,7 @@ <td class="author"><a href="/file/43c799df6e75/foo?style=coal">43c799df6e75</a> </td> <td class="author"><a href="/file/9d8c40cba617/foo?style=coal">9d8c40cba617</a> </td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=coal' | egrep $REVLINKS href="/atom-log/tip/foo" title="Atom feed for test:foo" /> href="/rss-log/tip/foo" title="RSS feed for test:foo" /> <li><a href="/shortlog/xyzzy?style=coal">log</a></li> @@ -371,7 +371,7 @@ <a href="/log/xyzzy/foo?revcount=120&style=coal">more</a> | <a href="/log/43c799df6e75/foo?style=coal">(0)</a> <a href="/log/tip/foo?style=coal">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=coal' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=coal">log</a></li> <li><a href="/graph/xyzzy?style=coal">graph</a></li> <li><a href="/rev/xyzzy?style=coal">changeset</a></li> @@ -395,7 +395,7 @@ <a href="/diff/a7c1559b7bba/foo?style=coal">diff</a> <a href="/rev/a7c1559b7bba?style=coal">changeset</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=coal' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=coal">log</a></li> <li><a href="/graph/xyzzy?style=coal">graph</a></li> <li><a href="/rev/xyzzy?style=coal">changeset</a></li> @@ -410,7 +410,7 @@ <td><a href="/file/43c799df6e75/foo?style=coal">43c799df6e75</a> </td> <td><a 
href="/file/9d8c40cba617/foo?style=coal">9d8c40cba617</a> </td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=coal' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=coal' | egrep $REVLINKS <li><a href="/shortlog/xyzzy?style=coal">log</a></li> <li><a href="/graph/xyzzy?style=coal">graph</a></li> <li><a href="/rev/xyzzy?style=coal">changeset</a></li> @@ -427,7 +427,7 @@ (De)referencing symbolic revisions (gitweb) - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'summary?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'summary?style=gitweb' | egrep $REVLINKS <a href="/file?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a> | <a class="list" href="/rev/9d8c40cba617?style=gitweb"> <a href="/rev/9d8c40cba617?style=gitweb">changeset</a> | @@ -447,7 +447,7 @@ <a href="/log/9d8c40cba617?style=gitweb">changelog</a> | <a href="/file/9d8c40cba617?style=gitweb">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=gitweb' | egrep $REVLINKS <a href="/log/tip?style=gitweb">changelog</a> | <a href="/graph/tip?style=gitweb">graph</a> | <a href="/file/tip?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a> | @@ -463,7 +463,7 @@ <a href="/file/43c799df6e75?style=gitweb">files</a> <a href="/shortlog/43c799df6e75?style=gitweb">(0)</a> <a href="/shortlog/tip?style=gitweb">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=gitweb' | egrep $REVLINKS <a href="/shortlog/tip?style=gitweb">shortlog</a> | <a href="/graph/tip?style=gitweb">graph</a> | <a href="/file/tip?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a> | @@ -476,7 +476,7 @@ <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/> <a href="/log/43c799df6e75?style=gitweb">(0)</a> <a href="/log/tip?style=gitweb">tip</a> <br/> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=gitweb' | egrep $REVLINKS <a href="/shortlog/tip?style=gitweb">shortlog</a> | <a href="/log/tip?style=gitweb">changelog</a> | <a href="/file/tip?style=gitweb">files</a> | @@ -487,25 +487,25 @@ <a href="/graph/tip?revcount=120&style=gitweb">more</a> | <a href="/graph/43c799df6e75?style=gitweb">(0)</a> <a href="/graph/tip?style=gitweb">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=gitweb' | egrep $REVLINKS <td><a class="list" href="/rev/tip?style=gitweb"><b>tip</b></a></td> <a href="/rev/9d8c40cba617?style=gitweb">changeset</a> | <a href="/log/9d8c40cba617?style=gitweb">changelog</a> | <a href="/file/9d8c40cba617?style=gitweb">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=gitweb' | egrep $REVLINKS <td><a class="list" href="/rev/xyzzy?style=gitweb"><b>xyzzy</b></a></td> <a href="/rev/a7c1559b7bba?style=gitweb">changeset</a> | <a href="/log/a7c1559b7bba?style=gitweb">changelog</a> | <a href="/file/a7c1559b7bba?style=gitweb">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" 
$LOCALIP:$HGPORT 'branches?style=gitweb' | egrep $REVLINKS <td class="open"><a class="list" href="/shortlog/default?style=gitweb"><b>default</b></a></td> <a href="/changeset/9d8c40cba617?style=gitweb">changeset</a> | <a href="/log/9d8c40cba617?style=gitweb">changelog</a> | <a href="/file/9d8c40cba617?style=gitweb">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=gitweb' | egrep $REVLINKS <a href="/rev/tip?style=gitweb">changeset</a> | <a href="/archive/tip.zip">zip</a> | <td><a href="/file/tip/?style=gitweb">[up]</a></td> <a href="/file/tip/dir?style=gitweb">dir</a> @@ -516,7 +516,7 @@ <a href="/log/tip/foo?style=gitweb">revisions</a> | <a href="/annotate/tip/foo?style=gitweb">annotate</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=gitweb&rev=all()' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=gitweb&rev=all()' | egrep $REVLINKS <a href="/file?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a> <a class="title" href="/rev/9d8c40cba617?style=gitweb"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>third<span class="logtags"> <span class="branchtag" title="default">default</span> <span class="tagtag" title="tip">tip</span> </span></a> <a href="/rev/9d8c40cba617?style=gitweb">changeset</a><br/> @@ -525,7 +525,7 @@ <a class="title" href="/rev/43c799df6e75?style=gitweb"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>first<span class="logtags"> </span></a> <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=gitweb' | egrep $REVLINKS <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> | <a href="/log/xyzzy?style=gitweb">changelog</a> | <a href="/graph/xyzzy?style=gitweb">graph</a> | @@ -542,7 +542,7 @@ <a href="/comparison/a7c1559b7bba/foo?style=gitweb">comparison</a> | <a href="/log/a7c1559b7bba/foo?style=gitweb">revisions</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=gitweb' | egrep $REVLINKS <a href="/log/xyzzy?style=gitweb">changelog</a> | <a href="/graph/xyzzy?style=gitweb">graph</a> | <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/archive/xyzzy.zip">zip</a> | @@ -555,7 +555,7 @@ <a href="/file/43c799df6e75?style=gitweb">files</a> <a href="/shortlog/43c799df6e75?style=gitweb">(0)</a> <a href="/shortlog/tip?style=gitweb">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=gitweb' | egrep $REVLINKS <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> | <a href="/graph/xyzzy?style=gitweb">graph</a> | <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/archive/xyzzy.zip">zip</a> | @@ -566,7 +566,7 @@ <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/> <a href="/log/43c799df6e75?style=gitweb">(0)</a> <a href="/log/tip?style=gitweb">tip</a> <br/> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=gitweb' | egrep $REVLINKS <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> | <a href="/log/xyzzy?style=gitweb">changelog</a> | <a 
href="/file/xyzzy?style=gitweb">files</a> | @@ -577,7 +577,7 @@ <a href="/graph/xyzzy?revcount=120&style=gitweb">more</a> | <a href="/graph/43c799df6e75?style=gitweb">(0)</a> <a href="/graph/tip?style=gitweb">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=gitweb' | egrep $REVLINKS <a href="/rev/xyzzy?style=gitweb">changeset</a> | <a href="/archive/xyzzy.zip">zip</a> | <td><a href="/file/xyzzy/?style=gitweb">[up]</a></td> <a href="/file/xyzzy/dir?style=gitweb">dir</a> @@ -588,7 +588,7 @@ <a href="/log/xyzzy/foo?style=gitweb">revisions</a> | <a href="/annotate/xyzzy/foo?style=gitweb">annotate</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=gitweb' | egrep $REVLINKS <a href="/file/xyzzy/?style=gitweb">files</a> | <a href="/rev/xyzzy?style=gitweb">changeset</a> | <a href="/file/tip/foo?style=gitweb">latest</a> | @@ -601,7 +601,7 @@ <a class="list" href="/file/43c799df6e75/foo?style=gitweb"> <a class="list" href="/file/9d8c40cba617/foo?style=gitweb">9d8c40cba617</a></td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=gitweb' | egrep $REVLINKS <a href="/file/xyzzy/foo?style=gitweb">file</a> | <a href="/annotate/xyzzy/foo?style=gitweb">annotate</a> | <a href="/diff/xyzzy/foo?style=gitweb">diff</a> | @@ -616,9 +616,11 @@ <a href="/file/43c799df6e75/foo?style=gitweb">file</a> | <a href="/diff/43c799df6e75/foo?style=gitweb">diff</a> | <a href="/annotate/43c799df6e75/foo?style=gitweb">annotate</a> + <a href="/log/xyzzy/foo?revcount=30&style=gitweb">less</a> + <a href="/log/xyzzy/foo?revcount=120&style=gitweb">more</a> <a href="/log/43c799df6e75/foo?style=gitweb">(0)</a> <a href="/log/tip/foo?style=gitweb">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=gitweb' | egrep $REVLINKS <a href="/file/xyzzy/?style=gitweb">files</a> | <a href="/rev/xyzzy?style=gitweb">changeset</a> | <a href="/file/xyzzy/foo?style=gitweb">file</a> | @@ -640,7 +642,7 @@ <a href="/diff/a7c1559b7bba/foo?style=gitweb">diff</a> <a href="/rev/a7c1559b7bba?style=gitweb">changeset</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=gitweb' | egrep $REVLINKS <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/rev/xyzzy?style=gitweb">changeset</a> | <a href="/file/xyzzy/foo?style=gitweb">file</a> | @@ -653,7 +655,7 @@ <a class="list" href="/diff/43c799df6e75/foo?style=gitweb"> <a class="list" href="/diff/9d8c40cba617/foo?style=gitweb">9d8c40cba617</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=gitweb' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=gitweb' | egrep $REVLINKS <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/rev/xyzzy?style=gitweb">changeset</a> | <a href="/file/xyzzy/foo?style=gitweb">file</a> | @@ -668,7 +670,7 @@ (De)referencing symbolic revisions (monoblue) - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'summary?style=monoblue' | egrep $REVLINKS + $ 
"$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'summary?style=monoblue' | egrep $REVLINKS <li><a href="/archive/tip.zip">zip</a></li> <a href="/rev/9d8c40cba617?style=monoblue"> <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> | @@ -688,7 +690,7 @@ <a href="/log/9d8c40cba617?style=monoblue">changelog</a> | <a href="/file/9d8c40cba617?style=monoblue">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=monoblue' | egrep $REVLINKS <li><a href="/graph/tip?style=monoblue">graph</a></li> <li><a href="/file/tip?style=monoblue">files</a></li> <li><a href="/archive/tip.zip">zip</a></li> @@ -703,7 +705,7 @@ <a href="/file/43c799df6e75?style=monoblue">files</a> <a href="/shortlog/43c799df6e75?style=monoblue">(0)</a> <a href="/shortlog/tip?style=monoblue">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=monoblue' | egrep $REVLINKS <li><a href="/graph/tip?style=monoblue">graph</a></li> <li><a href="/file/tip?style=monoblue">files</a></li> <li><a href="/archive/tip.zip">zip</a></li> @@ -712,31 +714,31 @@ <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3> <a href="/log/43c799df6e75?style=monoblue">(0)</a> <a href="/log/tip?style=monoblue">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=monoblue' | egrep $REVLINKS <li><a href="/file/tip?style=monoblue">files</a></li> <a href="/graph/tip?revcount=30&style=monoblue">less</a> <a href="/graph/tip?revcount=120&style=monoblue">more</a> | <a href="/graph/43c799df6e75?style=monoblue">(0)</a> <a href="/graph/tip?style=monoblue">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=monoblue' | egrep $REVLINKS <td><a href="/rev/tip?style=monoblue">tip</a></td> <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> | <a href="/log/9d8c40cba617?style=monoblue">changelog</a> | <a href="/file/9d8c40cba617?style=monoblue">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=monoblue' | egrep $REVLINKS <td><a href="/rev/xyzzy?style=monoblue">xyzzy</a></td> <a href="/rev/a7c1559b7bba?style=monoblue">changeset</a> | <a href="/log/a7c1559b7bba?style=monoblue">changelog</a> | <a href="/file/a7c1559b7bba?style=monoblue">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=monoblue' | egrep $REVLINKS <td class="open"><a href="/shortlog/default?style=monoblue">default</a></td> <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> | <a href="/log/9d8c40cba617?style=monoblue">changelog</a> | <a href="/file/9d8c40cba617?style=monoblue">files</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=monoblue' | egrep $REVLINKS <li><a href="/graph/tip?style=monoblue">graph</a></li> <li><a href="/rev/tip?style=monoblue">changeset</a></li> <li><a href="/archive/tip.zip">zip</a></li> @@ -749,13 
+751,13 @@ <a href="/log/tip/foo?style=monoblue">revisions</a> | <a href="/annotate/tip/foo?style=monoblue">annotate</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=monoblue&rev=all()' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=monoblue&rev=all()' | egrep $REVLINKS <li><a href="/archive/tip.zip">zip</a></li> <h3 class="changelog"><a class="title" href="/rev/9d8c40cba617?style=monoblue">third<span class="logtags"> <span class="branchtag" title="default">default</span> <span class="tagtag" title="tip">tip</span> </span></a></h3> <h3 class="changelog"><a class="title" href="/rev/a7c1559b7bba?style=monoblue">second<span class="logtags"> <span class="bookmarktag" title="xyzzy">xyzzy</span> </span></a></h3> <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy?style=monoblue">files</a></li> <li><a href="/raw-rev/xyzzy">raw</a></li> @@ -771,7 +773,7 @@ <a href="/comparison/a7c1559b7bba/foo?style=monoblue">comparison</a> | <a href="/log/a7c1559b7bba/foo?style=monoblue">revisions</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy?style=monoblue">files</a></li> <li><a href="/archive/xyzzy.zip">zip</a></li> @@ -783,7 +785,7 @@ <a href="/file/43c799df6e75?style=monoblue">files</a> <a href="/shortlog/43c799df6e75?style=monoblue">(0)</a> <a href="/shortlog/tip?style=monoblue">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy?style=monoblue">files</a></li> <li><a href="/archive/xyzzy.zip">zip</a></li> @@ -791,13 +793,13 @@ <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3> <a href="/log/43c799df6e75?style=monoblue">(0)</a> <a href="/log/tip?style=monoblue">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=monoblue' | egrep $REVLINKS <li><a href="/file/xyzzy?style=monoblue">files</a></li> <a href="/graph/xyzzy?revcount=30&style=monoblue">less</a> <a href="/graph/xyzzy?revcount=120&style=monoblue">more</a> | <a href="/graph/43c799df6e75?style=monoblue">(0)</a> <a href="/graph/tip?style=monoblue">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/rev/xyzzy?style=monoblue">changeset</a></li> <li><a href="/archive/xyzzy.zip">zip</a></li> @@ -810,7 +812,7 @@ <a href="/log/xyzzy/foo?style=monoblue">revisions</a> | <a href="/annotate/xyzzy/foo?style=monoblue">annotate</a> - $ "$TESTDIR/get-with-headers.py" 
127.0.0.1:$HGPORT 'file/xyzzy/foo?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy/?style=monoblue">files</a></li> <li><a href="/file/tip/foo?style=monoblue">latest</a></li> @@ -823,7 +825,7 @@ <a href="/file/43c799df6e75/foo?style=monoblue"> <a href="/file/9d8c40cba617/foo?style=monoblue">9d8c40cba617</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy?style=monoblue">files</a></li> <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li> @@ -841,7 +843,7 @@ <a href="/annotate/43c799df6e75/foo?style=monoblue">annotate</a> <a href="/log/43c799df6e75/foo?style=monoblue">(0)</a> <a href="/log/tip/foo?style=monoblue">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy/?style=monoblue">files</a></li> <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li> @@ -863,7 +865,7 @@ <a href="/diff/a7c1559b7bba/foo?style=monoblue">diff</a> <a href="/rev/a7c1559b7bba?style=monoblue">changeset</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy?style=monoblue">files</a></li> <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li> @@ -876,7 +878,7 @@ <dd><a href="/diff/43c799df6e75/foo?style=monoblue">43c799df6e75</a></dd> <dd><a href="/diff/9d8c40cba617/foo?style=monoblue">9d8c40cba617</a></dd> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=monoblue' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=monoblue' | egrep $REVLINKS <li><a href="/graph/xyzzy?style=monoblue">graph</a></li> <li><a href="/file/xyzzy?style=monoblue">files</a></li> <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li> @@ -891,7 +893,7 @@ (De)referencing symbolic revisions (spartan) - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=spartan' | egrep $REVLINKS <a href="/log/tip?style=spartan">changelog</a> <a href="/graph/tip?style=spartan">graph</a> <a href="/file/tip/?style=spartan">files</a> @@ -902,7 +904,7 @@ <td class="node"><a href="/rev/43c799df6e75?style=spartan">first</a></td> navigate: <small class="navigate"><a href="/shortlog/43c799df6e75?style=spartan">(0)</a> <a href="/shortlog/tip?style=spartan">tip</a> </small> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=spartan' | egrep $REVLINKS <a href="/shortlog/tip?style=spartan">shortlog</a> <a href="/graph/tip?style=spartan">graph</a> <a href="/file/tip?style=spartan">files</a> @@ -919,20 +921,20 @@ <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a 
href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td> navigate: <small class="navigate"><a href="/log/43c799df6e75?style=spartan">(0)</a> <a href="/log/tip?style=spartan">tip</a> </small> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=spartan' | egrep $REVLINKS <a href="/log/tip?style=spartan">changelog</a> <a href="/shortlog/tip?style=spartan">shortlog</a> <a href="/file/tip/?style=spartan">files</a> navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small> navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=spartan' | egrep $REVLINKS <a href="/rev/9d8c40cba617?style=spartan">tip</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=spartan' | egrep $REVLINKS <a href="/shortlog/9d8c40cba617?style=spartan" class="open">default</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=spartan' | egrep $REVLINKS <a href="/log/tip?style=spartan">changelog</a> <a href="/shortlog/tip?style=spartan">shortlog</a> <a href="/graph/tip?style=spartan">graph</a> @@ -944,7 +946,7 @@ <a href="/file/tip/dir/?style=spartan"> <td><a href="/file/tip/foo?style=spartan">foo</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=spartan&rev=all()' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=spartan&rev=all()' | egrep $REVLINKS <a href="/archive/tip.zip">zip</a> <td class="node"><a href="/rev/9d8c40cba617?style=spartan">9d8c40cba617</a></td> <a href="/rev/a7c1559b7bba?style=spartan">a7c1559b7bba</a> @@ -960,7 +962,7 @@ <th class="files"><a href="/file/43c799df6e75?style=spartan">files</a>:</th> <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=spartan' | egrep $REVLINKS <a href="/log/xyzzy?style=spartan">changelog</a> <a href="/shortlog/xyzzy?style=spartan">shortlog</a> <a href="/graph/xyzzy?style=spartan">graph</a> @@ -972,7 +974,7 @@ <td class="child"><a href="/rev/9d8c40cba617?style=spartan">9d8c40cba617</a></td> <td class="files"><a href="/file/a7c1559b7bba/foo?style=spartan">foo</a> </td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=spartan' | egrep $REVLINKS <a href="/log/xyzzy?style=spartan">changelog</a> <a href="/graph/xyzzy?style=spartan">graph</a> <a href="/file/xyzzy/?style=spartan">files</a> @@ -982,7 +984,7 @@ <td class="node"><a href="/rev/43c799df6e75?style=spartan">first</a></td> navigate: <small class="navigate"><a href="/shortlog/43c799df6e75?style=spartan">(0)</a> <a href="/shortlog/tip?style=spartan">tip</a> </small> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=spartan' | egrep 
$REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=spartan' | egrep $REVLINKS <a href="/shortlog/xyzzy?style=spartan">shortlog</a> <a href="/graph/xyzzy?style=spartan">graph</a> <a href="/file/xyzzy?style=spartan">files</a> @@ -996,14 +998,14 @@ <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td> navigate: <small class="navigate"><a href="/log/43c799df6e75?style=spartan">(0)</a> <a href="/log/tip?style=spartan">tip</a> </small> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=spartan' | egrep $REVLINKS <a href="/log/xyzzy?style=spartan">changelog</a> <a href="/shortlog/xyzzy?style=spartan">shortlog</a> <a href="/file/xyzzy/?style=spartan">files</a> navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small> navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=spartan' | egrep $REVLINKS <a href="/log/xyzzy?style=spartan">changelog</a> <a href="/shortlog/xyzzy?style=spartan">shortlog</a> <a href="/graph/xyzzy?style=spartan">graph</a> @@ -1015,7 +1017,7 @@ <a href="/file/xyzzy/dir/?style=spartan"> <td><a href="/file/xyzzy/foo?style=spartan">foo</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=spartan' | egrep $REVLINKS <a href="/log/xyzzy?style=spartan">changelog</a> <a href="/shortlog/xyzzy?style=spartan">shortlog</a> <a href="/graph/xyzzy?style=spartan">graph</a> @@ -1028,7 +1030,7 @@ <a href="/file/43c799df6e75/foo?style=spartan"> <td><a href="/file/9d8c40cba617/foo?style=spartan">9d8c40cba617</a></td> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=spartan' | egrep $REVLINKS href="/atom-log/tip/foo" title="Atom feed for test:foo"> href="/rss-log/tip/foo" title="RSS feed for test:foo"> <a href="/file/xyzzy/foo?style=spartan">file</a> @@ -1045,7 +1047,7 @@ <a href="/diff/43c799df6e75/foo?style=spartan">(diff)</a> <a href="/annotate/43c799df6e75/foo?style=spartan">(annotate)</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=spartan' | egrep $REVLINKS <a href="/log/xyzzy?style=spartan">changelog</a> <a href="/shortlog/xyzzy?style=spartan">shortlog</a> <a href="/graph/xyzzy?style=spartan">graph</a> @@ -1067,7 +1069,7 @@ <a href="/diff/a7c1559b7bba/foo?style=spartan">diff</a> <a href="/rev/a7c1559b7bba?style=spartan">changeset</a> - $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=spartan' | egrep $REVLINKS + $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=spartan' | egrep $REVLINKS <a href="/log/xyzzy?style=spartan">changelog</a> <a href="/shortlog/xyzzy?style=spartan">shortlog</a> <a href="/graph/xyzzy?style=spartan">graph</a>
--- a/tests/test-hgwebdir.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hgwebdir.t Sat Mar 11 13:53:14 2017 -0500
@@ -1421,7 +1421,7 @@
   > EOF
   $ hg serve -d --pid-file=hg.pid --web-conf paths.conf \
   > -A access-paths.log -E error-paths-9.log
-  listening at http://*:$HGPORT1/ (bound to 127.0.0.1:$HGPORT1) (glob)
+  listening at http://*:$HGPORT1/ (bound to *$LOCALIP*:$HGPORT1) (glob)
   $ cat hg.pid >> $DAEMON_PIDS
   $ get-with-headers.py localhost:$HGPORT1 '?style=raw'
   200 Script output follows
@@ -1433,7 +1433,7 @@
   $ killdaemons.py
   $ hg serve -p $HGPORT2 -d -v --pid-file=hg.pid --web-conf paths.conf \
   > -A access-paths.log -E error-paths-10.log
-  listening at http://*:$HGPORT2/ (bound to 127.0.0.1:$HGPORT2) (glob)
+  listening at http://*:$HGPORT2/ (bound to *$LOCALIP*:$HGPORT2) (glob)
   $ cat hg.pid >> $DAEMON_PIDS
   $ get-with-headers.py localhost:$HGPORT2 '?style=raw'
   200 Script output follows
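
The expected "listening at ... (bound to ...)" line is a (glob) pattern, so * wildcards and the runner-substituted $LOCALIP/$HGPORT1 values are resolved before comparison. A toy matcher in that spirit (run-tests.py's real logic is more involved):

    import os
    import re

    def globmatch(expected, actual):
        # substitute runner-provided values, then treat '*' as a wildcard
        expected = (expected
                    .replace('$LOCALIP', os.environ.get('LOCALIP', '127.0.0.1'))
                    .replace('$HGPORT1', os.environ.get('HGPORT1', '20060')))
        pattern = '.*'.join(re.escape(part) for part in expected.split('*'))
        return re.match(pattern + '$', actual) is not None

    globmatch('listening at http://*:$HGPORT1/ (bound to *$LOCALIP*:$HGPORT1)',
              'listening at http://localhost:20060/ (bound to 127.0.0.1:20060)')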
--- a/tests/test-histedit-arguments.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-arguments.t Sat Mar 11 13:53:14 2017 -0500
@@ -72,7 +72,7 @@
   # p, pick = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
 
 Run on a revision not ancestors of the current working directory.
@@ -308,7 +308,7 @@
   # p, pick = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
 
 Test --continue with --keep
@@ -544,7 +544,7 @@
   # p, pick = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
 
   $ cd ..
--- a/tests/test-histedit-bookmark-motion.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-bookmark-motion.t Sat Mar 11 13:53:14 2017 -0500
@@ -78,7 +78,7 @@
   # p, pick = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose << EOF | grep histedit
   > pick 177f92b77385 2 c
@@ -141,7 +141,7 @@
   # p, pick = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose << EOF | grep histedit
   > pick b346ab9a313d 1 c
--- a/tests/test-histedit-commute.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-commute.t Sat Mar 11 13:53:14 2017 -0500
@@ -72,7 +72,7 @@
   # p, pick = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
 
 edit the history
@@ -350,7 +350,7 @@
   # p, pick = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
 
 should also work if a commit message is missing
--- a/tests/test-histedit-edit.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-edit.t Sat Mar 11 13:53:14 2017 -0500
@@ -478,5 +478,5 @@
   # p, fold = use commit
   # d, drop = remove commit from history
   # f, fold = use commit, but combine it with the one above
-  # r, roll = like fold, but discard this commit's description
+  # r, roll = like fold, but discard this commit's description and date
   #
--- a/tests/test-histedit-fold-non-commute.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-fold-non-commute.t Sat Mar 11 13:53:14 2017 -0500
@@ -5,6 +5,12 @@
   > histedit=
   > EOF
 
+  $ modwithdate ()
+  > {
+  >   echo $1 > $1
+  >   hg ci -m $1 -d "$2 0"
+  > }
+
   $ initrepo ()
   > {
   >   hg init $1
@@ -14,12 +20,14 @@
   >   hg add $x
   >   done
   >   hg ci -m 'Initial commit'
-  >   for x in a b c d e f ; do
-  >     echo $x > $x
-  >     hg ci -m $x
-  >   done
+  >   modwithdate a 1
+  >   modwithdate b 2
+  >   modwithdate c 3
+  >   modwithdate d 4
+  >   modwithdate e 5
+  >   modwithdate f 6
   >   echo 'I can haz no commute' > e
-  >   hg ci -m 'does not commute with e'
+  >   hg ci -m 'does not commute with e' -d '7 0'
   >   cd ..
   > }
 
@@ -34,48 +42,48 @@
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 5 >> $EDITED
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 6 >> $EDITED
   $ cat $EDITED
-  pick 65a9a84f33fd 3 c
-  pick 00f1c5383965 4 d
-  fold 39522b764e3d 7 does not commute with e
-  pick 7b4e2f4b7bcd 5 e
-  pick 500cac37a696 6 f
+  pick 092e4ce14829 3 c
+  pick ae78f4c9d74f 4 d
+  fold 42abbb61bede 7 does not commute with e
+  pick 7f3755409b00 5 e
+  pick dd184f2faeb0 6 f
 
 log before edit
   $ hg log --graph
-  @  changeset:   7:39522b764e3d
+  @  changeset:   7:42abbb61bede
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     does not commute with e
   |
-  o  changeset:   6:500cac37a696
+  o  changeset:   6:dd184f2faeb0
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   5:7b4e2f4b7bcd
+  o  changeset:   5:7f3755409b00
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:05 1970 +0000
   |  summary:     e
   |
-  o  changeset:   4:00f1c5383965
+  o  changeset:   4:ae78f4c9d74f
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -89,7 +97,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (fold 39522b764e3d)
+  Fix up the change (fold 42abbb61bede)
   (hg histedit --continue to resume)
 
 fix up
@@ -113,7 +121,7 @@
   HG: changed e
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (pick 7b4e2f4b7bcd)
+  Fix up the change (pick 7f3755409b00)
   (hg histedit --continue to resume)
 
 just continue this time
@@ -124,34 +132,34 @@
   continue: hg histedit --continue
   $ hg diff
   $ hg histedit --continue 2>&1 | fixbundle
-  7b4e2f4b7bcd: skipping changeset (no changes)
+  7f3755409b00: skipping changeset (no changes)
 
 log after edit
   $ hg log --graph
-  @  changeset:   5:d9cf42e54966
+  @  changeset:   5:1300355b1a54
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   4:10486af2e984
+  o  changeset:   4:e2ac33269083
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -175,7 +183,7 @@
 
   $ cd ..
 
-Repeat test using "roll", not "fold". "roll" folds in changes but drops message
+Repeat test using "roll", not "fold". "roll" folds in changes but drops message and date
 
   $ initrepo r2
   $ cd r2
@@ -189,48 +197,48 @@
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 5 >> $EDITED
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 6 >> $EDITED
   $ cat $EDITED
-  pick 65a9a84f33fd 3 c
-  pick 00f1c5383965 4 d
-  roll 39522b764e3d 7 does not commute with e
-  pick 7b4e2f4b7bcd 5 e
-  pick 500cac37a696 6 f
+  pick 092e4ce14829 3 c
+  pick ae78f4c9d74f 4 d
+  roll 42abbb61bede 7 does not commute with e
+  pick 7f3755409b00 5 e
+  pick dd184f2faeb0 6 f
 
 log before edit
   $ hg log --graph
-  @  changeset:   7:39522b764e3d
+  @  changeset:   7:42abbb61bede
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     does not commute with e
   |
-  o  changeset:   6:500cac37a696
+  o  changeset:   6:dd184f2faeb0
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   5:7b4e2f4b7bcd
+  o  changeset:   5:7f3755409b00
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:05 1970 +0000
   |  summary:     e
   |
-  o  changeset:   4:00f1c5383965
+  o  changeset:   4:ae78f4c9d74f
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -244,7 +252,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (roll 39522b764e3d)
+  Fix up the change (roll 42abbb61bede)
   (hg histedit --continue to resume)
 
 fix up
@@ -255,7 +263,7 @@
   $ hg histedit --continue 2>&1 | fixbundle | grep -v '2 files removed'
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (pick 7b4e2f4b7bcd)
+  Fix up the change (pick 7f3755409b00)
   (hg histedit --continue to resume)
 
 just continue this time
@@ -264,34 +272,34 @@
   (no more unresolved files)
   continue: hg histedit --continue
   $ hg histedit --continue 2>&1 | fixbundle
-  7b4e2f4b7bcd: skipping changeset (no changes)
+  7f3755409b00: skipping changeset (no changes)
 
 log after edit
   $ hg log --graph
-  @  changeset:   5:e7c4f5d4eb75
+  @  changeset:   5:b538bcb461be
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   4:803d1bb561fc
+  o  changeset:   4:317e37cb6d66
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -316,16 +324,16 @@
 description is taken from rollup target commit
 
   $ hg log --debug --rev 4
-  changeset:   4:803d1bb561fceac3129ec778db9da249a3106fc3
+  changeset:   4:317e37cb6d66c1c84628c00e5bf4c8c292831951
   phase:       draft
-  parent:      3:65a9a84f33fdeb1ad5679b3941ec885d2b24027b
+  parent:      3:092e4ce14829f4974399ce4316d59f64ef0b6725
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    4:b068a323d969f22af1296ec6a5ea9384cef437ac
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:04 1970 +0000
   files:       d e
   extra:       branch=default
-  extra:       histedit_source=00f1c53839651fa5c76d423606811ea5455a79d0,39522b764e3d26103f08bd1fa2ccd3e3d7dbcf4e
+  extra:       histedit_source=ae78f4c9d74ffa4b6cb5045001c303fe9204e890,42abbb61bede6f4366fa1e74a664343e5d558a70
   description:
   d
--- a/tests/test-histedit-fold.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-fold.t Sat Mar 11 13:53:14 2017 -0500
@@ -20,52 +20,60 @@
 Simple folding
 --------------------
+  $ addwithdate ()
+  > {
+  >   echo $1 > $1
+  >   hg add $1
+  >   hg ci -m $1 -d "$2 0"
+  > }
+
   $ initrepo ()
   > {
   >   hg init r
   >   cd r
-  >   for x in a b c d e f ; do
-  >     echo $x > $x
-  >     hg add $x
-  >     hg ci -m $x
-  >   done
+  >   addwithdate a 1
+  >   addwithdate b 2
+  >   addwithdate c 3
+  >   addwithdate d 4
+  >   addwithdate e 5
+  >   addwithdate f 6
   > }
 
   $ initrepo
 
 log before edit
   $ hg logt --graph
-  @  5:652413bf663e f
+  @  5:178e35e0ce73 f
   |
-  o  4:e860deea161a e
+  o  4:1ddb6c90f2ee e
   |
-  o  3:055a42cdd887 d
+  o  3:532247a8969b d
   |
-  o  2:177f92b77385 c
+  o  2:ff2c9fa2018b c
   |
-  o  1:d2ae7f538514 b
+  o  1:97d72e5f12c7 b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
 
-  $ hg histedit 177f92b77385 --commands - 2>&1 <<EOF | fixbundle
-  > pick e860deea161a e
-  > pick 652413bf663e f
-  > fold 177f92b77385 c
-  > pick 055a42cdd887 d
+  $ hg histedit ff2c9fa2018b --commands - 2>&1 <<EOF | fixbundle
+  > pick 1ddb6c90f2ee e
+  > pick 178e35e0ce73 f
+  > fold ff2c9fa2018b c
+  > pick 532247a8969b d
   > EOF
 
 log after edit
   $ hg logt --graph
-  @  4:9c277da72c9b d
+  @  4:c4d7f3def76d d
   |
-  o  3:6de59d13424a f
+  o  3:575228819b7e f
   |
-  o  2:ee283cb5f2d5 e
+  o  2:505a591af19e e
   |
-  o  1:d2ae7f538514 b
+  o  1:97d72e5f12c7 b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
 
 post-fold manifest
@@ -78,19 +86,19 @@
   f
 
-check histedit_source
+check histedit_source, including that it uses the later date, from the first changeset
 
   $ hg log --debug --rev 3
-  changeset:   3:6de59d13424a8a13acd3e975514aed29dd0d9b2d
+  changeset:   3:575228819b7e6ed69e8c0a6a383ee59a80db7358
   phase:       draft
-  parent:      2:ee283cb5f2d5955443f23a27b697a04339e9a39a
+  parent:      2:505a591af19eed18f560af827b9e03d2076773dc
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    3:81eede616954057198ead0b2c73b41d1f392829a
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:06 1970 +0000
   files+:      c f
   extra:       branch=default
-  extra:       histedit_source=a4f7421b80f79fcc59fff01bcbf4a53d127dd6d3,177f92b773850b59254aa5e923436f921b55483b
+  extra:       histedit_source=7cad1d7030207872dfd1c3a7cb430f24f2884086,ff2c9fa2018b15fa74b33363bda9527323e2a99f
   description:
   f
   ***
@@ -98,43 +106,43 @@
 
-rollup will fold without preserving the folded commit's message
+rollup will fold without preserving the folded commit's message or date
 
   $ OLDHGEDITOR=$HGEDITOR
   $ HGEDITOR=false
-  $ hg histedit d2ae7f538514 --commands - 2>&1 <<EOF | fixbundle
-  > pick d2ae7f538514 b
-  > roll ee283cb5f2d5 e
-  > pick 6de59d13424a f
-  > pick 9c277da72c9b d
+  $ hg histedit 97d72e5f12c7 --commands - 2>&1 <<EOF | fixbundle
+  > pick 97d72e5f12c7 b
+  > roll 505a591af19e e
+  > pick 575228819b7e f
+  > pick c4d7f3def76d d
   > EOF
   $ HGEDITOR=$OLDHGEDITOR
 
 log after edit
   $ hg logt --graph
-  @  3:c4a9eb7989fc d
+  @  3:bab801520cec d
   |
-  o  2:8e03a72b6f83 f
+  o  2:58c8f2bfc151 f
   |
-  o  1:391ee782c689 b
+  o  1:5d939c56c72e b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
 
 description is taken from rollup target commit
 
   $ hg log --debug --rev 1
-  changeset:   1:391ee782c68930be438ccf4c6a403daedbfbffa5
+  changeset:   1:5d939c56c72e77e29f5167696218e2131a40f5cf
   phase:       draft
-  parent:      0:cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  parent:      0:8580ff50825a50c8f716709acdf8de0deddcd6ab
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    1:b5e112a3a8354e269b1524729f0918662d847c38
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:02 1970 +0000
   files+:      b e
   extra:       branch=default
-  extra:       histedit_source=d2ae7f538514cd87c17547b0de4cea71fe1af9fb,ee283cb5f2d5955443f23a27b697a04339e9a39a
+  extra:       histedit_source=97d72e5f12c7e84f85064aa72e5a297142c36ed9,505a591af19eed18f560af827b9e03d2076773dc
   description:
   b
 
@@ -163,13 +171,13 @@
   > EOF
 
   $ rm -f .hg/last-message.txt
-  $ hg status --rev '8e03a72b6f83^1::c4a9eb7989fc'
+  $ hg status --rev '58c8f2bfc151^1::bab801520cec'
   A c
   A d
   A f
-  $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit 8e03a72b6f83 --commands - 2>&1 <<EOF
-  > pick 8e03a72b6f83 f
-  > fold c4a9eb7989fc d
+  $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit 58c8f2bfc151 --commands - 2>&1 <<EOF
+  > pick 58c8f2bfc151 f
+  > fold bab801520cec d
   > EOF
   allow non-folding commit
   ==== before editing
@@ -209,37 +217,37 @@
   $ cd ..
   $ rm -r r
 
-folding preserves initial author
---------------------------------
+folding preserves initial author but uses later date
+----------------------------------------------------
 
   $ initrepo
-  $ hg ci --user "someone else" --amend --quiet
+  $ hg ci -d '7 0' --user "someone else" --amend --quiet
 
 tip before edit
   $ hg log --rev .
-  changeset:   5:a00ad806cb55
+  changeset:   5:10c36dd37515
   tag:         tip
   user:        someone else
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:07 1970 +0000
   summary:     f
 
   $ hg --config progress.debug=1 --debug \
-  > histedit e860deea161a --commands - 2>&1 <<EOF | \
+  > histedit 1ddb6c90f2ee --commands - 2>&1 <<EOF | \
   > egrep 'editing|unresolved'
-  > pick e860deea161a e
-  > fold a00ad806cb55 f
+  > pick 1ddb6c90f2ee e
+  > fold 10c36dd37515 f
   > EOF
-  editing: pick e860deea161a 4 e 1/2 changes (50.00%)
-  editing: fold a00ad806cb55 5 f 2/2 changes (100.00%)
+  editing: pick 1ddb6c90f2ee 4 e 1/2 changes (50.00%)
+  editing: fold 10c36dd37515 5 f 2/2 changes (100.00%)
 
-tip after edit
+tip after edit, which should use the later date, from the second changeset
   $ hg log --rev .
-  changeset:   4:698d4e8040a1
+  changeset:   4:e4f3ec5d0b40
   tag:         tip
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:07 1970 +0000
   summary:     e
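The histedit tests above all pin down one date rule. A minimal sketch of that
rule in plain Python (an illustration only, not histedit's actual code; dates
are (unixtime, offset) pairs as in the changelog):

    # fold keeps the later of the two dates; roll, like the description,
    # simply keeps the fold target's own date
    def folded_date(targetdate, foldeddate, is_roll):
        if is_roll:
            return targetdate
        return max(targetdate, foldeddate, key=lambda d: d[0])

    # matches the tests: folding d (t=4) with the t=7 commit yields t=7 ...
    assert folded_date((4, 0), (7, 0), is_roll=False) == (7, 0)
    # ... while rolling the same commit keeps d's t=4
    assert folded_date((4, 0), (7, 0), is_roll=True) == (4, 0)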
--- a/tests/test-histedit-obsolete.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-obsolete.t Sat Mar 11 13:53:14 2017 -0500
@@ -136,7 +136,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose <<EOF | grep histedit
   > pick 177f92b77385 2 c
--- a/tests/test-histedit-outgoing.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-histedit-outgoing.t Sat Mar 11 13:53:14 2017 -0500
@@ -54,7 +54,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
   $ cd ..
@@ -88,7 +88,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
   $ cd ..
@@ -114,7 +114,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 test to check number of roots in outgoing revisions
--- a/tests/test-hook.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-hook.t Sat Mar 11 13:53:14 2017 -0500
@@ -832,6 +832,50 @@
   [1]
 
   $ cd ..
 
+check whether HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+(emulate a transaction running concurrently by copying
+.hg/store/00changelog.i.a in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/store/00changelog.i.a .hg/store/00changelog.i.a.saved
+  > exit 1 # to avoid adding new revision for subsequent tests
+  > EOF
+  $ cd a
+  $ hg tip -q
+  4:539e4b31b6dc
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
+
+(check (in)visibility of the new changeset while a transaction is
+running in the repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo '@a'
+  > hg -R $TESTTMP/a tip -q
+  > echo '@a/nested'
+  > hg -R $TESTTMP/a/nested tip -q
+  > exit 1 # to avoid adding new revision for subsequent tests
+  > EOF
+  $ hg init nested
+  $ cd nested
+  $ echo a > a
+  $ hg add a
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
+  @a
+  4:539e4b31b6dc
+  @a/nested
+  0:bf5e395ced2c
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+
 Hook from untrusted hgrc are reported as failure
 ================================================
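The same check can be written as an external Python hook. A minimal sketch,
assuming only what the test above exercises (Mercurial exports HG_PENDING,
naming the root of the repository whose transaction is open, and hg
invocations against exactly that root see the pending changeset; the file
name checkpending.py is hypothetical):

    #!/usr/bin/env python
    # checkpending.py - illustrative stand-in for checkpending.sh
    import os
    import subprocess

    def main():
        root = os.environ.get('HG_PENDING', '.')
        # 'hg tip' sees the uncommitted revision only when run against the
        # repository named by HG_PENDING; unrelated repositories (like the
        # nested one above) still show their last committed tip
        subprocess.check_call(['hg', '-R', root, 'tip', '-q'])
        return 1  # non-zero aborts the transaction, as in the tests above

    if __name__ == '__main__':
        raise SystemExit(main())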
--- a/tests/test-http-bundle1.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-http-bundle1.t Sat Mar 11 13:53:14 2017 -0500
@@ -28,11 +28,11 @@
 #if windows
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': * (glob)
+  abort: cannot start server at 'localhost:$HGPORT1': * (glob)
   [255]
 #else
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT1': Address already in use
   [255]
 #endif
   $ cd ..
--- a/tests/test-http-protocol.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-http-protocol.t Sat Mar 11 13:53:14 2017 -0500
@@ -16,9 +16,9 @@
 compression formats are advertised in compression capability
 
 #if zstd
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
 #else
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
 #endif
 
   $ killdaemons.py
@@ -27,7 +27,7 @@
   $ hg --config server.compressionengines=none -R server serve -p $HGPORT -d --pid-file hg.pid
   $ cat hg.pid > $DAEMON_PIDS
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
 
   $ killdaemons.py
@@ -35,7 +35,7 @@
   $ hg --config server.compressionengines=none,zlib -R server serve -p $HGPORT -d --pid-file hg.pid
   $ cat hg.pid > $DAEMON_PIDS
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
 
   $ killdaemons.py
@@ -46,7 +46,7 @@
 Server should send application/mercurial-0.1 to clients if no Accept is used
 
-  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -55,7 +55,7 @@
 Server should send application/mercurial-0.1 when client says it wants it
 
-  $ get-with-headers.py --hgproto '0.1' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -64,14 +64,14 @@
 Server should send application/mercurial-0.2 when client says it wants it
 
-  $ get-with-headers.py --hgproto '0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.2
   date: * (glob)
   server: * (glob)
   transfer-encoding: chunked
 
-  $ get-with-headers.py --hgproto '0.1 0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.2
   date: * (glob)
@@ -80,7 +80,7 @@
 Requesting a compression format that server doesn't support results will fall back to 0.1
 
-  $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -90,7 +90,7 @@
 #if zstd
 zstd is used if available
 
-  $ get-with-headers.py --hgproto '0.2 comp=zstd' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zstd' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 36 --sha1 resp
   resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -101,7 +101,7 @@
 application/mercurial-0.2 is not yet used on non-streaming responses
 
-  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=heads' -
+  $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=heads' -
   200 Script output follows
   content-length: 41
   content-type: application/mercurial-0.1
@@ -118,11 +118,11 @@
 No Accept will send 0.1+zlib, even though "none" is preferred b/c "none" isn't supported on 0.1
 
-  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
+  $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
   200 Script output follows
   content-type: application/mercurial-0.1
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 28 --sha1 resp
   resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -130,7 +130,7 @@
 Explicit 0.1 will send zlib because "none" isn't supported on 0.1
 
-  $ get-with-headers.py --hgproto '0.1' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py --hgproto '0.1' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 28 --sha1 resp
   resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -139,7 +139,7 @@
 0.2 with no compression will get "none" because that is server's preference
 (spec says ZL and UN are implicitly supported)
 
-  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 32 --sha1 resp
   resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -147,7 +147,7 @@
 Client receives server preference even if local order doesn't match
 
-  $ get-with-headers.py --hgproto '0.2 comp=zlib,none' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zlib,none' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 32 --sha1 resp
   resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -155,7 +155,7 @@
 Client receives only supported format even if not server preferred format
 
-  $ get-with-headers.py --hgproto '0.2 comp=zlib' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zlib' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 33 --sha1 resp
   resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
--- a/tests/test-http.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-http.t Sat Mar 11 13:53:14 2017 -0500
@@ -23,7 +23,7 @@
   [255]
 #else
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT1': Address already in use
   [255]
 #endif
   $ cd ..
--- a/tests/test-https.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-https.t Sat Mar 11 13:53:14 2017 -0500
@@ -36,11 +36,11 @@
 #if windows
   $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
-  abort: cannot start server at ':$HGPORT':
+  abort: cannot start server at 'localhost:$HGPORT':
   [255]
 #else
   $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
-  abort: cannot start server at ':$HGPORT': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT': Address already in use
   [255]
 #endif
   $ cd ..
@@ -278,17 +278,17 @@
 cacert mismatch
 
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
-  > https://127.0.0.1:$HGPORT/
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
-  abort: 127.0.0.1 certificate error: certificate is for localhost (glob)
-  (set hostsecurity.127.0.0.1:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely) (glob)
+  > https://$LOCALIP:$HGPORT/
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  abort: $LOCALIP certificate error: certificate is for localhost
+  (set hostsecurity.$LOCALIP:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
   [255]
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
-  > https://127.0.0.1:$HGPORT/ --insecure
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
-  warning: connection security to 127.0.0.1 is disabled per current settings; communication is susceptible to eavesdropping and tampering (glob)
+  > https://$LOCALIP:$HGPORT/ --insecure
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  warning: connection security to $LOCALIP is disabled per current settings; communication is susceptible to eavesdropping and tampering
   searching for changes
   no changes found
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-other.pem"
@@ -382,6 +382,7 @@
 - works without cacerts (hostfingerprints)
   $ hg -R copy-pull id https://localhost:$HGPORT/ --insecure --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
 - works without cacerts (hostsecurity)
@@ -396,6 +397,7 @@
 - multiple fingerprints specified and first matches
   $ hg --config 'hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --insecure
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
   $ hg --config 'hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/
@@ -405,6 +407,7 @@
 - multiple fingerprints specified and last matches
   $ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/ --insecure
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
   $ hg --config 'hostsecurity.localhost:fingerprints=sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/
@@ -434,8 +437,9 @@
 - ignores that certificate doesn't match hostname
-  $ hg -R copy-pull id https://127.0.0.1:$HGPORT/ --config hostfingerprints.127.0.0.1=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  $ hg -R copy-pull id https://$LOCALIP:$HGPORT/ --config hostfingerprints.$LOCALIP=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for $LOCALIP found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: $LOCALIP.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
 Ports used by next test. Kill servers.
@@ -571,9 +575,10 @@
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
   searching for changes
   no changes found
-  $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://127.0.0.1:$HGPORT/ --config hostfingerprints.127.0.0.1=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://localhost:$HGPORT/ --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 --trace
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   searching for changes
   no changes found
--- a/tests/test-i18n.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-i18n.t Sat Mar 11 13:53:14 2017 -0500
@@ -29,14 +29,15 @@
 Test keyword search in translated help text:
 
-  $ HGENCODING=UTF-8 LANGUAGE=de hg help -k blättern
+  $ HGENCODING=UTF-8 LANGUAGE=de hg help -k Aktualisiert
   Themen:
 
-   extensions Benutzung erweiterter Funktionen
+   subrepos Unterarchive
 
-  Erweiterungen:
+  Befehle:
 
-   pager Verwendet einen externen Pager zum Bl\xc3\xa4ttern in der Ausgabe von Befehlen (esc)
+   pull Ruft \xc3\x84nderungen von der angegebenen Quelle ab (esc)
+   update Aktualisiert das Arbeitsverzeichnis (oder wechselt die Version)
 
 #endif
--- a/tests/test-largefiles-small-disk.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-largefiles-small-disk.t Sat Mar 11 13:53:14 2017 -0500
@@ -5,7 +5,11 @@
   > from mercurial import util
   > #
   > # this makes the original largefiles code abort:
+  > _origcopyfileobj = shutil.copyfileobj
   > def copyfileobj(fsrc, fdst, length=16*1024):
+  >     # allow journal files (used by transaction) to be written
+  >     if 'journal.' in fdst.name:
+  >         return _origcopyfileobj(fsrc, fdst, length)
   >     fdst.write(fsrc.read(4))
   >     raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
   > shutil.copyfileobj = copyfileobj
--- a/tests/test-largefiles-wireproto.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-largefiles-wireproto.t Sat Mar 11 13:53:14 2017 -0500
@@ -347,7 +347,7 @@
   searching 2 changesets for largefiles
   verified existence of 2 revisions of 2 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  $LOCALIP - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   $ hg -R batchverifyclone update
   getting changed largefiles
   2 largefiles updated, 0 removed
@@ -384,7 +384,7 @@
   searching 3 changesets for largefiles
   verified existence of 3 revisions of 3 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  $LOCALIP - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
   $ killdaemons.py
--- a/tests/test-largefiles.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-largefiles.t Sat Mar 11 13:53:14 2017 -0500
@@ -192,7 +192,7 @@
   $ hg serve -d -p $HGPORT --pid-file ../hg.pid
   $ cat ../hg.pid >> $DAEMON_PIDS
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/tip/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/tip/?style=raw'
   200 Script output follows
 
 
@@ -201,7 +201,7 @@
   -rw-r--r-- 9 normal3
 
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/tip/sub/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/tip/sub/?style=raw'
   200 Script output follows
--- a/tests/test-lock.py Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-lock.py Sat Mar 11 13:53:14 2017 -0500
@@ -10,7 +10,7 @@
 from mercurial import (
     error,
     lock,
-    scmutil,
+    vfs as vfsmod,
 )
 
 testlockname = 'testlock'
@@ -36,7 +36,7 @@
         self._acquirecalled = False
         self._releasecalled = False
         self._postreleasecalled = False
-        self.vfs = scmutil.vfs(dir, audit=False)
+        self.vfs = vfsmod.vfs(dir, audit=False)
         self._pidoffset = pidoffset
 
     def makelock(self, *args, **kwargs):
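This hunk (and the test-parseindex.t one below) only track a rename: the vfs
classes moved out of scmutil into their own mercurial.vfs module. A minimal
usage sketch of the new spelling, under that assumption (the temporary
directory is illustrative):

    import tempfile
    from mercurial import vfs as vfsmod

    d = tempfile.mkdtemp()
    opener = vfsmod.vfs(d, audit=False)  # same constructor the test uses
    opener.write('f', b'data')           # paths are relative to the base
    assert opener.read('f') == b'data'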
--- a/tests/test-logtoprocess.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-logtoprocess.t Sat Mar 11 13:53:14 2017 -0500
@@ -1,3 +1,7 @@
+ATTENTION: logtoprocess runs commands asynchronously. If you want to test a
+command's output, be sure to append "| cat" so the test waits for it;
+otherwise the test will be flaky.
+
 Test if logtoprocess correctly captures command-related log calls.
 
   $ hg init
@@ -10,6 +14,7 @@
   > def foo(ui, repo):
   >     ui.log('foo', 'a message: %(bar)s\n', bar='spam')
   > EOF
+  $ cp $HGRCPATH $HGRCPATH.bak
   $ cat >> $HGRCPATH << EOF
   > [extensions]
   > logtoprocess=
@@ -33,9 +38,8 @@
 Running a command triggers both a ui.log('command') and a
 ui.log('commandfinish') call. The foo command also uses ui.log.
 
-Use head to ensure we wait for all lines to be produced, and sort to avoid
-ordering issues between the various processes we spawn:
-  $ hg foo | head -n 17 | sort
+Use sort to avoid ordering issues between the various processes we spawn:
+  $ hg foo | cat | sort
 
@@ -52,3 +56,18 @@
   logtoprocess commandfinish output:
   logtoprocess foo output: spam
+
+Confirm that logging blocked time catches stdio properly:
+  $ cp $HGRCPATH.bak $HGRCPATH
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > logtoprocess=
+  > pager=
+  > [logtoprocess]
+  > uiblocked=echo "\$EVENT stdio \$OPT_STDIO_BLOCKED ms command \$OPT_COMMAND_DURATION ms"
+  > [ui]
+  > logblockedtimes=True
+  > EOF
+
+  $ hg log | cat
+  uiblocked stdio [0-9]+.[0-9]* ms command [0-9]+.[0-9]* ms (re)
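The "| cat" advice works because of how pipes report EOF. A minimal sketch of
the failure mode in plain Python, independent of logtoprocess itself:

    import subprocess
    import sys

    # spawn a child that inherits stdout but only writes after a delay,
    # loosely mimicking an asynchronously spawned logging script
    subprocess.Popen([sys.executable, '-c',
        "import time, sys; time.sleep(0.1); sys.stdout.write('logged\\n')"])
    sys.stdout.write('command output\n')
    # without waiting, this script can exit before 'logged' appears; when
    # the whole pipeline ends in '| cat', cat keeps reading until every
    # inherited copy of the pipe's write end closes, so the late line from
    # the logger is reliably captured as well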
--- a/tests/test-mac-packages.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-mac-packages.t Sat Mar 11 13:53:14 2017 -0500
@@ -25,6 +25,10 @@
   ./Library/Python/2.7/site-packages/mercurial/pure/bdiff.py 100644 0/0
   ./Library/Python/2.7/site-packages/mercurial/pure/bdiff.pyc 100644 0/0
   ./Library/Python/2.7/site-packages/mercurial/pure/bdiff.pyo 100644 0/0
+  $ grep zsh/site-functions/hg boms.txt | cut -d ' ' -f 1,2,3
+  ./usr/local/share/zsh/site-functions/hg 100640 0/0
+  $ grep hg-completion.bash boms.txt | cut -d ' ' -f 1,2,3
+  ./usr/local/hg/contrib/hg-completion.bash 100640 0/0
   $ egrep 'man[15]' boms.txt | cut -d ' ' -f 1,2,3
   ./usr/local/share/man/man1 40755 0/0
   ./usr/local/share/man/man1/hg.1 100644 0/0
@@ -40,7 +44,7 @@
   ./Library/Python/2.7/site-packages/mercurial/localrepo.py 100644 0/0
   ./Library/Python/2.7/site-packages/mercurial/localrepo.pyc 100644 0/0
   ./Library/Python/2.7/site-packages/mercurial/localrepo.pyo 100644 0/0
-  $ grep '/hg ' boms.txt | cut -d ' ' -f 1,2,3
+  $ grep 'bin/hg ' boms.txt | cut -d ' ' -f 1,2,3
   ./usr/local/bin/hg 100755 0/0
 
 Make sure the built binary uses the system Python interpreter
--- a/tests/test-manifest.py Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-manifest.py Sat Mar 11 13:53:14 2017 -0500
@@ -320,7 +320,7 @@
             'bar/baz/qux.py': None,
             'foo': (MISSING, (BIN_HASH_1, '')),
         }
-        self.assertEqual(want, pruned.diff(short, True))
+        self.assertEqual(want, pruned.diff(short, clean=True))
 
     def testReversedLines(self):
         backwards = ''.join(
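The one-word change above is about call-site safety: a bare positional True
says nothing about which flag it flips, and it silently rebinds if a
parameter is ever inserted before it. A toy illustration (deliberately not
manifestdict's real signature):

    class Toy(object):
        def diff(self, other, match=None, clean=False):
            return {'match': match, 'clean': clean}

    t = Toy()
    # a positional True binds to 'match', not 'clean':
    assert t.diff(t, True) == {'match': True, 'clean': False}
    # naming the flag keeps the call correct and self-documenting:
    assert t.diff(t, clean=True) == {'match': None, 'clean': True}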
--- a/tests/test-minirst.py Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-minirst.py Sat Mar 11 13:53:14 2017 -0500
@@ -118,6 +118,13 @@
 | This is the first line.
   The line continues here.
 | This is the second line.
+
+Bullet lists are also detected:
+
+* This is the first bullet
+* This is the second bullet
+  It has 2 lines
+* This is the third bullet
 """
 
 debugformats('lists', lists)
--- a/tests/test-minirst.py.out Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-minirst.py.out Sat Mar 11 13:53:14 2017 -0500
@@ -187,6 +187,12 @@
 This is the first line. The line continues here.
 This is the second line.
+
+Bullet lists are also detected:
+
+* This is the first bullet
+* This is the second bullet It has 2 lines
+* This is the third bullet
 ----------------------------------------------------------------------
 
 30 column format:
@@ -231,6 +237,14 @@
 This is the first line. The line continues here.
 This is the second line.
+
+Bullet lists are also
+detected:
+
+* This is the first bullet
+* This is the second bullet It
+  has 2 lines
+* This is the third bullet
 ----------------------------------------------------------------------
 
 html format:
@@ -276,6 +290,14 @@
 <li> This is the first line. The line continues here.
 <li> This is the second line.
 </ol>
+<p>
+Bullet lists are also detected:
+</p>
+<ul>
+ <li> This is the first bullet
+ <li> This is the second bullet It has 2 lines
+ <li> This is the third bullet
+</ul>
 ----------------------------------------------------------------------
 
 == options ==
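The new expected output can be reproduced directly against the renderer under
test. A minimal sketch (Python 2 era; assumes minirst.format() with the same
30-column width as the second block above):

    from mercurial import minirst

    text = ('Bullet lists are also detected:\n'
            '\n'
            '* This is the first bullet\n'
            '* This is the second bullet\n'
            '  It has 2 lines\n'
            '* This is the third bullet\n')
    # wraps each item with a hanging indent, as in the expected output
    print(minirst.format(text, width=30))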
--- a/tests/test-mq.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-mq.t Sat Mar 11 13:53:14 2017 -0500
@@ -25,7 +25,7 @@
   Known patches are represented as patch files in the .hg/patches
   directory. Applied patches are both patch files and changesets.
 
-  Common tasks (use 'hg help command' for more details):
+  Common tasks (use 'hg help COMMAND' for more details):
 
   create new patch                          qnew
   import existing patch                     qimport
--- a/tests/test-pager.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-pager.t Sat Mar 11 13:53:14 2017 -0500
@@ -26,7 +26,7 @@
   > hg ci -m "modify a $x"
   > done
 
-By default diff and log are paged, but summary is not:
+By default diff and log are paged, but id is not:
 
   $ hg diff -c 2 --pager=yes
   paged! 'diff -r f4be7687d414 -r bce265549556 a\n'
@@ -50,25 +50,16 @@
   paged! 'summary:     modify a 9\n'
   paged! '\n'
 
-  $ hg summary
-  parent: 10:46106edeeb38 tip
-   modify a 10
-  branch: default
-  commit: (clean)
-  update: (current)
-  phases: 11 draft
+  $ hg id
+  46106edeeb38 tip
 
-We can enable the pager on summary:
+We can enable the pager on id:
 
-  $ hg --config pager.attend-summary=yes summary
-  paged! 'parent: 10:46106edeeb38 tip\n'
-  paged! ' modify a 10\n'
-  paged! 'branch: default\n'
-  paged! 'commit: (clean)\n'
-  paged! 'update: (current)\n'
-  paged! 'phases: 11 draft\n'
+  $ hg --config pager.attend-id=yes id
+  paged! '46106edeeb38 tip\n'
 
-If we completely change the attend list that's respected:
+Setting attend-$COMMAND to a false value works, even with pager in
+core:
 
   $ hg --config pager.attend-diff=no diff -c 2
   diff -r f4be7687d414 -r bce265549556 a
@@ -79,15 +70,6 @@
    a
    a 1
   +a 2
 
-  $ hg --config pager.attend=summary diff -c 2
-  diff -r f4be7687d414 -r bce265549556 a
-  --- a/a Thu Jan 01 00:00:00 1970 +0000
-  +++ b/a Thu Jan 01 00:00:00 1970 +0000
-  @@ -1,2 +1,3 @@
-   a
-   a 1
-  +a 2
-
 If 'log' is in attend, then 'history' should also be paged:
   $ hg history --limit 2 --config pager.attend=log
   paged! 'changeset:   10:46106edeeb38\n'
@@ -102,61 +84,17 @@
   paged! 'summary:     modify a 9\n'
   paged! '\n'
 
-Possible bug: history is explicitly ignored in pager config, but
-because log is in the attend list it still gets pager treatment.
-
-  $ hg history --limit 2 --config pager.attend=log \
-  >   --config pager.ignore=history
-  paged! 'changeset:   10:46106edeeb38\n'
-  paged! 'tag:         tip\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 10\n'
-  paged! '\n'
-  paged! 'changeset:   9:6dd8ea7dd621\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 9\n'
-  paged! '\n'
-
-Possible bug: history is explicitly marked as attend-history=no, but
-it doesn't fail to get paged because log is still in the attend list.
-
-  $ hg history --limit 2 --config pager.attend-history=no
-  paged! 'changeset:   10:46106edeeb38\n'
-  paged! 'tag:         tip\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 10\n'
-  paged! '\n'
-  paged! 'changeset:   9:6dd8ea7dd621\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 9\n'
-  paged! '\n'
-
-Possible bug: disabling pager for log but enabling it for history
-doesn't result in history being paged.
-
-  $ hg history --limit 2 --config pager.attend-log=no \
-  >   --config pager.attend-history=yes
-  changeset:   10:46106edeeb38
-  tag:         tip
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     modify a 10
-
-  changeset:   9:6dd8ea7dd621
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     modify a 9
-
 Pager should not start if stdout is not a tty.
 
   $ hg log -l1 -q --config ui.formatted=False
   10:46106edeeb38
 
+Pager should be disabled if pager.pager is empty (otherwise the output
+would be silently lost).
+
+  $ hg log -l1 -q --config pager.pager=
+  10:46106edeeb38
+
 Pager with color enabled allows colors to come through by default,
 even though stdout is no longer a tty.
   $ cat >> $HGRCPATH <<EOF
@@ -207,6 +145,11 @@
   $ A=2 hg --config pager.attend-printa=yes printa
   paged! '2\n'
 
+Something that's explicitly attended is still not paginated if the
+pager is globally set to off using a flag:
+  $ A=2 hg --config pager.attend-printa=yes printa --pager=no
+  2
+
 Pager should not override the exit code of other commands
 
   $ cat >> $TESTTMP/fortytwo.py <<'EOF'
@@ -227,3 +170,61 @@
   $ hg fortytwo --pager=on
   paged! '42\n'
   [42]
+
+A command that asks for paging using ui.pager() directly works:
+  $ hg blame a
+  paged! ' 0: a\n'
+  paged! ' 1: a 1\n'
+  paged! ' 2: a 2\n'
+  paged! ' 3: a 3\n'
+  paged! ' 4: a 4\n'
+  paged! ' 5: a 5\n'
+  paged! ' 6: a 6\n'
+  paged! ' 7: a 7\n'
+  paged! ' 8: a 8\n'
+  paged! ' 9: a 9\n'
+  paged! '10: a 10\n'
+but not with HGPLAIN
+  $ HGPLAIN=1 hg blame a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
+explicit flags work too:
+  $ hg blame --pager=no a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
+
+Put annotate in the ignore list for pager:
+  $ cat >> $HGRCPATH <<EOF
+  > [pager]
+  > ignore = annotate
+  > EOF
+  $ hg blame a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
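The blame checks above rely on a command opting in to paging itself. A
minimal sketch of such a command as a small extension, modeled on this
test's own printa example (cmdutil.command was the command registrar of
this era; all names here are illustrative):

    from mercurial import cmdutil

    cmdtable = {}
    command = cmdutil.command(cmdtable)

    @command('printa', [], 'hg printa')
    def printa(ui, repo):
        # a no-op under HGPLAIN, --pager=no, or a [pager] ignore entry
        ui.pager('printa')
        ui.write('a\n')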
--- a/tests/test-parseindex.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-parseindex.t Sat Mar 11 13:53:14 2017 -0500
@@ -26,7 +26,7 @@
   summary:     change foo
 
   $ cat >> test.py << EOF
-  > from mercurial import changelog, scmutil
+  > from mercurial import changelog, vfs
   > from mercurial.node import *
   >
   > class singlebyteread(object):
@@ -42,7 +42,7 @@
   >         return getattr(self.real, key)
   >
   > def opener(*args):
-  >     o = scmutil.opener(*args)
+  >     o = vfs.vfs(*args)
   >     def wrapper(*a):
   >         f = o(*a)
   >         return singlebyteread(f)
@@ -67,8 +67,8 @@
   $ cd a
 
   $ python <<EOF
-  > from mercurial import changelog, scmutil
-  > cl = changelog.changelog(scmutil.vfs('.hg/store'))
+  > from mercurial import changelog, vfs
+  > cl = changelog.changelog(vfs.vfs('.hg/store'))
   > print 'good heads:'
   > for head in [0, len(cl) - 1, -1]:
   >     print '%s: %r' % (head, cl.reachableroots(0, [head], [0]))
@@ -147,8 +147,8 @@
   $ cat <<EOF > test.py
   > import sys
-  > from mercurial import changelog, scmutil
-  > cl = changelog.changelog(scmutil.vfs(sys.argv[1]))
+  > from mercurial import changelog, vfs
+  > cl = changelog.changelog(vfs.vfs(sys.argv[1]))
   > n0, n1 = cl.node(0), cl.node(1)
   > ops = [
   >     ('reachableroots',
--- a/tests/test-patchbomb.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-patchbomb.t Sat Mar 11 13:53:14 2017 -0500
@@ -2371,6 +2371,128 @@
 
+test flag template:
+  $ echo foo > intro.text
+  $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -r 0:1 \
+  >  --desc intro.text --subject test \
+  >  --config patchbomb.flagtemplate='R{rev}'
+  this patch series consists of 2 patches.
+
+  Cc:
+
+  displaying [PATCH 0 of 2 R1] test ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH 0 of 2 R1] test
+  Message-Id: <patchbomb.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:00 +0000
+  From: quux
+  To: foo
+
+  foo
+
+  displaying [PATCH 1 of 2 R0] a ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH 1 of 2 R0] a
+  X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  X-Mercurial-Series-Index: 1
+  X-Mercurial-Series-Total: 2
+  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
+  In-Reply-To: <patchbomb.60@*> (glob)
+  References: <patchbomb.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:01 +0000
+  From: quux
+  To: foo
+
+  # HG changeset patch
+  # User test
+  # Date 1 0
+  #      Thu Jan 01 00:00:01 1970 +0000
+  # Node ID 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  # Parent  0000000000000000000000000000000000000000
+  a
+
+  diff -r 000000000000 -r 8580ff50825a a
+  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a Thu Jan 01 00:00:01 1970 +0000
+  @@ -0,0 +1,1 @@
+  +a
+
+  displaying [PATCH 2 of 2 R1] b ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH 2 of 2 R1] b
+  X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
+  X-Mercurial-Series-Index: 2
+  X-Mercurial-Series-Total: 2
+  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
+  In-Reply-To: <patchbomb.60@*> (glob)
+  References: <patchbomb.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:02 +0000
+  From: quux
+  To: foo
+
+  # HG changeset patch
+  # User test
+  # Date 2 0
+  #      Thu Jan 01 00:00:02 1970 +0000
+  # Node ID 97d72e5f12c7e84f85064aa72e5a297142c36ed9
+  # Parent  8580ff50825a50c8f716709acdf8de0deddcd6ab
+  b
+
+  diff -r 8580ff50825a -r 97d72e5f12c7 b
+  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+  +++ b/b Thu Jan 01 00:00:02 1970 +0000
+  @@ -0,0 +1,1 @@
+  +b
+
+
+test flag template plus --flag:
+  $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -r 0 --flag 'V2' \
+  >  --config patchbomb.flagtemplate='{branch} {flags}'
+  this patch series consists of 1 patches.
+
+  Cc:
+
+  displaying [PATCH default V2] a ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH default V2] a
+  X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  X-Mercurial-Series-Index: 1
+  X-Mercurial-Series-Total: 1
+  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:00 +0000
+  From: quux
+  To: foo
+
+  # HG changeset patch
+  # User test
+  # Date 1 0
+  #      Thu Jan 01 00:00:01 1970 +0000
+  # Node ID 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  # Parent  0000000000000000000000000000000000000000
+  a
+
+  diff -r 000000000000 -r 8580ff50825a a
+  --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a Thu Jan 01 00:00:01 1970 +0000
+  @@ -0,0 +1,1 @@
+  +a
+
 test multi-byte domain parsing:
   $ UUML=`$PYTHON -c 'import sys; sys.stdout.write("\374")'`
   $ HGENCODING=iso-8859-1
--- a/tests/test-phases.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-phases.t Sat Mar 11 13:53:14 2017 -0500
@@ -479,12 +479,8 @@
   o  0 public A
 
-move changeset forward and backward and test kill switch
+move changeset forward and backward
 
-  $ cat <<EOF >> $HGRCPATH
-  > [experimental]
-  > nativephaseskillswitch = true
-  > EOF
   $ hg phase --draft --force 1::4
   $ hg log -G --template "{rev} {phase} {desc}\n"
   @  7 secret merge B' and E
@@ -505,10 +501,6 @@
 
 test partial failure
 
-  $ cat <<EOF >> $HGRCPATH
-  > [experimental]
-  > nativephaseskillswitch = false
-  > EOF
   $ hg phase --public 7
   $ hg phase --draft '5 or 7'
   cannot move 1 changesets to a higher phase, use --force
@@ -590,3 +582,47 @@
   crosschecking files in changesets and manifests
   checking files
   7 files, 8 changesets, 7 total revisions
+
+  $ cd ..
+
+check whether HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+(emulate a transaction running concurrently by copying
+.hg/store/phaseroots.pending in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/store/phaseroots.pending .hg/store/phaseroots.pending.saved
+  > exit 1 # to avoid changing phase for subsequent tests
+  > EOF
+  $ cd push-dest
+  $ hg phase 6
+  6: draft
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" phase -f -s 6
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/store/phaseroots.pending.saved .hg/store/phaseroots.pending
+
+(check (in)visibility of the phaseroot while a transaction is running
+in the repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo '@initialrepo'
+  > hg -R $TESTTMP/initialrepo phase 7
+  > echo '@push-dest'
+  > hg -R $TESTTMP/push-dest phase 6
+  > exit 1 # to avoid changing phase for subsequent tests
+  > EOF
+  $ cd ../initialrepo
+  $ hg phase 7
+  7: public
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" phase -f -s 7
+  @initialrepo
+  7: secret
+  @push-dest
+  6: draft
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
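This test and the test-hook.t one above pin down the same visibility rule. A
minimal sketch of that rule (an illustration, not Mercurial's actual code):
pending data such as 00changelog.i.a or phaseroots.pending is honoured only
when HG_PENDING names this repository's root, so unrelated repositories see
nothing of someone else's open transaction:

    import os

    def shouldreadpending(reporoot):
        # reporoot stands in for repo.root
        return os.environ.get('HG_PENDING') == reporoot

    os.environ['HG_PENDING'] = '/path/to/repo'   # hypothetical root
    assert shouldreadpending('/path/to/repo')
    assert not shouldreadpending('/path/to/repo/nested')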
--- a/tests/test-pull-update.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-pull-update.t Sat Mar 11 13:53:14 2017 -0500
@@ -16,6 +16,21 @@
   $ echo 1.2 > foo
   $ hg ci -Am m
 
+Should respect config to disable dirty update
+  $ hg co -qC 0
+  $ echo 2 > foo
+  $ hg --config experimental.updatecheck=abort pull -u ../tt
+  pulling from ../tt
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  abort: uncommitted changes
+  [255]
+  $ hg --config extensions.strip= strip --no-backup tip
+  $ hg co -qC tip
+
 Should not update to the other topological branch:
 
   $ hg pull -u ../tt
--- a/tests/test-push-http-bundle1.t Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-push-http-bundle1.t Sat Mar 11 13:53:14 2017 -0500
@@ -79,7 +79,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -95,7 +95,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -111,7 +111,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
--- a/tests/test-push-http.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-push-http.t Sat Mar 11 13:53:14 2017 -0500 @@ -70,7 +70,7 @@ remote: adding file changes remote: added 1 changesets with 1 changes to 1 files remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1 - remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob) + remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob) % serve errors $ hg rollback repository tip rolled back to revision 0 (undo serve) @@ -87,7 +87,7 @@ remote: adding file changes remote: added 1 changesets with 1 changes to 1 files remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1 - remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob) + remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob) % serve errors $ hg rollback repository tip rolled back to revision 0 (undo serve) @@ -104,7 +104,7 @@ remote: adding file changes remote: added 1 changesets with 1 changes to 1 files remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1 - remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob) + remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob) % serve errors $ hg rollback repository tip rolled back to revision 0 (undo serve) @@ -125,7 +125,7 @@ remote: adding manifests remote: adding file changes remote: added 1 changesets with 1 changes to 1 files - remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob) + remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob) remote: pushkey-abort: prepushkey hook exited with status 1 remote: transaction abort! 
remote: rollback completed @@ -145,7 +145,7 @@ remote: adding manifests remote: adding file changes remote: added 1 changesets with 1 changes to 1 files - remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob) + remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:*: (glob) % serve errors $ hg rollback repository tip rolled back to revision 0 (undo serve)
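Note on the two diffs above: the hard-coded 127.0.0.1 in the expected hook output is replaced with a glob, presumably so the expectation also holds when the peer address is not IPv4 loopback (the tinyproxy change later in this changeset adds an HGIPV6 switch). Lines marked "(glob)" in the test harness treat "*" as "any characters". A minimal illustrative sketch of that matching idea, not the real run-tests.py matcher:

    import re

    def globmatch(expected, actual):
        # '*' matches any run of characters on the line, '?' matches a
        # single character, everything else is literal.
        pattern = ''.join('[^\n]*' if c == '*'
                          else '[^\n]' if c == '?'
                          else re.escape(c)
                          for c in expected)
        return re.match(pattern + r'\Z', actual) is not None

    # Both IPv4 and IPv6 peers now satisfy the new expectation:
    assert globmatch('remote:http:*:', 'remote:http:127.0.0.1:')
    assert globmatch('remote:http:*:', 'remote:http:[::1]:')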
--- a/tests/test-rebase-abort.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-rebase-abort.t Sat Mar 11 13:53:14 2017 -0500 @@ -374,10 +374,11 @@ $ hg --config extensions.n=$TESTDIR/failfilemerge.py rebase -s 3 -d tip rebasing 3:3a71550954f1 "b" rebasing 4:e80b69427d80 "c" + transaction abort! + rollback completed abort: ^C [255] $ hg rebase --abort - saved backup bundle to $TESTTMP/interrupted/.hg/strip-backup/3d8812cf300d-93041a90-backup.hg (glob) rebase aborted $ hg log -G --template "{rev} {desc} {bookmarks}" o 6 no-a @@ -398,7 +399,7 @@ parent: 0:df4f53cec30a base branch: default - commit: (clean) + commit: 1 unknown (clean) update: 6 new changesets (update) phases: 7 draft
--- a/tests/test-rebase-conflicts.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-rebase-conflicts.t Sat Mar 11 13:53:14 2017 -0500 @@ -218,13 +218,13 @@ $ hg rebase -s9 -d2 --debug # use debug to really check merge base used rebase onto 4bc80088dc6b starting from e31216eec445 + rebase status stored ignoring null merge rebase of 3 ignoring null merge rebase of 4 ignoring null merge rebase of 6 ignoring null merge rebase of 8 rebasing 9:e31216eec445 "more changes to f1" future parents are 2 and -1 - rebase status stored update to 2:4bc80088dc6b resolving manifests branchmerge: False, force: True, partial: False @@ -250,7 +250,6 @@ rebased as 19c888675e13 rebasing 10:2f2496ddf49d "merge" (tip) future parents are 11 and 7 - rebase status stored already in target merge against 10:2f2496ddf49d detach base 9:e31216eec445 @@ -268,6 +267,7 @@ committing changelog rebased as 2a7f09cac94c rebase merging completed + rebase status stored update back to initial working directory parent resolving manifests branchmerge: False, force: False, partial: False
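The reordering above ("rebase status stored" now precedes the first "rebasing ..." line, and is repeated after "rebase merging completed") reflects writing the rebase state file before any revision is processed, so an interruption during the very first step still leaves something for --abort to read. A minimal sketch of the pattern, using hypothetical helpers rather than the rebase extension's actual code:

    import json
    import os

    def rebase_all(statepath, revs, rebase_one):
        # Persist state *before* doing any work ("rebase status stored"),
        # then refresh it after each completed step.
        state = {'done': [], 'todo': list(revs)}
        with open(statepath, 'w') as f:
            json.dump(state, f)
        for rev in revs:
            rebase_one(rev)          # caller-supplied; may be interrupted
            state['done'].append(rev)
            state['todo'].remove(rev)
            with open(statepath, 'w') as f:
                json.dump(state, f)
        os.unlink(statepath)         # finished cleanly, drop the state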
--- a/tests/test-rebase-obsolete.t	Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-rebase-obsolete.t	Sat Mar 11 13:53:14 2017 -0500
@@ -175,7 +175,7 @@
   32af7686d403cf45b5d95f2d70cebea587ac806a 0 {5fddd98957c8a54a4d436dfe1da9d87f21a1b97b} (*) {'user': 'test'} (glob)
 
-More complex case were part of the rebase set were already rebased
+More complex case where part of the rebase set was already rebased
 
   $ hg rebase --rev 'desc(D)' --dest 'desc(H)'
   rebasing 9:08483444fef9 "D"
@@ -272,6 +272,35 @@
   D
 
+Start rebase from a commit that is obsolete but not hidden only because it's
+a working copy parent. We should be moved back to the starting commit as usual
+even though it is hidden (until we're moved there).
+
+  $ hg --hidden up -qr 'first(hidden())'
+  $ hg rebase --rev 13 --dest 15
+  rebasing 13:98f6af4ee953 "C"
+  $ hg log -G
+  o  16:294a2b93eb4d C
+  |
+  o  15:627d46148090 D
+  |
+  | o  12:462a34d07e59 B
+  | |
+  | o  11:4596109a6a43 D
+  | |
+  | o  7:02de42196ebe H
+  | |
+  +---o  6:eea13746799a G
+  | |/
+  | o  5:24b6387c8c8c F
+  | |
+  o |  4:9520eea781bc E
+  |/
+  | @  1:42ccdea3bb16 B
+  |/
+  o  0:cd010b8cd998 A
+
   $ cd ..
 
 collapse rebase
--- a/tests/test-rebase-scenario-global.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-rebase-scenario-global.t Sat Mar 11 13:53:14 2017 -0500 @@ -272,7 +272,8 @@ rebasing 6:eea13746799a "G" abort: cannot use revision 6 as base, result would have 3 parents [255] - + $ hg rebase --abort + rebase aborted These will abort gracefully (using --base): @@ -773,7 +774,7 @@ Get back to the root of cwd-vanish. Note that even though `cd ..` works on most systems, it does not work on FreeBSD 10, so we use an absolute path to get back to the repository. - $ cd $TESTTMP/cwd-vanish + $ cd $TESTTMP Test that rebase is done in topo order (issue5370) @@ -819,7 +820,7 @@ rebasing 4:82ae8dc7a9b7 "E" rebasing 3:ab709c9f7171 "D" rebasing 5:412b391de760 "F" - saved backup bundle to $TESTTMP/cwd-vanish/order/.hg/strip-backup/76035bbd54bd-e341bc99-backup.hg (glob) + saved backup bundle to $TESTTMP/order/.hg/strip-backup/76035bbd54bd-e341bc99-backup.hg (glob) $ hg tglog o 6: 'F' @@ -840,7 +841,7 @@ Test experimental revset ======================== - $ cd .. + $ cd ../cwd-vanish Make the repo a bit more interesting
--- a/tests/test-revert-interactive.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-revert-interactive.t Sat Mar 11 13:53:14 2017 -0500 @@ -380,29 +380,29 @@ 3 hunks, 3 lines changed examine changes to 'folder1/g'? [Ynesfdaq?] y - @@ -1,5 +1,4 @@ - -firstline + @@ -1,4 +1,5 @@ + +firstline c 1 2 3 discard change 1/3 to 'folder1/g'? [Ynesfdaq?] y - @@ -2,7 +1,7 @@ + @@ -1,7 +2,7 @@ c 1 2 3 - - 3 - +4 + -4 + + 3 5 d discard change 2/3 to 'folder1/g'? [Ynesfdaq?] y - @@ -7,3 +6,2 @@ + @@ -6,2 +7,3 @@ 5 d - -lastline + +lastline discard change 3/3 to 'folder1/g'? [Ynesfdaq?] n $ hg diff --nodates
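The corrected expectations above show the interactive hunks in the direction revert will apply them: each @@ header's old and new ranges are swapped and the '+'/'-' lines trade roles relative to the old output. A hedged sketch capturing the essence of reversing a unified-diff hunk, illustrative only, not Mercurial's patch code:

    import re

    def reverse_hunk(header, lines):
        # Swap a hunk's direction: the -old and +new ranges in the @@
        # header trade places, and added/removed lines trade prefixes.
        m = re.match(r'@@ -(\S+) \+(\S+) @@', header)
        oldrange, newrange = m.group(1), m.group(2)
        flipped = []
        for line in lines:
            if line.startswith('-'):
                flipped.append('+' + line[1:])
            elif line.startswith('+'):
                flipped.append('-' + line[1:])
            else:
                flipped.append(line)
        return '@@ -%s +%s @@' % (newrange, oldrange), flipped

    print(reverse_hunk('@@ -1,5 +1,4 @@', ['-firstline', ' c', ' 1']))
    # -> ('@@ -1,4 +1,5 @@', ['+firstline', ' c', ' 1'])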
--- a/tests/test-revset.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-revset.t Sat Mar 11 13:53:14 2017 -0500 @@ -40,6 +40,8 @@ > cmdutil, > node as nodemod, > revset, + > revsetlang, + > smartset, > ) > cmdtable = {} > command = cmdutil.command(cmdtable) @@ -49,17 +51,18 @@ > def debugrevlistspec(ui, repo, fmt, *args, **opts): > if opts['bin']: > args = map(nodemod.bin, args) - > expr = revset.formatspec(fmt, list(args)) + > expr = revsetlang.formatspec(fmt, list(args)) > if ui.verbose: - > tree = revset.parse(expr, lookup=repo.__contains__) - > ui.note(revset.prettyformat(tree), "\n") + > tree = revsetlang.parse(expr, lookup=repo.__contains__) + > ui.note(revsetlang.prettyformat(tree), "\n") > if opts["optimize"]: - > opttree = revset.optimize(revset.analyze(tree)) - > ui.note("* optimized:\n", revset.prettyformat(opttree), "\n") + > opttree = revsetlang.optimize(revsetlang.analyze(tree)) + > ui.note("* optimized:\n", revsetlang.prettyformat(opttree), + > "\n") > func = revset.match(ui, expr, repo) > revs = func(repo) > if ui.verbose: - > ui.note("* set:\n", revset.prettyformatset(revs), "\n") + > ui.note("* set:\n", smartset.prettyformat(revs), "\n") > for c in revs: > ui.write("%s\n" % c) > EOF
--- a/tests/test-serve.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-serve.t Sat Mar 11 13:53:14 2017 -0500 @@ -34,13 +34,13 @@ With -v $ hgserve - listening at http://localhost/ (bound to 127.0.0.1:HGPORT1) (glob) + listening at http://localhost/ (bound to *$LOCALIP*:HGPORT1) (glob) % errors With -v and -p HGPORT2 $ hgserve -p "$HGPORT2" - listening at http://localhost/ (bound to 127.0.0.1:HGPORT2) (glob) + listening at http://localhost/ (bound to *$LOCALIP*:HGPORT2) (glob) % errors With -v and -p daytime (should fail because low port) @@ -57,25 +57,25 @@ With --prefix foo $ hgserve --prefix foo - listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob) + listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) % errors With --prefix /foo $ hgserve --prefix /foo - listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob) + listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) % errors With --prefix foo/ $ hgserve --prefix foo/ - listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob) + listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) % errors With --prefix /foo/ $ hgserve --prefix /foo/ - listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob) + listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) % errors $ cd ..
--- a/tests/test-share.t	Tue Mar 07 13:24:24 2017 -0500
+++ b/tests/test-share.t	Sat Mar 11 13:53:14 2017 -0500
@@ -114,6 +114,8 @@
   $ test -d .hg/store
   $ test -f .hg/sharedpath
   [1]
+  $ grep shared .hg/requires
+  [1]
   $ hg unshare
   abort: this is not a shared repo
   [255]
@@ -154,6 +156,67 @@
   * bm1 2:c2e0ac586386
   bm3 2:c2e0ac586386
 
+check whether HG_PENDING makes pending changes only in related
+repositories visible to an external hook.
+
+In the "hg share" case, another transaction can't run in other
+repositories sharing the same source repository, because starting a
+transaction requires locking the store of the source repository.
+
+Therefore, this test scenario ignores checking the visibility of
+.hg/bookmarks.pending in repo2, which shares repo1 without bookmarks.
+
+  $ cat > $TESTTMP/checkbookmarks.sh <<EOF
+  > echo "@repo1"
+  > hg -R $TESTTMP/repo1 bookmarks
+  > echo "@repo2"
+  > hg -R $TESTTMP/repo2 bookmarks
+  > echo "@repo3"
+  > hg -R $TESTTMP/repo3 bookmarks
+  > exit 1 # to avoid adding new bookmark for subsequent tests
+  > EOF
+
+  $ cd ../repo1
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
+  @repo1
+  bm1 2:c2e0ac586386
+  bm3 2:c2e0ac586386
+  * bmX 2:c2e0ac586386
+  @repo2
+  * bm2 3:0e6e70d1d5f1
+  @repo3
+  bm1 2:c2e0ac586386
+  * bm3 2:c2e0ac586386
+  bmX 2:c2e0ac586386
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ hg book bm1
+
+FYI, in contrast to the above test, bmX is invisible in repo1 (= shared
+src), because (1) HG_PENDING refers only to repo3 and (2)
+"bookmarks.pending" is written only into repo3.
+
+  $ cd ../repo3
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
+  @repo1
+  * bm1 2:c2e0ac586386
+  bm3 2:c2e0ac586386
+  @repo2
+  * bm2 3:0e6e70d1d5f1
+  @repo3
+  bm1 2:c2e0ac586386
+  bm3 2:c2e0ac586386
+  * bmX 2:c2e0ac586386
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ hg book bm3
+
+  $ cd ../repo1
+
 test that commits work
 
   $ echo 'shared bookmarks' > a
@@ -297,6 +360,56 @@
   bm4 5:92793bfc8cad
   $ cd ..
 
+test that shared clones using relative paths work
+
+  $ mkdir thisdir
+  $ hg init thisdir/orig
+  $ hg share -U thisdir/orig thisdir/abs
+  $ hg share -U --relative thisdir/abs thisdir/rel
+  $ cat thisdir/rel/.hg/sharedpath
+  ../../orig/.hg (no-eol)
+  $ grep shared thisdir/*/.hg/requires
+  thisdir/abs/.hg/requires:shared
+  thisdir/rel/.hg/requires:shared
+  thisdir/rel/.hg/requires:relshared
+
+test that relative shared paths aren't relative to $PWD
+
+  $ cd thisdir
+  $ hg -R rel root
+  $TESTTMP/thisdir/rel
+  $ cd ..
+
+now test that relative paths really are relative, and survive across
+renames and changes of PWD
+
+  $ hg -R thisdir/abs root
+  $TESTTMP/thisdir/abs
+  $ hg -R thisdir/rel root
+  $TESTTMP/thisdir/rel
+  $ mv thisdir thatdir
+  $ hg -R thatdir/abs root
+  abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg!
+  [255]
+  $ hg -R thatdir/rel root
+  $TESTTMP/thatdir/rel
+
+test unshare relshared repo
+
+  $ cd thatdir/rel
+  $ hg unshare
+  $ test -d .hg/store
+  $ test -f .hg/sharedpath
+  [1]
+  $ grep shared .hg/requires
+  [1]
+  $ hg unshare
+  abort: this is not a shared repo
+  [255]
+  $ cd ../..
+
+  $ rm -r thatdir
+
 Explicitly kill daemons to let the test exit on Windows
 
   $ killdaemons.py
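With the new "hg share --relative", .hg/sharedpath stores a path such as ../../orig/.hg and a "relshared" entry joins .hg/requires; the stored path is resolved against the share's own .hg directory, never $PWD, which is exactly what the rename test checks. A minimal sketch of that resolution under those assumptions, not Mercurial's localrepo code:

    import os

    def resolve_sharedpath(hgdir):
        # hgdir: absolute path of the sharing repo's .hg directory.
        with open(os.path.join(hgdir, 'sharedpath')) as f:
            stored = f.read().rstrip('\n')
        with open(os.path.join(hgdir, 'requires')) as f:
            requires = f.read().split()
        if 'relshared' in requires:
            # Anchored at hgdir itself, so renaming a parent directory
            # (mv thisdir thatdir) keeps the share working.
            return os.path.normpath(os.path.join(hgdir, stored))
        return stored  # plain 'shared': an absolute path was written

    # e.g. resolve_sharedpath('/tmp/thatdir/rel/.hg') with a stored
    # '../../orig/.hg' yields '/tmp/thatdir/orig/.hg'.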
--- a/tests/test-shelve.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-shelve.t Sat Mar 11 13:53:14 2017 -0500 @@ -493,7 +493,7 @@ $ ln -s foo a/a $ hg shelve -q -n symlink a/a $ hg status a/a - $ hg unshelve -q symlink + $ hg unshelve -q -n symlink $ hg status a/a M a/a $ hg revert a/a @@ -1692,7 +1692,7 @@ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ echo 3 >> file && hg ci -Am 13 $ hg shelve --list - default (1s ago) changes to: 1 + default (*s ago) changes to: 1 (glob) $ hg unshelve --keep unshelving change 'default' rebasing shelved changes
--- a/tests/test-ssh-bundle1.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-ssh-bundle1.t Sat Mar 11 13:53:14 2017 -0500 @@ -494,7 +494,7 @@ Got arguments 1:user@dummy 2:hg -R local serve --stdio Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio - changegroup-in-remote hook: HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob) + changegroup-in-remote hook: HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob) Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio @@ -504,7 +504,7 @@ Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio - changegroup-in-remote hook: HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob) + changegroup-in-remote hook: HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob) Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg init 'a repo' Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio @@ -512,7 +512,7 @@ Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio - changegroup-in-remote hook: HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob) + changegroup-in-remote hook: HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob) Got arguments 1:user@dummy 2:hg -R remote serve --stdio remote hook failure is attributed to remote
--- a/tests/test-ssh.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-ssh.t Sat Mar 11 13:53:14 2017 -0500 @@ -498,7 +498,7 @@ Got arguments 1:user@dummy 2:hg -R local serve --stdio Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio - changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob) + changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob) Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio @@ -508,7 +508,7 @@ Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio - changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob) + changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob) Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg init 'a repo' Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio @@ -516,7 +516,7 @@ Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio Got arguments 1:user@dummy 2:hg -R remote serve --stdio - changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob) + changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:$LOCALIP (glob) Got arguments 1:user@dummy 2:hg -R remote serve --stdio remote hook failure is attributed to remote
--- a/tests/test-status-color.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-status-color.t Sat Mar 11 13:53:14 2017 -0500 @@ -1,6 +1,6 @@ $ cat <<EOF >> $HGRCPATH - > [extensions] - > color = + > [ui] + > color = always > [color] > mode = ansi > EOF @@ -14,7 +14,7 @@ hg status in repo root: - $ hg status --color=always + $ hg status \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc) @@ -41,7 +41,7 @@ hg status . in repo root: - $ hg status --color=always . + $ hg status . \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc) @@ -49,17 +49,17 @@ \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc) - $ hg status --color=always --cwd a + $ hg status --cwd a \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc) - $ hg status --color=always --cwd a . + $ hg status --cwd a . \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc) - $ hg status --color=always --cwd a .. + $ hg status --cwd a .. \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/1/in_b_1\x1b[0m (esc) @@ -67,18 +67,18 @@ \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/in_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc) - $ hg status --color=always --cwd b + $ hg status --cwd b \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc) - $ hg status --color=always --cwd b . + $ hg status --cwd b . \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc) - $ hg status --color=always --cwd b .. + $ hg status --cwd b .. \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc) @@ -86,43 +86,43 @@ \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc) - $ hg status --color=always --cwd a/1 + $ hg status --cwd a/1 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc) - $ hg status --color=always --cwd a/1 . + $ hg status --cwd a/1 . \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc) - $ hg status --color=always --cwd a/1 .. + $ hg status --cwd a/1 .. 
\x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_a\x1b[0m (esc) - $ hg status --color=always --cwd b/1 + $ hg status --cwd b/1 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc) - $ hg status --color=always --cwd b/1 . + $ hg status --cwd b/1 . \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc) - $ hg status --color=always --cwd b/1 .. + $ hg status --cwd b/1 .. \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../2/in_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc) - $ hg status --color=always --cwd b/2 + $ hg status --cwd b/2 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc) - $ hg status --color=always --cwd b/2 . + $ hg status --cwd b/2 . \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc) - $ hg status --color=always --cwd b/2 .. + $ hg status --cwd b/2 .. \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../1/in_b_1\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc) \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc) @@ -137,7 +137,7 @@ ? in_root Make sure ui.formatted=False works - $ hg status --config ui.formatted=False + $ hg status --color=auto --config ui.formatted=False ? a/1/in_a_1 ? a/in_a ? b/1/in_b_1 @@ -179,7 +179,7 @@ hg status: - $ hg status --color=always + $ hg status \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc) \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc) \x1b[0;36;1;4m! 
\x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc) @@ -187,7 +187,7 @@ hg status modified added removed deleted unknown never-existed ignored: - $ hg status --color=always modified added removed deleted unknown never-existed ignored + $ hg status modified added removed deleted unknown never-existed ignored never-existed: * (glob) \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc) \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc) @@ -198,7 +198,7 @@ hg status -C: - $ hg status --color=always -C + $ hg status -C \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc) \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc) \x1b[0;0m modified\x1b[0m (esc) @@ -208,7 +208,7 @@ hg status -A: - $ hg status --color=always -A + $ hg status -A \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc) \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc) \x1b[0;0m modified\x1b[0m (esc) @@ -226,7 +226,7 @@ $ mkdir "$TESTTMP/terminfo" $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti" - $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --color=always -A + $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo -A \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1madded\x1b[30m (esc) \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1mcopied\x1b[30m (esc) \x1b[30m\x1b[30m modified\x1b[30m (esc) @@ -245,7 +245,7 @@ > # We can override what's in the terminfo database, too > terminfo.bold = \E[2m > EOF - $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim --color=always -A + $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim -A \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2madded\x1b[30m (esc) \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2mcopied\x1b[30m (esc) \x1b[30m\x1b[30m modified\x1b[30m (esc) @@ -265,11 +265,11 @@ hg status ignoreddir/file: - $ hg status --color=always ignoreddir/file + $ hg status ignoreddir/file hg status -i ignoreddir/file: - $ hg status --color=always -i ignoreddir/file + $ hg status -i ignoreddir/file \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignoreddir/file\x1b[0m (esc) $ cd .. @@ -293,7 +293,9 @@ test unknown color - $ hg --config color.status.modified=periwinkle status --color=always + $ hg --config color.status.modified=periwinkle status + ignoring unknown color/effect 'periwinkle' (configured in color.status.modified) + ignoring unknown color/effect 'periwinkle' (configured in color.status.modified) ignoring unknown color/effect 'periwinkle' (configured in color.status.modified) M modified \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc) @@ -307,8 +309,8 @@ If result is not as expected, raise error $ assert() { - > hg status --color=always $1 > ../a - > hg status --color=always $2 > ../b + > hg status $1 > ../a + > hg status $2 > ../b > if diff ../a ../b > /dev/null; then > out=0 > else @@ -367,7 +369,7 @@ hg resolve with one unresolved, one resolved: - $ hg resolve --color=always -l + $ hg resolve -l \x1b[0;31;1mU \x1b[0m\x1b[0;31;1ma\x1b[0m (esc) \x1b[0;32;1mR \x1b[0m\x1b[0;32;1mb\x1b[0m (esc)
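The test above now enables color through "[ui] color = always" in the hgrc instead of loading the old color extension and passing --color=always on every command; the "Make sure ui.formatted=False works" case consequently needs an explicit --color=auto to undo the forced setting. The asserted bytes are ordinary ANSI SGR sequences; a tiny illustration of how such a label is built (a sketch only, not Mercurial's color code):

    def ansilabel(text, *codes):
        # 0=reset, 35=magenta, 1=bold, 4=underline produce the
        # "\x1b[0;35;1;4m...\x1b[0m" escapes asserted above.
        return '\x1b[%sm%s\x1b[0m' % (';'.join(str(c) for c in codes), text)

    print(ansilabel('? ', 0, 35, 1, 4) + ansilabel('in_root', 0, 35, 1, 4))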
--- a/tests/test-treemanifest.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-treemanifest.t Sat Mar 11 13:53:14 2017 -0500 @@ -437,6 +437,16 @@ $ hg ci -Aqm 'initial' + $ echo >> .A/one.txt + $ echo >> .A/two.txt + $ echo >> b/bar/fruits.txt + $ echo >> b/bar/orange/fly/gnat.py + $ echo >> b/bar/orange/fly/housefly.txt + $ echo >> b/foo/apple/bees/flower.py + $ echo >> c.txt + $ echo >> d.py + $ hg ci -Aqm 'second' + We'll see that visitdir works by removing some treemanifest revlogs and running the files command with various parameters. @@ -468,6 +478,12 @@ b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) b/foo/apple/bees/flower.py (glob) + $ hg diff -r '.^' -r . --stat b + b/bar/fruits.txt | 1 + + b/bar/orange/fly/gnat.py | 1 + + b/bar/orange/fly/housefly.txt | 1 + + b/foo/apple/bees/flower.py | 1 + + 4 files changed, 4 insertions(+), 0 deletions(-) $ cp -R .hg/store-copy/. .hg/store Test files with just includes and excludes. @@ -477,6 +493,9 @@ $ rm -r .hg/store/meta/b/foo/apple/bees $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees b/bar/fruits.txt (glob) + $ hg diff -r '.^' -r . --stat -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees + b/bar/fruits.txt | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) $ cp -R .hg/store-copy/. .hg/store Test files for a subdirectory, excluding a directory within it. @@ -487,6 +506,11 @@ b/bar/fruits.txt (glob) b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) + $ hg diff -r '.^' -r . --stat -X path:b/foo b + b/bar/fruits.txt | 1 + + b/bar/orange/fly/gnat.py | 1 + + b/bar/orange/fly/housefly.txt | 1 + + 3 files changed, 3 insertions(+), 0 deletions(-) $ cp -R .hg/store-copy/. .hg/store Test files for a sub directory, including only a directory within it, and @@ -497,6 +521,10 @@ $ hg files -r . -I path:b/bar/orange -I path:a b b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) + $ hg diff -r '.^' -r . --stat -I path:b/bar/orange -I path:a b + b/bar/orange/fly/gnat.py | 1 + + b/bar/orange/fly/housefly.txt | 1 + + 2 files changed, 2 insertions(+), 0 deletions(-) $ cp -R .hg/store-copy/. .hg/store Test files for a pattern, including a directory, and excluding a directory @@ -507,6 +535,9 @@ $ rm -r .hg/store/meta/b/bar/orange $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange b/bar/fruits.txt (glob) + $ hg diff -r '.^' -r . --stat glob:**.txt -I path:b/bar -X path:b/bar/orange + b/bar/fruits.txt | 1 + + 1 files changed, 1 insertions(+), 0 deletions(-) $ cp -R .hg/store-copy/. 
.hg/store Add some more changes to the deep repo @@ -522,7 +553,7 @@ checking directory manifests crosschecking files in changesets and manifests checking files - 8 files, 3 changesets, 10 total revisions + 8 files, 4 changesets, 18 total revisions Dirlogs are included in fncache $ grep meta/.A/00manifest.i .hg/store/fncache @@ -563,8 +594,9 @@ checking directory manifests 0: empty or missing b/ b/@0: parent-directory manifest refers to unknown revision 67688a370455 - b/@1: parent-directory manifest refers to unknown revision f38e85d334c5 - b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0 + b/@1: parent-directory manifest refers to unknown revision f065da70369e + b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b + b/@3: parent-directory manifest refers to unknown revision 367152e6af28 warning: orphan revlog 'meta/b/bar/00manifest.i' warning: orphan revlog 'meta/b/bar/orange/00manifest.i' warning: orphan revlog 'meta/b/bar/orange/fly/00manifest.i' @@ -577,9 +609,9 @@ b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest b/foo/apple/bees/flower.py@0: in changeset but not in manifest checking files - 8 files, 3 changesets, 10 total revisions + 8 files, 4 changesets, 18 total revisions 6 warnings encountered! - 8 integrity errors encountered! + 9 integrity errors encountered! (first damaged changeset appears to be 0) [1] $ cp -R .hg/store-newcopy/. .hg/store @@ -590,22 +622,22 @@ checking changesets checking manifests checking directory manifests - b/@1: parent-directory manifest refers to unknown revision f38e85d334c5 - b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0 - b/bar/@?: rev 1 points to unexpected changeset 1 - b/bar/@?: 5e03c4ee5e4a not in parent-directory manifest + b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b + b/@3: parent-directory manifest refers to unknown revision 367152e6af28 b/bar/@?: rev 2 points to unexpected changeset 2 - b/bar/@?: 1b16940d66d6 not in parent-directory manifest - b/bar/orange/@?: rev 1 points to unexpected changeset 2 + b/bar/@?: 44d7e1146e0d not in parent-directory manifest + b/bar/@?: rev 3 points to unexpected changeset 3 + b/bar/@?: 70b10c6b17b7 not in parent-directory manifest + b/bar/orange/@?: rev 2 points to unexpected changeset 3 (expected None) - b/bar/orange/fly/@?: rev 1 points to unexpected changeset 2 + b/bar/orange/fly/@?: rev 2 points to unexpected changeset 3 (expected None) crosschecking files in changesets and manifests checking files - 8 files, 3 changesets, 10 total revisions + 8 files, 4 changesets, 18 total revisions 2 warnings encountered! 8 integrity errors encountered! - (first damaged changeset appears to be 1) + (first damaged changeset appears to be 2) [1] $ cp -R .hg/store-newcopy/. .hg/store @@ -621,7 +653,7 @@ adding changesets adding manifests adding file changes - added 3 changesets with 10 changes to 8 files + added 4 changesets with 18 changes to 8 files updating to branch default 8 files updated, 0 files merged, 0 files removed, 0 files unresolved No server errors. @@ -656,7 +688,7 @@ checking directory manifests crosschecking files in changesets and manifests checking files - 8 files, 3 changesets, 10 total revisions + 8 files, 4 changesets, 18 total revisions $ cd .. 
 Create clones using old repo formats to use in later tests
 
@@ -667,7 +699,7 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 10 changes to 8 files
+  added 4 changesets with 18 changes to 8 files
   updating to branch default
   8 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd deeprepo-basicstore
@@ -683,7 +715,7 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 10 changes to 8 files
+  added 4 changesets with 18 changes to 8 files
   updating to branch default
   8 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd deeprepo-encodedstore
@@ -701,7 +733,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Local clone with encodedstore
   $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
@@ -711,7 +743,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Local clone with fncachestore
   $ hg clone -U deeprepo local-clone-fncachestore
@@ -721,7 +753,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Stream clone with basicstore
   $ hg clone --config experimental.changegroup3=True --uncompressed -U \
@@ -737,7 +769,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Stream clone with encodedstore
   $ hg clone --config experimental.changegroup3=True --uncompressed -U \
@@ -753,7 +785,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Stream clone with fncachestore
   $ hg clone --config experimental.changegroup3=True --uncompressed -U \
@@ -769,11 +801,11 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Packed bundle
   $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
-  writing 3349 bytes for 18 files
+  writing 5330 bytes for 18 files
   bundle requirements: generaldelta, revlogv1, treemanifest
   $ hg debugbundle --spec repo-packed.hg
   none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest
@@ -825,3 +857,13 @@
   added 1 changesets with 1 changes to 1 files (+1 heads)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
+Committing an empty commit does not duplicate the root treemanifest
+  $ echo z >> z
+  $ hg commit -Aqm 'pre-empty commit'
+  $ hg rm z
+  $ hg commit --amend -m 'empty commit'
+  saved backup bundle to $TESTTMP/grafted-dir-repo-clone/.hg/strip-backup/cb99d5717cea-de37743b-amend-backup.hg (glob)
+  $ hg log -r 'tip + tip^' -T '{manifest}\n'
+  1:678d3574b88c
+  1:678d3574b88c
+  $ hg --config extensions.strip= strip -r . -q
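The include/exclude runs in the treemanifest test work because the walk consults a visitdir-style check and never opens the revlogs of pruned directories, which is why deleting them from .hg/store goes unnoticed. A hedged sketch of such a predicate, with hypothetical helper names, not Mercurial's match.py:

    def visitdir(directory, includes, excludes):
        # Visit a directory iff it lies on the path to (or inside) an
        # include root and is not under an exclude root.
        def under(d, root):
            return d == root or d.startswith(root + '/')
        if any(under(directory, x) for x in excludes):
            return False
        return any(under(directory, i) or under(i, directory)
                   for i in includes)

    # b/foo/apple/bees is excluded, so its revlog is never read:
    assert not visitdir('b/foo/apple/bees',
                        ['b/bar', 'b/foo'],
                        ['b/bar/orange/fly', 'b/foo/apple/bees'])
    assert visitdir('b/bar', ['b/bar', 'b/foo'],
                    ['b/bar/orange/fly', 'b/foo/apple/bees'])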
--- a/tests/test-ui-color.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-ui-color.py Sat Mar 11 13:53:14 2017 -0500 @@ -1,16 +1,13 @@ from __future__ import absolute_import, print_function import os -from hgext import ( - color, -) from mercurial import ( dispatch, ui as uimod, ) # ensure errors aren't buffered -testui = color.colorui() +testui = uimod.ui() testui.pushbuffer() testui.write(('buffered\n')) testui.warn(('warning\n')) @@ -35,6 +32,7 @@ dispatch.dispatch(dispatch.request(['version', '-q'], ui_)) runcmd() -print("colored? " + str(issubclass(ui_.__class__, color.colorui))) +print("colored? %s" % (ui_._colormode is not None)) runcmd() -print("colored? " + str(issubclass(ui_.__class__, color.colorui))) +print("colored? %s" % (ui_._colormode is not None)) +
--- a/tests/test-update-branches.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-update-branches.t Sat Mar 11 13:53:14 2017 -0500 @@ -160,6 +160,16 @@ parent=1 M foo + $ revtest '-m dirty linear' dirty 1 2 -m + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + M foo + + $ revtest '-m dirty cross' dirty 3 4 -m + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=4 + M foo + $ revtest '-c dirtysub linear' dirtysub 1 2 -c abort: uncommitted changes in subrepository 'sub' parent=1 @@ -171,10 +181,173 @@ parent=2 $ revtest '-cC dirty linear' dirty 1 2 -cC - abort: cannot specify both -c/--check and -C/--clean + abort: can only specify one of -C/--clean, -c/--check, or -m/merge + parent=1 + M foo + + $ revtest '-mc dirty linear' dirty 1 2 -mc + abort: can only specify one of -C/--clean, -c/--check, or -m/merge + parent=1 + M foo + + $ revtest '-mC dirty linear' dirty 1 2 -mC + abort: can only specify one of -C/--clean, -c/--check, or -m/merge + parent=1 + M foo + + $ echo '[experimental]' >> .hg/hgrc + $ echo 'updatecheck = abort' >> .hg/hgrc + + $ revtest 'none dirty linear' dirty 1 2 + abort: uncommitted changes + parent=1 + M foo + + $ revtest 'none dirty linear' dirty 1 2 -c + abort: uncommitted changes + parent=1 + M foo + + $ revtest 'none dirty linear' dirty 1 2 -C + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + + $ echo 'updatecheck = none' >> .hg/hgrc + + $ revtest 'none dirty cross' dirty 3 4 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=4 + M foo + + $ revtest 'none dirty linear' dirty 1 2 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + M foo + + $ revtest 'none dirty linear' dirty 1 2 -c + abort: uncommitted changes parent=1 M foo + $ revtest 'none dirty linear' dirty 1 2 -C + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + + $ hg co -qC 3 + $ echo dirty >> a + $ hg co --tool :merge3 4 + merging a + warning: conflicts while merging a! (edit, then use 'hg resolve --mark') + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges + [1] + $ hg st + M a + ? a.orig + $ cat a + <<<<<<< working copy: 6efa171f091b - test: 3 + three + dirty + ||||||| base + three + ======= + four + >>>>>>> destination: d047485b3896 b1 - test: 4 + $ rm a.orig + + $ echo 'updatecheck = noconflict' >> .hg/hgrc + + $ revtest 'none dirty cross' dirty 3 4 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=4 + M foo + + $ revtest 'none dirty linear' dirty 1 2 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + M foo + + $ revtest 'none dirty linear' dirty 1 2 -c + abort: uncommitted changes + parent=1 + M foo + + $ revtest 'none dirty linear' dirty 1 2 -C + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + +Locally added file is allowed + $ hg up -qC 3 + $ echo a > bar + $ hg add bar + $ hg up -q 4 + $ hg st + A bar + $ hg forget bar + $ rm bar + +Locally removed file is allowed + $ hg up -qC 3 + $ hg rm foo + $ hg up -q 4 + +File conflict is not allowed + $ hg up -qC 3 + $ echo dirty >> a + $ hg up -q 4 + abort: conflicting changes + (commit or update --clean to discard changes) + [255] + $ hg up -m 4 + merging a + warning: conflicts while merging a! 
(edit, then use 'hg resolve --mark') + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges + [1] + $ rm a.orig + +Change/delete conflict is not allowed + $ hg up -qC 3 + $ hg rm foo + $ hg up -q 4 + +Uses default value of "linear" when value is misspelled + $ echo 'updatecheck = linyar' >> .hg/hgrc + + $ revtest 'dirty cross' dirty 3 4 + abort: uncommitted changes + (commit or update --clean to discard changes) + parent=3 + M foo + +Setup for later tests + $ revtest 'none dirty linear' dirty 1 2 -c + abort: uncommitted changes + parent=1 + M foo + + $ cd .. + +Test updating to null revision + + $ hg init null-repo + $ cd null-repo + $ echo a > a + $ hg add a + $ hg ci -m a + $ hg up -qC 0 + $ echo b > b + $ hg add b + $ hg up null + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg st + A b + $ hg up -q 0 + $ hg st + A b + $ hg up -qC null + $ hg st + ? b $ cd .. Test updating with closed head
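The new block above exercises the experimental.updatecheck knob with four values: abort (any dirty update is refused), none (dirty updates always merge), noconflict (dirty updates allowed unless a file would conflict), and the default linear (dirty updates only along a linear path); an unrecognized value falls back to linear, as the "linyar" test shows, and the one-shot -C/-c/-m flags override the config. A compact sketch of that policy under those assumptions, not merge.py's actual code:

    class Abort(Exception):
        pass

    def allow_dirty_update(updatecheck, dirty, linear, wouldconflict):
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            updatecheck = 'linear'   # misspellings fall back to default
        if not dirty:
            return True
        if updatecheck == 'abort':
            raise Abort('uncommitted changes')
        if updatecheck == 'none':
            return True              # always merge local changes
        if updatecheck == 'noconflict':
            if wouldconflict:
                raise Abort('conflicting changes')
            return True
        if not linear:               # 'linear': cross-branch moves refused
            raise Abort('uncommitted changes')
        return True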
--- a/tests/test-walk.t Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/test-walk.t Sat Mar 11 13:53:14 2017 -0500 @@ -112,6 +112,74 @@ f beans/navy ../beans/navy f beans/pinto ../beans/pinto f beans/turtle ../beans/turtle + + $ hg debugwalk 'rootfilesin:' + f fennel ../fennel + f fenugreek ../fenugreek + f fiddlehead ../fiddlehead + $ hg debugwalk -I 'rootfilesin:' + f fennel ../fennel + f fenugreek ../fenugreek + f fiddlehead ../fiddlehead + $ hg debugwalk 'rootfilesin:.' + f fennel ../fennel + f fenugreek ../fenugreek + f fiddlehead ../fiddlehead + $ hg debugwalk -I 'rootfilesin:.' + f fennel ../fennel + f fenugreek ../fenugreek + f fiddlehead ../fiddlehead + $ hg debugwalk -X 'rootfilesin:' + f beans/black ../beans/black + f beans/borlotti ../beans/borlotti + f beans/kidney ../beans/kidney + f beans/navy ../beans/navy + f beans/pinto ../beans/pinto + f beans/turtle ../beans/turtle + f mammals/Procyonidae/cacomistle Procyonidae/cacomistle + f mammals/Procyonidae/coatimundi Procyonidae/coatimundi + f mammals/Procyonidae/raccoon Procyonidae/raccoon + f mammals/skunk skunk + $ hg debugwalk 'rootfilesin:fennel' + $ hg debugwalk -I 'rootfilesin:fennel' + $ hg debugwalk 'rootfilesin:skunk' + $ hg debugwalk -I 'rootfilesin:skunk' + $ hg debugwalk 'rootfilesin:beans' + f beans/black ../beans/black + f beans/borlotti ../beans/borlotti + f beans/kidney ../beans/kidney + f beans/navy ../beans/navy + f beans/pinto ../beans/pinto + f beans/turtle ../beans/turtle + $ hg debugwalk -I 'rootfilesin:beans' + f beans/black ../beans/black + f beans/borlotti ../beans/borlotti + f beans/kidney ../beans/kidney + f beans/navy ../beans/navy + f beans/pinto ../beans/pinto + f beans/turtle ../beans/turtle + $ hg debugwalk 'rootfilesin:mammals' + f mammals/skunk skunk + $ hg debugwalk -I 'rootfilesin:mammals' + f mammals/skunk skunk + $ hg debugwalk 'rootfilesin:mammals/' + f mammals/skunk skunk + $ hg debugwalk -I 'rootfilesin:mammals/' + f mammals/skunk skunk + $ hg debugwalk -X 'rootfilesin:mammals' + f beans/black ../beans/black + f beans/borlotti ../beans/borlotti + f beans/kidney ../beans/kidney + f beans/navy ../beans/navy + f beans/pinto ../beans/pinto + f beans/turtle ../beans/turtle + f fennel ../fennel + f fenugreek ../fenugreek + f fiddlehead ../fiddlehead + f mammals/Procyonidae/cacomistle Procyonidae/cacomistle + f mammals/Procyonidae/coatimundi Procyonidae/coatimundi + f mammals/Procyonidae/raccoon Procyonidae/raccoon + $ hg debugwalk . f mammals/Procyonidae/cacomistle Procyonidae/cacomistle f mammals/Procyonidae/coatimundi Procyonidae/coatimundi
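The new "rootfilesin:" pattern matches files directly inside the named directory but not in its subdirectories; an empty argument (or ".") means the repository root, which is why only fennel, fenugreek and fiddlehead come back. A minimal sketch of the predicate, illustrative only, not match.py:

    import posixpath

    def rootfilesin(root, filename):
        if root in ('', '.'):
            return '/' not in filename        # files at the repo root
        return posixpath.dirname(filename) == root

    assert rootfilesin('mammals', 'mammals/skunk')
    assert not rootfilesin('mammals', 'mammals/Procyonidae/raccoon')
    assert rootfilesin('', 'fennel')
    assert not rootfilesin('fennel', 'fennel')  # a file, not a directory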
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-xdg.t Sat Mar 11 13:53:14 2017 -0500 @@ -0,0 +1,11 @@ +#if no-windows no-osx + + $ mkdir -p xdgconf/hg + $ echo '[ui]' > xdgconf/hg/hgrc + $ echo 'username = foobar' >> xdgconf/hg/hgrc + $ XDG_CONFIG_HOME="`pwd`/xdgconf" ; export XDG_CONFIG_HOME + $ unset HGRCPATH + $ hg config ui.username + foobar + +#endif
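The new test covers reading XDG-style configuration: on platforms other than Windows and macOS, $XDG_CONFIG_HOME/hg/hgrc is consulted. A small sketch of the lookup, assuming the conventional XDG fallback to ~/.config when the variable is unset (not Mercurial's actual rcpath code):

    import os

    def xdg_hgrc():
        confighome = os.environ.get('XDG_CONFIG_HOME')
        if not confighome:
            confighome = os.path.join(os.path.expanduser('~'), '.config')
        return os.path.join(confighome, 'hg', 'hgrc')

    # With XDG_CONFIG_HOME=$PWD/xdgconf this yields xdgconf/hg/hgrc,
    # the file the test writes ui.username into.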
--- a/tests/tinyproxy.py Tue Mar 07 13:24:24 2017 -0500 +++ b/tests/tinyproxy.py Sat Mar 11 13:53:14 2017 -0500 @@ -26,6 +26,11 @@ urlparse = util.urlparse socketserver = util.socketserver +if os.environ.get('HGIPV6', '0') == '1': + family = socket.AF_INET6 +else: + family = socket.AF_INET + class ProxyHandler (httpserver.basehttprequesthandler): __base = httpserver.basehttprequesthandler __base_handle = __base.handle @@ -65,7 +70,7 @@ return 1 def do_CONNECT(self): - soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + soc = socket.socket(family, socket.SOCK_STREAM) try: if self._connect_to(self.path, soc): self.log_request(200) @@ -85,7 +90,7 @@ if scm != 'http' or fragment or not netloc: self.send_error(400, "bad url %s" % self.path) return - soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + soc = socket.socket(family, socket.SOCK_STREAM) try: if self._connect_to(netloc, soc): self.log_request()
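The tinyproxy change computes the socket family once, from the same HGIPV6 switch the test harness sets, so every outgoing proxy connection matches the IP version of the test run. An equivalent standalone illustration:

    import os
    import socket

    # AF_INET6 when the harness runs the suite over IPv6, AF_INET otherwise.
    family = (socket.AF_INET6 if os.environ.get('HGIPV6', '0') == '1'
              else socket.AF_INET)
    soc = socket.socket(family, socket.SOCK_STREAM)
    soc.close()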