tests/md5sum.py
author Gregory Szorc <gregory.szorc@gmail.com>
Wed, 03 Oct 2018 09:43:01 -0700
changeset 40030 62160d3077cd
parent 33873 904bc1dc2694
child 43076 2372284d9457
permissions -rwxr-xr-x
cborutil: change buffering strategy

Profiling revealed that we were spending a lot of time on the line that
concatenated the old buffer with the incoming data when attempting to
decode long byte strings, such as manifest revisions.

Essentially, we were feeding N chunks of size len(X) << len(Y) into
decode() and continuously allocating a new, larger buffer to hold the
undecoded input. This created substantial memory churn and slowed down
execution.

Changing the code to aggregate pending chunks in a list until we have
enough data to fully decode the next atom makes things much more
efficient.

I don't have exact data, but I recall the old code spending >1s on
manifest fulltexts from the mozilla-unified repo. The new code no
longer shows up significantly in profile output.

Differential Revision: https://phab.mercurial-scm.org/D4854
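
The strategy the commit describes can be sketched as follows. This is a
minimal illustration under assumed names (ChunkBuffer, feed, take), not
Mercurial's actual cborutil code: pending chunks accumulate in a list,
and the single b''.join() happens only once enough bytes are available
to decode the next atom, instead of reallocating a growing buffer on
every feed.

class ChunkBuffer(object):
    """Aggregate incoming chunks in a list; join lazily.

    Repeatedly doing buf += chunk copies the entire undecoded input on
    every feed, which is quadratic when a long byte string arrives as
    many small chunks. Joining once, only when enough data is buffered,
    avoids that churn.
    """

    def __init__(self):
        self._chunks = []    # pending, undecoded chunks
        self._available = 0  # total bytes across self._chunks

    def feed(self, data):
        self._chunks.append(data)
        self._available += len(data)

    def take(self, n):
        """Return n bytes if buffered, else None (keep accumulating)."""
        if self._available < n:
            return None
        buf = b''.join(self._chunks)  # the one copy, deferred until now
        result, rest = buf[:n], buf[n:]
        self._chunks = [rest] if rest else []
        self._available = len(rest)
        return result

buf = ChunkBuffer()
for chunk in (b'he', b'llo', b' wor', b'ld'):
    buf.feed(chunk)
assert buf.take(11) == b'hello world'

With N feeds for one atom, the old concatenating approach copies O(N^2)
bytes in total, while the list-plus-join approach copies each byte once.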

#!/usr/bin/env python
#
# Based on python's Tools/scripts/md5sum.py
#
# This software may be used and distributed according to the terms
# of the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2, which is
# GPL-compatible.

from __future__ import absolute_import

import hashlib
import os
import sys

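# On Windows, switch stdout/stderr to binary mode so output is not
# mangled by newline translation; elsewhere msvcrt is absent and the
# import simply fails.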
try:
    import msvcrt
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
    pass

for filename in sys.argv[1:]:
    try:
        fp = open(filename, 'rb')
    except IOError as msg:
        sys.stderr.write('%s: Can\'t open: %s\n' % (filename, msg))
        sys.exit(1)

    m = hashlib.md5()
    try:
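        # Read in 8 KiB chunks until EOF (iter() stops at the b'' sentinel).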
        for data in iter(lambda: fp.read(8192), b''):
            m.update(data)
    except IOError as msg:
        sys.stderr.write('%s: I/O error: %s\n' % (filename, msg))
        sys.exit(1)
    fp.close()
    sys.stdout.write('%s  %s\n' % (m.hexdigest(), filename))

sys.exit(0)
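
For reference, the script hashes each file argument in 8 KiB chunks and
prints one "digest  filename" line per file (two spaces between the
fields). Running it on an empty file, for example, yields the
well-known MD5 digest of empty input:

$ touch empty.txt
$ python md5sum.py empty.txt
d41d8cd98f00b204e9800998ecf8427e  empty.txt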