tests/filterpyflakes.py
author Gregory Szorc <gregory.szorc@gmail.com>
Wed, 03 Oct 2018 09:43:01 -0700
changeset 40031 62160d3077cd
parent 33367 6029939f7e98
child 43076 2372284d9457
permissions -rwxr-xr-x
cborutil: change buffering strategy

Profiling revealed that we were spending a lot of time on the line that was
concatenating the old buffer with the incoming data when attempting to decode
long byte strings, such as manifest revisions.

Essentially, we were feeding N chunks of size len(X) << len(Y) into decode()
and continuously allocating a new, larger buffer to hold the undecoded input.
This created substantial memory churn and slowed down execution.

Changing the code to aggregate pending chunks in a list until we have enough
data to fully decode the next atom makes things much more efficient.

I don't have exact data, but I recall the old code spending >1s on manifest
fulltexts from the mozilla-unified repo. The new code doesn't significantly
appear in profile output.

Differential Revision: https://phab.mercurial-scm.org/D4854
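The strategy described above trades per-chunk buffer concatenation for a
single join once enough bytes have arrived. A minimal sketch of the idea,
using hypothetical names (ChunkBuffer, feed, take) that are not the actual
cborutil code:

class ChunkBuffer(object):
    """Accumulates byte chunks without repeatedly reallocating a buffer."""

    def __init__(self):
        self._chunks = []    # pending, undecoded chunks
        self._available = 0  # total bytes across pending chunks

    def feed(self, data):
        # O(1) bookkeeping per chunk instead of O(n) bytes concatenation.
        self._chunks.append(data)
        self._available += len(data)

    def take(self, wanted):
        # Return the next `wanted` bytes, or None if not enough arrived yet.
        if self._available < wanted:
            return None
        buf = b''.join(self._chunks)  # single join, only when decodable
        self._chunks = [buf[wanted:]] if len(buf) > wanted else []
        self._available = len(buf) - wanted
        return buf[:wanted]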

#!/usr/bin/env python

# Filter pyflakes output to control which warnings we check

from __future__ import absolute_import, print_function

import re
import sys

# Patterns matching pyflakes warnings that are too noisy for us to act on.
pats = [
    r"undefined name 'WindowsError'",
    r"redefinition of unused '[^']+' from line",
    # for cffi, allow re-exports from pure.*
    r"cffi/[^:]*:.*\bimport \*' used",
    r"cffi/[^:]*:.*\*' imported but unused",
]

lines = []
for line in sys.stdin:
    keep = True
    for pat in pats:
        if re.search(pat, line):
            keep = False
            break  # a blacklisted pattern matched; drop this warning
    if keep:
        fn = line.split(':', 1)[0]
        with open(fn) as f:
            data = f.read()
        # Written as two adjacent string literals so this script does not
        # itself match the marker it is searching for.
        if 'no-' 'check-code' in data:
            continue
        lines.append(line)

for line in lines:
    sys.stdout.write(line)
print()
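For context, the script is meant to sit at the end of a pyflakes pipeline,
reading warnings on stdin and emitting only the ones we care about. An
illustrative invocation (the paths and pipeline are an assumption, not taken
verbatim from the Mercurial test suite):

$ pyflakes mercurial/*.py hgext/*.py 2>/dev/null | tests/filterpyflakes.py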