dirstate.walk: don't keep track of normalized files in parallel

Rev 2bb13f2b778c changed the semantics of the work list to store (normalized,
non-normalized) pairs. All the tuple creation and destruction hurts perf: on a
large repo on OS X, 'hg status' went from 3.62 seconds to 3.78 seconds.

It is also unnecessary in most cases:
- it is clearly unnecessary on case-sensitive filesystems.
- it is also unnecessary when filenames have been read off disk rather than
  supplied by the user.

The only case where the non-normalized form is required at all is when the
file is unknown.

To eliminate most of the perf cost, keep track of whether the directory still
needs to be normalized with a boolean called 'alreadynormed', and pay the cost
of directory normalization only when necessary.

For the above large repo, 'hg status' goes back down to 3.63 seconds.
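As a rough illustration of the approach (a minimal sketch only: the names
walkdirs, normalize, and casesensitive are hypothetical stand-ins, not the
actual dirstate.walk code), each work item carries an 'alreadynormed' flag
instead of a (normalized, non-normalized) pair, and the normalization cost is
paid only for entries where the flag is False:

import os

def walkdirs(roots, normalize, casesensitive):
    # Each work item is (directory, alreadynormed). User-supplied roots may
    # need normalizing on a case-insensitive filesystem; names read off disk
    # are already in their on-disk form.
    work = [(d, casesensitive) for d in roots]
    while work:
        d, alreadynormed = work.pop()
        if not alreadynormed:
            d = normalize(d)  # pay the normalization cost only here
        for name in sorted(os.listdir(d or '.')):
            path = os.path.join(d, name)
            if os.path.isdir(path):
                work.append((path, True))  # on-disk spelling, already normed
            else:
                yield path

On a case-sensitive filesystem every work item starts out with
alreadynormed=True, so normalize() is never called, matching the first bullet
above.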
#!/usr/bin/env python

"""
Utility for inspecting files in various ways.

This tool is like the collection of tools found in a unix environment, but it
is cross platform, stable, and suitable for our needs in the test suite.

This can be used instead of tools like:
  [
  dd
  find
  head
  hexdump
  ls
  md5sum
  readlink
  sha1sum
  stat
  tail
  test
  readlink.py
  md5sum.py
"""

import sys, os, errno, re, glob, optparse

def visit(opts, filenames, outfile):
    """Process filenames in the way specified in opts, writing output to
    outfile."""
    for f in sorted(filenames):
        isstdin = f == '-'
        if not isstdin and not os.path.lexists(f):
            outfile.write('%s: file not found\n' % f)
            continue
        quiet = opts.quiet and not opts.recurse or isstdin
        isdir = os.path.isdir(f)
        islink = os.path.islink(f)
        isfile = os.path.isfile(f) and not islink
        dirfiles = None
        content = None
        facts = []
        if isfile:
            if opts.type:
                facts.append('file')
            if opts.hexdump or opts.dump or opts.md5:
                content = file(f).read()
        elif islink:
            if opts.type:
                facts.append('link')
            content = os.readlink(f)
        elif isstdin:
            content = sys.stdin.read()
            if opts.size:
                facts.append('size=%s' % len(content))
        elif isdir:
            if opts.recurse or opts.type:
                dirfiles = glob.glob(f + '/*')
                facts.append('directory with %s files' % len(dirfiles))
        elif opts.type:
            facts.append('type unknown')
        if not isstdin:
            stat = os.lstat(f)
            if opts.size and not isdir:
                facts.append('size=%s' % stat.st_size)
            if opts.mode and not islink:
                facts.append('mode=%o' % (stat.st_mode & 0777))
            if opts.links:
                facts.append('links=%s' % stat.st_nlink)
            if opts.newer:
                # mtime might be in whole seconds so newer file might be same
                if stat.st_mtime >= os.stat(opts.newer).st_mtime:
                    facts.append('newer than %s' % opts.newer)
                else:
                    facts.append('older than %s' % opts.newer)
        if opts.md5 and content is not None:
            try:
                from hashlib import md5
            except ImportError:
                from md5 import md5
            facts.append('md5=%s' % md5(content).hexdigest()[:opts.bytes])
        if opts.sha1 and content is not None:
            try:
                from hashlib import sha1
            except ImportError:
                from sha import sha as sha1
            facts.append('sha1=%s' % sha1(content).hexdigest()[:opts.bytes])
        if isstdin:
            outfile.write(', '.join(facts) + '\n')
        elif facts:
            outfile.write('%s: %s\n' % (f, ', '.join(facts)))
        elif not quiet:
            outfile.write('%s:\n' % f)
        if content is not None:
            chunk = content
            if not islink:
                if opts.lines:
                    if opts.lines >= 0:
                        chunk = ''.join(chunk.splitlines(True)[:opts.lines])
                    else:
                        chunk = ''.join(chunk.splitlines(True)[opts.lines:])
                if opts.bytes:
                    if opts.bytes >= 0:
                        chunk = chunk[:opts.bytes]
                    else:
                        chunk = chunk[opts.bytes:]
            if opts.hexdump:
                for i in range(0, len(chunk), 16):
                    s = chunk[i:i+16]
                    outfile.write('%04x: %-47s |%s|\n' %
                                  (i, ' '.join('%02x' % ord(c) for c in s),
                                   re.sub('[^ -~]', '.', s)))
            if opts.dump:
                if not quiet:
                    outfile.write('>>>\n')
                outfile.write(chunk)
                if not quiet:
                    if chunk.endswith('\n'):
                        outfile.write('<<<\n')
                    else:
                        outfile.write('\n<<< no trailing newline\n')
        if opts.recurse and dirfiles:
            assert not isstdin
            visit(opts, dirfiles, outfile)

if __name__ == "__main__":
    parser = optparse.OptionParser("%prog [options] [filenames]")
    parser.add_option("-t", "--type", action="store_true",
                      help="show file type (file or directory)")
    parser.add_option("-m", "--mode", action="store_true",
                      help="show file mode")
    parser.add_option("-l", "--links", action="store_true",
                      help="show number of links")
    parser.add_option("-s", "--size", action="store_true",
                      help="show size of file")
    parser.add_option("-n", "--newer", action="store",
                      help="check if file is newer (or same)")
    parser.add_option("-r", "--recurse", action="store_true",
                      help="recurse into directories")
    parser.add_option("-S", "--sha1", action="store_true",
                      help="show sha1 hash of the content")
    parser.add_option("-M", "--md5", action="store_true",
                      help="show md5 hash of the content")
    parser.add_option("-D", "--dump", action="store_true",
                      help="dump file content")
    parser.add_option("-H", "--hexdump", action="store_true",
                      help="hexdump file content")
    parser.add_option("-B", "--bytes", type="int",
                      help="number of characters to dump")
    parser.add_option("-L", "--lines", type="int",
                      help="number of lines to dump")
    parser.add_option("-q", "--quiet", action="store_true",
                      help="no default output")

    (opts, filenames) = parser.parse_args(sys.argv[1:])

    if not filenames:
        filenames = ['-']

    visit(opts, filenames, sys.stdout)
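
For reference, a hypothetical usage sketch (the module name f and the chosen
option values are assumptions, not anything the script mandates): visit() can
also be driven directly with an optparse.Values object, which is equivalent to
running the script as 'python f.py --type --size f.py'.

# Hypothetical driver, assuming the script above is saved as f.py so that it
# can be imported; the option values mirror "python f.py --type --size f.py".
import sys
from optparse import Values
from f import visit  # hypothetical module name

opts = Values({'type': True, 'size': True, 'mode': False, 'links': False,
               'newer': None, 'recurse': False, 'sha1': False, 'md5': False,
               'dump': False, 'hexdump': False, 'bytes': None, 'lines': None,
               'quiet': False})
visit(opts, ['f.py'], sys.stdout)
# prints a line of the form: f.py: file, size=<bytes>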