view tests/f @ 24461:05ccfe6763f1

osutil: use getdirentriesattr on OS X if possible

This is a significant win for large repositories on OS X, especially with a
cold cache. Unfortunately we need to keep the lstat-based implementation
around for two reasons (a rough sketch of the resulting fast-path/fallback
shape follows this header):

- Not all filesystems support this call.
- There's an edge case in which it's best to fall back to avoid a retry
  loop. More about this in the comments.

The tests below were all performed on a Mac with an SSD running OS X 10.9, on
a repository with over 200k files. The results are the best of 5 runs under
simulated best-effort conditions.

The gains with a hot cache are pretty impressive: 'hg status' goes from 5.18
seconds to 3.79 seconds. However, a repository that large will probably
already be using something like hgwatchman [1], which helps much more (for
this repo, 'hg status' with hgwatchman takes approximately 1 second).

Where this really helps is when the cache is cold [2]: 'hg status' goes from
31.0 seconds to 9.66 seconds.

See http://lists.apple.com/archives/filesystem-dev/2014/Dec/msg00002.html for
some more discussion about this function.

This is based on a patch by Sean Farley <sean@farley.io>.

[1] https://bitbucket.org/facebook/hgwatchman

[2] There appears to be no easy way to clear the file cache (aka "vnodes") on
    OS X short of rebooting. purge(8) purportedly does that, but in my
    testing it had little effect. The workaround I came up with was to assume
    that vnode eviction was LRU, make sure the kern.maxvnodes sysctl is
    smaller than the size of the repository, and then make sure we'd always
    miss the cache by running 'hg status' in another clone of the repository
    before running it in the test repository.
author Siddharth Agarwal <sid0@fb.com>
date Wed, 25 Mar 2015 15:55:31 -0700
parents 7d0aa6269ece
children 6686ae524f94
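
As a rough illustration of the fast-path/fallback shape described above (this
is not Mercurial's actual osutil code; the helper names below are
hypothetical), the logic amounts to: attempt the bulk getdirentriesattr-style
read first, and fall back to one lstat() per entry when the filesystem does
not support the call or the retry edge case is hit.

    import os

    def _listdir_bulk(path):
        # Hypothetical stand-in for a getdirentriesattr-based directory read;
        # pure Python has no such call, so this stub always reports
        # "unsupported" and forces the fallback below.
        raise NotImplementedError('bulk attribute listing not available')

    def listdir_with_fallback(path):
        """Return a sorted list of (name, lstat result) pairs for path."""
        try:
            return _listdir_bulk(path)
        except (NotImplementedError, OSError):
            # Fallback: one lstat() per entry. Works on every filesystem,
            # but is much slower on large trees with a cold cache.
            return [(name, os.lstat(os.path.join(path, name)))
                    for name in sorted(os.listdir(path))]
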
line source

#!/usr/bin/env python

"""
Utility for inspecting files in various ways.

This tool is like the collection of tools found in a Unix environment, but it
is cross-platform, stable, and suitable for our needs in the test suite.

This can be used instead of tools like:
  [
  dd
  find
  head
  hexdump
  ls
  md5sum
  readlink
  sha1sum
  stat
  tail
  test
  readlink.py
  md5sum.py
"""

import sys, os, re, glob, optparse

def visit(opts, filenames, outfile):
    """Process filenames in the way specified in opts, writing output to
    outfile."""
    for f in sorted(filenames):
        isstdin = f == '-'
        if not isstdin and not os.path.lexists(f):
            outfile.write('%s: file not found\n' % f)
            continue
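        # Quiet mode (-q) suppresses the bare filename line and the dump
        # markers, but only when not recursing; stdin is always quiet.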
        quiet = (opts.quiet and not opts.recurse) or isstdin
        isdir = os.path.isdir(f)
        islink = os.path.islink(f)
        isfile = os.path.isfile(f) and not islink
        dirfiles = None
        content = None
        facts = []
        if isfile:
            if opts.type:
                facts.append('file')
            if opts.hexdump or opts.dump or opts.md5 or opts.sha1:
                content = open(f, 'rb').read()
        elif islink:
            if opts.type:
                facts.append('link')
            content = os.readlink(f)
        elif isstdin:
            content = sys.stdin.read()
            if opts.size:
                facts.append('size=%s' % len(content))
        elif isdir:
            if opts.recurse or opts.type:
                dirfiles = glob.glob(f + '/*')
                facts.append('directory with %s files' % len(dirfiles))
        elif opts.type:
            facts.append('type unknown')
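        # Facts that need an lstat() of the path itself; stdin has no
        # backing file, so skip them for it.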
        if not isstdin:
            stat = os.lstat(f)
            if opts.size and not isdir:
                facts.append('size=%s' % stat.st_size)
            if opts.mode and not islink:
                facts.append('mode=%o' % (stat.st_mode & 0777))
            if opts.links:
                facts.append('links=%s' % stat.st_nlink)
            if opts.newer:
                # mtime may only have whole-second resolution, so a newer
                # file can end up with the same mtime
                if stat.st_mtime >= os.stat(opts.newer).st_mtime:
                    facts.append('newer than %s' % opts.newer)
                else:
                    facts.append('older than %s' % opts.newer)
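        # Prefer hashlib, falling back to the legacy md5/sha modules so the
        # script still works on very old Python installations.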
        if opts.md5 and content is not None:
            try:
                from hashlib import md5
            except ImportError:
                from md5 import md5
            facts.append('md5=%s' % md5(content).hexdigest()[:opts.bytes])
        if opts.sha1 and content is not None:
            try:
                from hashlib import sha1
            except ImportError:
                from sha import sha as sha1
            facts.append('sha1=%s' % sha1(content).hexdigest()[:opts.bytes])
        if isstdin:
            outfile.write(', '.join(facts) + '\n')
        elif facts:
            outfile.write('%s: %s\n' % (f, ', '.join(facts)))
        elif not quiet:
            outfile.write('%s:\n' % f)
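        # Emit the content itself: -L/-B first trim the chunk by lines/bytes
        # (symlink targets are always shown whole), then -H hexdumps and -D
        # dumps whatever remains.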
        if content is not None:
            chunk = content
            if not islink:
                if opts.lines:
                    if opts.lines >= 0:
                        chunk = ''.join(chunk.splitlines(True)[:opts.lines])
                    else:
                        chunk = ''.join(chunk.splitlines(True)[opts.lines:])
                if opts.bytes:
                    if opts.bytes >= 0:
                        chunk = chunk[:opts.bytes]
                    else:
                        chunk = chunk[opts.bytes:]
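            # Classic hexdump layout: offset, up to 16 hex bytes per row,
            # then the same bytes with non-printables replaced by '.'.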
            if opts.hexdump:
                for i in range(0, len(chunk), 16):
                    s = chunk[i:i+16]
                    outfile.write('%04x: %-47s |%s|\n' %
                                  (i, ' '.join('%02x' % ord(c) for c in s),
                                   re.sub('[^ -~]', '.', s)))
            if opts.dump:
                if not quiet:
                    outfile.write('>>>\n')
                outfile.write(chunk)
                if not quiet:
                    if chunk.endswith('\n'):
                        outfile.write('<<<\n')
                    else:
                        outfile.write('\n<<< no trailing newline\n')
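        # Depth-first recursion into the directory listing gathered above;
        # only reached when -r was given and the directory is non-empty.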
        if opts.recurse and dirfiles:
            assert not isstdin
            visit(opts, dirfiles, outfile)

if __name__ == "__main__":
    parser = optparse.OptionParser("%prog [options] [filenames]")
    parser.add_option("-t", "--type", action="store_true",
                      help="show file type (file or directory)")
    parser.add_option("-m", "--mode", action="store_true",
                      help="show file mode")
    parser.add_option("-l", "--links", action="store_true",
                      help="show number of links")
    parser.add_option("-s", "--size", action="store_true",
                      help="show size of file")
    parser.add_option("-n", "--newer", action="store",
                      help="check if file is newer (or same)")
    parser.add_option("-r", "--recurse", action="store_true",
                      help="recurse into directories")
    parser.add_option("-S", "--sha1", action="store_true",
                      help="show sha1 hash of the content")
    parser.add_option("-M", "--md5", action="store_true",
                      help="show md5 hash of the content")
    parser.add_option("-D", "--dump", action="store_true",
                      help="dump file content")
    parser.add_option("-H", "--hexdump", action="store_true",
                      help="hexdump file content")
    parser.add_option("-B", "--bytes", type="int",
                      help="number of characters to dump")
    parser.add_option("-L", "--lines", type="int",
                      help="number of lines to dump")
    parser.add_option("-q", "--quiet", action="store_true",
                      help="no default output")
    (opts, filenames) = parser.parse_args(sys.argv[1:])
    if not filenames:
        filenames = ['-']

    visit(opts, filenames, sys.stdout)