# osutil.py - CFFI version of osutil.c
#
# Copyright 2016 Maciej Fijalkowski <fijall@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import stat as statmod

from ..pure.osutil import *

from .. import (
    pycompat,
)

if pycompat.isdarwin:
    from . import _osutil

    ffi = _osutil.ffi
    lib = _osutil.lib

    listdir_batch_size = 4096
    # tweakable number that only affects performance: it controls how many
    # bytes of attribute data we ask getattrlistbulk() to return per call

    attrkinds = [None] * 20 # indexed by the VXXX vnode types; 20 is plenty

    attrkinds[lib.VREG] = statmod.S_IFREG
    attrkinds[lib.VDIR] = statmod.S_IFDIR
    attrkinds[lib.VLNK] = statmod.S_IFLNK
    attrkinds[lib.VBLK] = statmod.S_IFBLK
    attrkinds[lib.VCHR] = statmod.S_IFCHR
    attrkinds[lib.VFIFO] = statmod.S_IFIFO
    attrkinds[lib.VSOCK] = statmod.S_IFSOCK

    class stat_res(object):
        def __init__(self, st_mode, st_mtime, st_size):
            self.st_mode = st_mode
            self.st_mtime = st_mtime
            self.st_size = st_size

    tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec")
    buf = ffi.new("char[]", listdir_batch_size)

    def listdirinternal(dfd, req, stat, skip):
        ret = []
        while True:
            r = lib.getattrlistbulk(dfd, req, buf, listdir_batch_size, 0)
            if r == 0:
                break
            if r == -1:
                raise OSError(ffi.errno, os.strerror(ffi.errno))
            cur = ffi.cast("val_attrs_t*", buf)
            for i in range(r):
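                # getattrlistbulk() packed r variable-length val_attrs_t
                # records into buf; each record begins with a uint32_t length
                # field, which is also how we step to the next record below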
                lgt = cur.length
                assert lgt == ffi.cast('uint32_t*', cur)[0]
                ofs = cur.name_info.attr_dataoffset
                str_lgt = cur.name_info.attr_length
                base_ofs = ffi.offsetof('val_attrs_t', 'name_info')
                name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs,
                           str_lgt - 1))
                tp = attrkinds[cur.obj_type]
                if name == "." or name == "..":
                    continue
                if skip == name and tp == statmod.S_IFDIR:
                    return []
                if stat:
                    mtime = cur.mtime.tv_sec
                    mode = (cur.accessmask & ~lib.S_IFMT) | tp
                    ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime,
                                st_size=cur.datalength)))
                else:
                    ret.append((name, tp))
                cur = ffi.cast("val_attrs_t*", int(ffi.cast("intptr_t", cur))
                    + lgt)
        return ret

    def listdir(path, stat=False, skip=None):
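        # request name, object type, permission bits, mtime and data length
        # in a single getattrlistbulk() pass so that stat=True needs no
        # per-entry stat() calls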
        req = ffi.new("struct attrlist*")
        req.bitmapcount = lib.ATTR_BIT_MAP_COUNT
        req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS |
                          lib.ATTR_CMN_NAME |
                          lib.ATTR_CMN_OBJTYPE |
                          lib.ATTR_CMN_ACCESSMASK |
                          lib.ATTR_CMN_MODTIME)
        req.fileattr = lib.ATTR_FILE_DATALENGTH
        dfd = lib.open(path, lib.O_RDONLY, 0)
        if dfd == -1:
            raise OSError(ffi.errno, os.strerror(ffi.errno))

        try:
            ret = listdirinternal(dfd, req, stat, skip)
        finally:
            try:
                lib.close(dfd)
            except BaseException:
                # errors from closing are ignored; there is not much we
                # can do about them
                pass
        return ret
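
    # The helper below is a minimal usage sketch of the CFFI-backed listdir()
    # defined above; the _example_listdir name and the b'/tmp' default path
    # are illustrative only and not part of Mercurial's API.
    def _example_listdir(path=b'/tmp'):
        entries = []
        for name, kind, st in listdir(path, stat=True):
            # with stat=True each entry is (name, kind, stat_res); without
            # it, entries are plain (name, kind) pairs
            entries.append((name, kind, st.st_size))
        return entries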