mercurial/cffi/osutil.py @ 39270:37e56607cbb9


# osutil.py - CFFI version of osutil.c
#
# Copyright 2016 Maciej Fijalkowski <fijall@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import stat as statmod

from ..pure.osutil import *

from .. import (
    pycompat,
)

if pycompat.isdarwin:
    from . import _osutil

    ffi = _osutil.ffi
    lib = _osutil.lib

    listdir_batch_size = 4096
    # tweakable number, only affects performance; it controls the size of
    # the chunks of bytes we get back from each getattrlistbulk() call

    attrkinds = [None] * 20 # must cover the largest VXXX enum value; 20 is plenty

    attrkinds[lib.VREG] = statmod.S_IFREG
    attrkinds[lib.VDIR] = statmod.S_IFDIR
    attrkinds[lib.VLNK] = statmod.S_IFLNK
    attrkinds[lib.VBLK] = statmod.S_IFBLK
    attrkinds[lib.VCHR] = statmod.S_IFCHR
    attrkinds[lib.VFIFO] = statmod.S_IFIFO
    attrkinds[lib.VSOCK] = statmod.S_IFSOCK
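    # (obj_type as reported by getattrlistbulk uses the VXXX vnode types;
    # the table above translates them to the stat module's S_IF* constants)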

    class stat_res(object):
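        """A minimal stat-result-like object holding just the st_mode,
        st_mtime and st_size fields filled in by listdirinternal()."""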
        def __init__(self, st_mode, st_mtime, st_size):
            self.st_mode = st_mode
            self.st_mtime = st_mtime
            self.st_size = st_size

    tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec")
    buf = ffi.new("char[]", listdir_batch_size)

    def listdirinternal(dfd, req, stat, skip):
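        """Read the directory open at ``dfd`` in batches via getattrlistbulk().

        Returns a list of (name, kind) tuples, or (name, kind, stat_res)
        tuples when ``stat`` is True.  If ``skip`` names an entry that turns
        out to be a directory, an empty list is returned instead.
        """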
        ret = []
        while True:
            r = lib.getattrlistbulk(dfd, req, buf, listdir_batch_size, 0)
            if r == 0:
                break
            if r == -1:
                raise OSError(ffi.errno, os.strerror(ffi.errno))
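            # r is the number of entries in this batch; each entry is a
            # variable-length val_attrs_t record whose first field holds the
            # record's own length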
            cur = ffi.cast("val_attrs_t*", buf)
            for i in range(r):
                lgt = cur.length
                assert lgt == ffi.cast('uint32_t*', cur)[0]
                ofs = cur.name_info.attr_dataoffset
                str_lgt = cur.name_info.attr_length
                base_ofs = ffi.offsetof('val_attrs_t', 'name_info')
                name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs,
                           str_lgt - 1))
                tp = attrkinds[cur.obj_type]
                if name == "." or name == "..":
                    continue
                if skip == name and tp == statmod.S_IFDIR:
                    return []
                if stat:
                    mtime = cur.mtime.tv_sec
                    mode = (cur.accessmask & ~lib.S_IFMT) | tp
                    ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime,
                                st_size=cur.datalength)))
                else:
                    ret.append((name, tp))
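                # advance to the next variable-length record in this batch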
                cur = ffi.cast("val_attrs_t*", int(ffi.cast("intptr_t", cur))
                    + lgt)
        return ret

    def listdir(path, stat=False, skip=None):
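        """Return the entries of the directory at ``path``.

        Each entry is a (name, kind) tuple, or (name, kind, stat_res) when
        ``stat`` is True.  If ``skip`` is given and a subdirectory of that
        name exists, an empty list is returned instead, mirroring the
        pure-Python osutil.listdir imported above.
        """
        # request name, object type, access mode, modification time and file
        # size for every entry returned by getattrlistbulk()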
        req = ffi.new("struct attrlist*")
        req.bitmapcount = lib.ATTR_BIT_MAP_COUNT
        req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS |
                          lib.ATTR_CMN_NAME |
                          lib.ATTR_CMN_OBJTYPE |
                          lib.ATTR_CMN_ACCESSMASK |
                          lib.ATTR_CMN_MODTIME)
        req.fileattr = lib.ATTR_FILE_DATALENGTH
        dfd = lib.open(path, lib.O_RDONLY, 0)
        if dfd == -1:
            raise OSError(ffi.errno, os.strerror(ffi.errno))

        try:
            ret = listdirinternal(dfd, req, stat, skip)
        finally:
            try:
                lib.close(dfd)
            except BaseException:
                # we ignore all the errors from closing; there is not
                # much we can do about that
                pass
        return ret
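
    # A minimal usage sketch (hypothetical path; this module only overrides
    # listdir() on Darwin, where the cffi _osutil extension is built):
    #
    #   for name, kind, st in listdir(b'/tmp', stat=True):
    #       if kind == statmod.S_IFDIR:
    #           print(name, st.st_mtime, st.st_size)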