view hgext/remotefilelog/connectionpool.py @ 43044:f9d35f01b8b3

setup: build extensions in parallel by default

The build_ext distutils command in Python 3.5+ has a "parallel" option
that controls whether to build extensions in parallel. It is disabled by
default (None) and can be set to an integer number of cores, or to True
to use all available CPU cores.

This commit changes our build_ext command override to set "parallel" to
True unless a value has been provided by the caller. On my machine, this
makes `python setup.py build_ext` 1-4s faster.

It is worth noting that at this time, each individual source file
constituting an extension is still built serially. For Mercurial, this
means that we can't build faster than the slowest-to-build extension,
which is the zstd extension by a long shot. So setup.py is still not
very efficient at utilizing multiple cores. But we're better than before.

Differential Revision: https://phab.mercurial-scm.org/D6923

# no-check-commit because of foo_bar naming
author Gregory Szorc <gregory.szorc@gmail.com>
date Mon, 30 Sep 2019 17:26:41 -0700
parents 3a333a582d7b
children 2372284d9457
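A rough sketch of the build_ext override the changeset description refers
to (a sketch only: the finalize_options hook and the exact placement are
assumptions, not taken from Mercurial's setup.py):

    from distutils.command.build_ext import build_ext

    class hgbuildext(build_ext):
        def finalize_options(self):
            # distutils' "parallel" option (Python 3.5+) defaults to None;
            # True means "use all available CPU cores". Only set it if the
            # caller left it unset.
            if self.parallel is None:
                self.parallel = True
            build_ext.finalize_options(self)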

# connectionpool.py - class for pooling peer connections for reuse
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    extensions,
    hg,
    sshpeer,
    util,
)

_sshv1peer = sshpeer.sshv1peer

class connectionpool(object):
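    """Pools peer connections per path so that they can be reused."""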
    def __init__(self, repo):
        self._repo = repo
        self._pool = dict()

    def get(self, path):
        pathpool = self._pool.get(path)
        if pathpool is None:
            pathpool = list()
            self._pool[path] = pathpool

        conn = None
        if pathpool:
            conn = pathpool.pop()
            peer = conn.peer
            # If the pooled ssh connection has died, drop it; a fresh
            # peer is created below.
            if isinstance(peer, _sshv1peer):
                if peer._subprocess.poll() is not None:
                    conn = None

        if conn is None:
            def _cleanup(orig):
                # Close pipee first so that peer.cleanup(), which reads from
                # it, won't deadlock while other processes (i.e. us) still
                # have pipeo open.
                peer = orig.im_self
                if util.safehasattr(peer, 'pipee'):
                    peer.pipee.close()
                return orig()

            peer = hg.peer(self._repo.ui, {}, path)
            if util.safehasattr(peer, 'cleanup'):
                extensions.wrapfunction(peer, 'cleanup', _cleanup)

            conn = connection(pathpool, peer)

        return conn

    def close(self):
        for pathpool in self._pool.itervalues():
            for conn in pathpool:
                conn.close()
            del pathpool[:]

class connection(object):
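    """A pooled peer; as a context manager, a clean exit returns it to
    the pool while an exception closes the underlying peer."""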
    def __init__(self, pool, peer):
        self._pool = pool
        self.peer = peer

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Only add the connection back to the pool if there was no exception,
        # since an exception could mean the connection is not in a reusable
        # state.
        if exc_type is None:
            self._pool.append(self)
        else:
            self.close()

    def close(self):
        if util.safehasattr(self.peer, 'cleanup'):
            self.peer.cleanup()
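
A minimal usage sketch (the repo object and the ssh URL are placeholders,
not taken from this file):

    pool = connectionpool(repo)
    with pool.get('ssh://example.com/repo') as conn:
        # conn.peer is an ordinary Mercurial peer
        conn.peer.lookup('tip')
    # The clean exit returned conn to the pool, so the next get() for the
    # same path reuses the live ssh connection instead of spawning a new
    # process.
    pool.close()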