view tests/killdaemons.py @ 44363:f7459da77f23

nodemap: introduce an option to use mmap to read the nodemap mapping

The performance and memory benefits are much greater if we don't have to copy
all the data into memory every time the information is needed. So we introduce
an option (on by default) to read the data using mmap.

This changeset is the last one defining the API for indexes to support nodemap
data (they have to be able to use the mmapping).

Below are some benchmarks comparing the best we currently have in 5.3 with the
final step of this series (using the persistent nodemap implementation in
Rust). The benchmark runs `hg perfindex` with various revsets and the
following variants:

Before:
* do not use the persistent nodemap
* use the CPython implementation of the index for nodemap
* use mmapping of the changelog index

After:
* use the MixedIndex Rust code, with the NodeTree object for nodemap access
  (still in review)
* use the persistent nodemap data from disk
* access the persistent nodemap data through mmap
* use mmapping of the changelog index

The persistent nodemap greatly speeds up most operations on very large
repositories. Some of the previously very fast lookups end up a bit slower
because the persistent nodemap has to be set up. However, the absolute
slowdown is very small and won't matter in the big picture.

Here are some numbers (in seconds) for the reference copy of mozilla-try:

Revset              Before    After     abs-change  speedup
-10000:             0.004622  0.005532   0.000910   ×    0.83
-10:                0.000050  0.000132   0.000082   ×    0.37
tip                 0.000052  0.000085   0.000033   ×    0.61
0 + (-10000:)       0.028222  0.005337  -0.022885   ×    5.29
0                   0.023521  0.000084  -0.023437   ×  280.01
(-10000:) + 0       0.235539  0.005308  -0.230231   ×   44.37
(-10:) + :9         0.232883  0.000180  -0.232703   × 1293.79
(-10000:) + (:99)   0.238735  0.005358  -0.233377   ×   44.55
:99 + (-10000:)     0.317942  0.005593  -0.312349   ×   56.84
:9 + (-10:)         0.313372  0.000179  -0.313193   × 1750.68
:9                  0.316450  0.000143  -0.316307   × 2212.93

On smaller repositories, the cost of nodemap-related operations is not as big,
so the win is much more modest. Yet it helps shave off a handful of
milliseconds here and there.

Here are some numbers (in seconds) for the reference copy of mercurial:

Revset              Before    After     abs-change  speedup
-10:                0.000065  0.000097   0.000032   ×  0.67
tip                 0.000063  0.000078   0.000015   ×  0.80
0                   0.000561  0.000079  -0.000482   ×  7.10
-10000:             0.004609  0.003648  -0.000961   ×  1.26
0 + (-10000:)       0.005023  0.003715  -0.001307   ×  1.35
(-10:) + :9         0.002187  0.000108  -0.002079   × 20.25
(-10000:) + 0       0.006252  0.003716  -0.002536   ×  1.68
(-10000:) + (:99)   0.006367  0.003707  -0.002660   ×  1.71
:9 + (-10:)         0.003846  0.000110  -0.003736   × 34.96
:9                  0.003854  0.000099  -0.003755   × 38.92
:99 + (-10000:)     0.007644  0.003778  -0.003866   ×  2.02

Differential Revision: https://phab.mercurial-scm.org/D7894
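
As a rough illustration of the mmap-based read described above (a minimal
sketch, not the actual revlog code; the 'nodemap.data' path and the 16-byte
slice are hypothetical), mapping the file lets the index consult the on-disk
nodemap without copying the whole file into memory first:

    import mmap

    nodemap_path = 'nodemap.data'  # hypothetical on-disk nodemap data file
    with open(nodemap_path, 'rb') as fp:
        # The mapping is backed by the page cache: only the pages that are
        # actually touched get read, and nothing is copied up front.
        data = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
    first_block = data[:16]  # slicing copies only the bytes requested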
author Pierre-Yves David <pierre-yves.david@octobus.net>
date Tue, 11 Feb 2020 11:18:52 +0100
parents 2372284d9457
children c102b704edb5

#!/usr/bin/env python

from __future__ import absolute_import
import errno
import os
import signal
import sys
import time

if os.name == 'nt':
    import ctypes

    _BOOL = ctypes.c_long
    _DWORD = ctypes.c_ulong
    _UINT = ctypes.c_uint
    _HANDLE = ctypes.c_void_p

    ctypes.windll.kernel32.CloseHandle.argtypes = [_HANDLE]
    ctypes.windll.kernel32.CloseHandle.restype = _BOOL

    ctypes.windll.kernel32.GetLastError.argtypes = []
    ctypes.windll.kernel32.GetLastError.restype = _DWORD

    ctypes.windll.kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
    ctypes.windll.kernel32.OpenProcess.restype = _HANDLE

    ctypes.windll.kernel32.TerminateProcess.argtypes = [_HANDLE, _UINT]
    ctypes.windll.kernel32.TerminateProcess.restype = _BOOL

    ctypes.windll.kernel32.WaitForSingleObject.argtypes = [_HANDLE, _DWORD]
    ctypes.windll.kernel32.WaitForSingleObject.restype = _DWORD

    def _check(ret, expectederr=None):
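        # Win32 calls return 0 on failure: fetch the thread's last error and
        # raise unless it matches the expected error code.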
        if ret == 0:
            winerrno = ctypes.GetLastError()
            if winerrno == expectederr:
                return True
            raise ctypes.WinError(winerrno)

    def kill(pid, logfn, tryhard=True):
        logfn('# Killing daemon process %d' % pid)
        PROCESS_TERMINATE = 1
        PROCESS_QUERY_INFORMATION = 0x400
        SYNCHRONIZE = 0x00100000
        WAIT_OBJECT_0 = 0
        WAIT_TIMEOUT = 258
        WAIT_FAILED = _DWORD(0xFFFFFFFF).value
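        # open a handle with just enough access rights to wait on and
        # terminate the process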
        handle = ctypes.windll.kernel32.OpenProcess(
            PROCESS_TERMINATE | SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
            False,
            pid,
        )
        if handle is None:
            _check(0, 87)  # err 87 when process not found
            return  # process not found, already finished
        try:
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass  # terminated, but process handle still available
            elif r == WAIT_TIMEOUT:
                _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
            elif r == WAIT_FAILED:
                _check(0)  # err stored in GetLastError()

            # TODO: forcefully kill the process on timeout, and perhaps use a
            #       shorter waiting time when tryhard == True
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            # timeout = 100 ms
            if r == WAIT_OBJECT_0:
                pass  # process is terminated
            elif r == WAIT_TIMEOUT:
                logfn('# Daemon process %d is stuck' % pid)
            elif r == WAIT_FAILED:
                _check(0)  # err stored in GetLastError()
        except:  # re-raises
            ctypes.windll.kernel32.CloseHandle(handle)  # no _check, keep error
            raise
        _check(ctypes.windll.kernel32.CloseHandle(handle))


else:

    def kill(pid, logfn, tryhard=True):
        try:
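            # signal 0 does not deliver anything; it only checks that the
            # process exists and raises OSError(ESRCH) once it is gone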
            os.kill(pid, 0)
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
            if tryhard:
                for i in range(10):
                    time.sleep(0.05)
                    os.kill(pid, 0)
            else:
                time.sleep(0.1)
                os.kill(pid, 0)
            logfn('# Daemon process %d is stuck - really killing it' % pid)
            os.kill(pid, signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise


def killdaemons(pidfile, tryhard=True, remove=False, logfn=None):
    if not logfn:
        logfn = lambda s: s
    # Kill off any leftover daemon processes
    try:
        pids = []
        with open(pidfile) as fp:
            for line in fp:
                try:
                    pid = int(line)
                    if pid <= 0:
                        raise ValueError
                except ValueError:
                    logfn(
                        '# Not killing daemon process %s - invalid pid'
                        % line.rstrip()
                    )
                    continue
                pids.append(pid)
        for pid in pids:
            kill(pid, logfn, tryhard)
        if remove:
            os.unlink(pidfile)
    except IOError:
        pass


if __name__ == '__main__':
    if len(sys.argv) > 1:
        (path,) = sys.argv[1:]
    else:
        path = os.environ["DAEMON_PIDS"]

    killdaemons(path, remove=True)
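
For reference, a minimal usage sketch of the helper above from another Python
script (the 'daemon.pids' path is hypothetical):

    import killdaemons

    # Terminate every daemon listed in the pid file and remove the file
    # afterwards, mirroring the __main__ block above.
    killdaemons.killdaemons('daemon.pids', tryhard=True, remove=True)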