view tests/killdaemons.py @ 33289:abd7dedbaa36

sparse: vendor Facebook-developed extension

Facebook has developed an extension to enable "sparse" checkouts - a
working directory with a subset of files. This feature is a critical
component in enabling repositories to scale to an infinite number of
files while retaining reasonable performance. It's worth noting that
sparse checkout is only one possible solution to this problem: another
is virtual filesystems that realize files on first access. But given
that virtual filesystems may not be accessible to all users, sparse
checkout is necessary as a fallback.

Per the mailing list discussion at
https://www.mercurial-scm.org/pipermail/mercurial-devel/2017-March/095868.html
we want to add sparse checkout to the Mercurial distribution via roughly
the following mechanism:

1. Vendor the extension as-is with minimal modifications (this patch)
2. Refactor the extension so it is more clearly experimental and in line
   with Mercurial practices
3. Move code from the extension into core where possible
4. Drop the experimental labeling and/or move the feature into core
   after sign-off from the narrow clone feature owners

This commit essentially copies the sparse extension and tests from
revision 71e0a2aeca92a4078fe1b8c76e32c88ff1929737 of the
https://bitbucket.org/facebook/hg-experimental repository. A list of
modifications made as part of vendoring is as follows:

* "EXPERIMENTAL" added to the module docstring
* Imports were changed to match Mercurial style conventions
* The "testedwith" value was updated to the core Mercurial special value
  and comment boilerplate was inserted
* A "clone_sparse" function was renamed to "clonesparse" to appease the
  style checker
* Paths to the sparse extension in tests reflect its built-in location
* test-sparse-extensions.t was renamed to test-sparse-fsmonitor.t and
  references to "simplecache" were removed. The test always skips
  because it isn't trivial to run given the way we currently run
  fsmonitor tests
* A double empty line was removed from test-sparse-profiles.t

There are aspects of the added code that are obviously not ideal. The
goal is to make a minimal number of modifications as part of the
vendoring to make it easier to track changes from the original
implementation. Refactoring will occur in subsequent patches.
author Gregory Szorc <gregory.szorc@gmail.com>
date Sat, 01 Jul 2017 10:43:29 -0700
parents ed1f376090cd
children 89793289c891

#!/usr/bin/env python
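"""Kill leftover daemon processes.

Process ids are read from a pid file (given as the first command line
argument or taken from the DAEMON_PIDS environment variable) and each
listed process is terminated, on both Windows and POSIX systems.
"""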

from __future__ import absolute_import
import errno
import os
import signal
import sys
import time

if os.name == 'nt':
    import ctypes

    _BOOL = ctypes.c_long
    _DWORD = ctypes.c_ulong
    _UINT = ctypes.c_uint
    _HANDLE = ctypes.c_void_p

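    # Declare argument and return types for the kernel32 functions used
    # below so that ctypes marshals handles and DWORDs correctly.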
    ctypes.windll.kernel32.CloseHandle.argtypes = [_HANDLE]
    ctypes.windll.kernel32.CloseHandle.restype = _BOOL

    ctypes.windll.kernel32.GetLastError.argtypes = []
    ctypes.windll.kernel32.GetLastError.restype = _DWORD

    ctypes.windll.kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
    ctypes.windll.kernel32.OpenProcess.restype = _HANDLE

    ctypes.windll.kernel32.TerminateProcess.argtypes = [_HANDLE, _UINT]
    ctypes.windll.kernel32.TerminateProcess.restype = _BOOL

    ctypes.windll.kernel32.WaitForSingleObject.argtypes = [_HANDLE, _DWORD]
    ctypes.windll.kernel32.WaitForSingleObject.restype = _DWORD

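    # Raise WinError when a Windows API call reports failure (ret == 0),
    # unless the error code matches expectederr, in which case the
    # failure is treated as success.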
    def _check(ret, expectederr=None):
        if ret == 0:
            winerrno = ctypes.GetLastError()
            if winerrno == expectederr:
                return True
            raise ctypes.WinError(winerrno)

    def kill(pid, logfn, tryhard=True):
        logfn('# Killing daemon process %d' % pid)
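        # Win32 process access rights and WaitForSingleObject return codes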
        PROCESS_TERMINATE = 1
        PROCESS_QUERY_INFORMATION = 0x400
        SYNCHRONIZE = 0x00100000
        WAIT_OBJECT_0 = 0
        WAIT_TIMEOUT = 258
        WAIT_FAILED = _DWORD(0xFFFFFFFF).value
        handle = ctypes.windll.kernel32.OpenProcess(
                PROCESS_TERMINATE|SYNCHRONIZE|PROCESS_QUERY_INFORMATION,
                False, pid)
        if handle is None:
            _check(0, 87) # err 87 when process not found
            return # process not found, already finished
        try:
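            # Wait up to 100 ms to see whether the process has already
            # exited before trying to terminate it.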
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass # terminated, but process handle still available
            elif r == WAIT_TIMEOUT:
                _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
            elif r == WAIT_FAILED:
                _check(0)  # err stored in GetLastError()

            # TODO?: forcefully kill when timeout
            #        and ?shorter waiting time? when tryhard==True
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
                                                       # timeout = 100 ms
            if r == WAIT_OBJECT_0:
                pass # process is terminated
            elif r == WAIT_TIMEOUT:
                logfn('# Daemon process %d is stuck' % pid)
            elif r == WAIT_FAILED:
                _check(0)  # err stored in GetLastError()
        except:  # re-raises
            ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error
            raise
        _check(ctypes.windll.kernel32.CloseHandle(handle))

else:
    def kill(pid, logfn, tryhard=True):
        try:
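            # Signal 0 does not kill anything; it only checks that the
            # process exists, raising OSError(ESRCH) otherwise.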
            os.kill(pid, 0)
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
            if tryhard:
                for i in range(10):
                    time.sleep(0.05)
                    os.kill(pid, 0)
            else:
                time.sleep(0.1)
                os.kill(pid, 0)
            logfn('# Daemon process %d is stuck - really killing it' % pid)
            os.kill(pid, signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise

def killdaemons(pidfile, tryhard=True, remove=False, logfn=None):
    if not logfn:
        logfn = lambda s: s
    # Kill off any leftover daemon processes
    try:
        pids = []
        with open(pidfile) as fp:
            for line in fp:
                try:
                    pid = int(line)
                    if pid <= 0:
                        raise ValueError
                except ValueError:
                    logfn('# Not killing daemon process %s - invalid pid'
                          % line.rstrip())
                    continue
                pids.append(pid)
        for pid in pids:
            kill(pid, logfn, tryhard)
        if remove:
            os.unlink(pidfile)
    except IOError:
        pass

if __name__ == '__main__':
    if len(sys.argv) > 1:
        path, = sys.argv[1:]
    else:
        path = os.environ["DAEMON_PIDS"]

    killdaemons(path)