view tests/killdaemons.py @ 29917:f32f8bf5dc4c

streamclone: force @filecache properties to be reloaded from file

Before this patch, consumev1() invokes repo.invalidate() after closing the transaction, to force @filecache properties to be reloaded from files at the next access, because streamclone writes data into files directly. But this doesn't work as expected in the case below:

1. At transaction close, repo._refreshfilecachestats() refreshes the file stat of each @filecache property against the streamclone-ed files. This means the in-memory properties are treated as valid.
2. But streamclone doesn't change the in-memory properties, so they are actually invalid.
3. repo.invalidate() merely forces the file stat of each @filecache property to be examined at the first access after it. That examination concludes that reloading from file isn't needed, because the file stat was already refreshed at (1).

Therefore, the invalid in-memory cached properties (2) are unintentionally treated as valid (1). This patch invokes repo.invalidate() with clearfilecache=True, to force @filecache properties to be reloaded from file at the next access.

BTW, it is only accidental that repo.invalidate() without clearfilecache=True seems to work as expected in the streamclone case before this patch. If the transaction is started via the "filtered repo" object, repo._refreshfilecachestats() tries to refresh the file stat of each @filecache property on the "filtered repo" object, even though all of them are stored on the "unfiltered repo" object. In this case, repo._refreshfilecachestats() unintentionally does nothing, and this unexpected behavior is what causes @filecache properties to be reloaded after repo.invalidate(). This is why this patch should be applied before making _refreshfilecachestats() correctly refresh the file stat of @filecache properties.
author FUJIWARA Katsunori <foozy@lares.dti.ne.jp>
date Mon, 12 Sep 2016 03:06:28 +0900
parents 4ddfb730789d
children f840b2621cce
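
The failure mode described in the changeset description can be reproduced outside Mercurial. Below is a minimal, self-contained Python sketch (it is not Mercurial's filecache implementation; the class and method names are invented for illustration) of a cache keyed on file stat: if the recorded stat is refreshed while the cached value is left stale, an invalidation that merely re-checks the stat keeps serving the stale value, whereas dropping the cached value forces a reload from disk.

import os

class statbackedcache(object):
    """Toy stand-in for a @filecache property: the value is reloaded
    only when the recorded file stat no longer matches the file."""

    def __init__(self, path):
        self.path = path
        self._value = None
        self._stat = None

    def _filestat(self):
        st = os.stat(self.path)
        return (st.st_mtime, st.st_size)

    def read(self):
        if self._value is None or self._stat != self._filestat():
            with open(self.path) as fp:
                self._value = fp.read()   # reload from file
            self._stat = self._filestat()
        return self._value

    def refreshstat(self):
        # analogous to repo._refreshfilecachestats(): record the current
        # on-disk stat without re-reading the contents, so a (possibly
        # stale) in-memory value is treated as matching the file
        self._stat = self._filestat()

    def invalidate(self, clearfilecache=False):
        if clearfilecache:
            # drop the value itself: the next read() must reload from file
            self._value = None
        # without clearfilecache, the next read() only compares stats,
        # which still match after refreshstat(), so the stale value survives

With this toy, writing the file directly and then calling refreshstat() leaves read() returning the old contents after a plain invalidate(); only invalidate(clearfilecache=True) makes read() pick up the new contents, which mirrors the behavior the patch restores for streamclone.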

#!/usr/bin/env python

from __future__ import absolute_import
import errno
import os
import signal
import sys
import time

if os.name == 'nt':
    import ctypes

    def _check(ret, expectederr=None):
        # a zero return value from a Win32 API call indicates failure;
        # tolerate it only when GetLastError() matches expectederr
        if ret == 0:
            winerrno = ctypes.GetLastError()
            if winerrno == expectederr:
                return True
            raise ctypes.WinError(winerrno)

    def kill(pid, logfn, tryhard=True):
        logfn('# Killing daemon process %d' % pid)
        PROCESS_TERMINATE = 1
        PROCESS_QUERY_INFORMATION = 0x400
        SYNCHRONIZE = 0x00100000
        WAIT_OBJECT_0 = 0
        WAIT_TIMEOUT = 258
        handle = ctypes.windll.kernel32.OpenProcess(
                PROCESS_TERMINATE|SYNCHRONIZE|PROCESS_QUERY_INFORMATION,
                False, pid)
        if handle == 0:
            _check(0, 87) # err 87 when process not found
            return # process not found, already finished
        try:
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass # terminated, but process handle still available
            elif r == WAIT_TIMEOUT:
                _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
            else:
                _check(r)

            # TODO: kill the process forcefully on timeout, and perhaps
            #       use a shorter wait when tryhard==True
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
                                                       # timeout = 100 ms
            if r == WAIT_OBJECT_0:
                pass # process is terminated
            elif r == WAIT_TIMEOUT:
                logfn('# Daemon process %d is stuck' % pid)
            else:
                _check(r) # any error
        except: #re-raises
            ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error
            raise
        _check(ctypes.windll.kernel32.CloseHandle(handle))

else:
    def kill(pid, logfn, tryhard=True):
        try:
            # probe first: os.kill(pid, 0) sends no signal, but raises
            # OSError(ESRCH) if the process no longer exists
            os.kill(pid, 0)
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
            if tryhard:
                # poll for up to ~0.5s, waiting for the process to exit
                for i in range(10):
                    time.sleep(0.05)
                    os.kill(pid, 0)
            else:
                time.sleep(0.1)
                os.kill(pid, 0)
            # still alive after SIGTERM: escalate to SIGKILL
            logfn('# Daemon process %d is stuck - really killing it' % pid)
            os.kill(pid, signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise

def killdaemons(pidfile, tryhard=True, remove=False, logfn=None):
    if not logfn:
        logfn = lambda s: s
    # Kill off any leftover daemon processes
    try:
        with open(pidfile) as fp:
            for line in fp:
                try:
                    pid = int(line)
                    if pid <= 0:
                        raise ValueError
                except ValueError:
                    logfn('# Not killing daemon process %s - invalid pid'
                          % line.rstrip())
                    continue
                kill(pid, logfn, tryhard)
        if remove:
            os.unlink(pidfile)
    except IOError:
        pass

if __name__ == '__main__':
    if len(sys.argv) > 1:
        path, = sys.argv[1:]
    else:
        path = os.environ["DAEMON_PIDS"]

    killdaemons(path)