view tests/killdaemons.py @ 23976:344939126579 stable

largefiles: don't interfere with logging normal files

The previous code was adding standin files to the matcher's file list when
neither the standin file nor the original existed in the context. Somehow,
this was confusing the logging code into behaving differently from when the
extension wasn't loaded. It seems that this was an attempt to support naming
a directory that only contains largefiles, as a test fails if the else clause
is dropped entirely. Therefore, only append the "standin" if it is a
directory.

This was found by running the test suite with --config extensions.largefiles=.
The first added test used to log an additional cset that wasn't logged
normally. The only relation it had to file 'a' is that 'a' was the source of
a move, but it isn't clear why having '.hglf/a' in the list causes this
change:

  @@ -47,6 +47,11 @@
   Make sure largefiles doesn't interfere with logging a regular file
     $ hg log a --config extensions.largefiles=
  +  changeset:   3:2ca5ba701980
  +  user:        test
  +  date:        Thu Jan 01 00:00:04 1970 +0000
  +  summary:     d
  +
     changeset:   0:9161b9aeaf16
     user:        test
     date:        Thu Jan 01 00:00:01 1970 +0000

The second added test used to complain about a file not being in the parent
revision:

  @@ -1638,10 +1643,8 @@
   Ensure that largefiles doesn't intefere with following a normal file
     $ hg --config extensions.largefiles= log -f d -T '{desc}' -G
  -  @  c
  -  |
  -  o  a
  -
  +  abort: cannot follow file not in parent revision: ".hglf/d"
  +  [255]
     $ hg log -f d/a -T '{desc}' -G
     @  c
     |

Note that there is still something fishy with the largefiles code, because
when using a glob pattern like this:

  $ hg log 'glob:sub/*'

the pattern list would contain '.hglf/glob:sub/*'. None of the tests show
this (this test lives in test-largefiles.t at line 1349), it was just
something that I noticed when the code was loaded up with print statements.
author Matt Harbison <matt_harbison@yahoo.com>
date Fri, 30 Jan 2015 20:44:11 -0500
parents 476069509e72
children 0adc22a0b6b3
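
The fix described in the changeset message above amounts to a guard when
translating a requested path into its '.hglf/' standin: only fall back to the
standin when it actually names a directory (so naming a directory that
contains only largefiles keeps working), while paths that exist in neither
form no longer pollute the matcher's file list. A minimal, self-contained
sketch of that decision, using hypothetical helpers (standin(), pattern_for(),
ctx_files, repo_root) rather than the real largefiles or matcher APIs:

    import os

    def standin(path):
        # hypothetical stand-in for the largefiles helper: map a path
        # to its '.hglf/' counterpart
        return os.path.join('.hglf', path)

    def pattern_for(path, ctx_files, repo_root):
        """Decide which pattern to hand to the matcher for 'path'.

        ctx_files: set of files known to the changeset context
        repo_root: working copy root, used only for the directory check
        """
        st = standin(path)
        if st in ctx_files:
            # the path is tracked as a largefile: match its standin
            return st
        if path in ctx_files:
            # a plain tracked file: leave the pattern alone
            return path
        # neither form is a file in the context; only fall back to the
        # standin when it names a directory, i.e. the user asked for a
        # directory that contains only largefiles
        if os.path.isdir(os.path.join(repo_root, st)):
            return st
        return path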
line source

#!/usr/bin/env python

import os, sys, time, errno, signal

if os.name == 'nt':
    import ctypes

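    # Raise a WinError when a Win32 call returns 0, unless GetLastError()
    # matches expectederr (used to ignore "process already gone").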
    def _check(ret, expectederr=None):
        if ret == 0:
            winerrno = ctypes.GetLastError()
            if winerrno == expectederr:
                return True
            raise ctypes.WinError(winerrno)

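    # Terminate a daemon via the Win32 API: open a handle, call
    # TerminateProcess if it is still running, then wait briefly for it
    # to go away.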
    def kill(pid, logfn, tryhard=True):
        logfn('# Killing daemon process %d' % pid)
        PROCESS_TERMINATE = 1
        PROCESS_QUERY_INFORMATION = 0x400
        SYNCHRONIZE = 0x00100000
        WAIT_OBJECT_0 = 0
        WAIT_TIMEOUT = 258
        handle = ctypes.windll.kernel32.OpenProcess(
                PROCESS_TERMINATE|SYNCHRONIZE|PROCESS_QUERY_INFORMATION,
                False, pid)
        if handle == 0:
            _check(0, 87) # err 87 when process not found
            return # process not found, already finished
        try:
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass # terminated, but process handle still available
            elif r == WAIT_TIMEOUT:
                _check(ctypes.windll.kernel32.TerminateProcess(handle, -1))
            else:
                _check(r)

            # TODO?: kill forcefully if the wait times out, and perhaps use
            #        a shorter wait when tryhard==True
            # wait up to 100 ms for the process to terminate
            r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100)
            if r == WAIT_OBJECT_0:
                pass # process is terminated
            elif r == WAIT_TIMEOUT:
                logfn('# Daemon process %d is stuck' % pid)
            else:
                _check(r) # any error
        except: # re-raises
            ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error
            raise
        _check(ctypes.windll.kernel32.CloseHandle(handle))

else:
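    # POSIX: probe with signal 0, ask politely with SIGTERM, and fall back
    # to SIGKILL if the process is still around afterwards.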
    def kill(pid, logfn, tryhard=True):
        try:
            os.kill(pid, 0)
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
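            # poll with signal 0; once the process is gone, ESRCH is raised
            # and swallowed below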
            if tryhard:
                for i in range(10):
                    time.sleep(0.05)
                    os.kill(pid, 0)
            else:
                time.sleep(0.1)
                os.kill(pid, 0)
            logfn('# Daemon process %d is stuck - really killing it' % pid)
            os.kill(pid, signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise

def killdaemons(pidfile, tryhard=True, remove=False, logfn=None):
    if not logfn:
        logfn = lambda s: s
    # Kill off any leftover daemon processes
    try:
        fp = open(pidfile)
        for line in fp:
            try:
                pid = int(line)
            except ValueError:
                continue
            kill(pid, logfn, tryhard)
        fp.close()
        if remove:
            os.unlink(pidfile)
    except IOError:
        pass

if __name__ == '__main__':
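    # the single command line argument is the path to the pid file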
    path, = sys.argv[1:]
    killdaemons(path)