tests/tinyproxy.py
author Matt Harbison <matt_harbison@yahoo.com>
Tue, 21 Jan 2020 11:32:33 -0500
changeset 44320 43eea17ae7b3
parent 43076 2372284d9457
child 45849 c102b704edb5
permissions -rwxr-xr-x
lfs: fix the stall and corruption issue when concurrently uploading blobs

We've avoided the issue up to this point by gating worker usage with an
experimental config.  See 10e62d5efa73, and the thread linked there for some
of the initial diagnosis, but essentially some data was being read from the
blob before an error occurred and `keepalive` retried, but didn't rewind the
file pointer.  So the leading data was lost from the blob on the server, and
the connection stalled, trying to send more data than was available.

In trying to recreate this, I was unable to do so uploading from Windows to
CentOS 7.  But it reproduced every time going from CentOS 7 to another
CentOS 7 over https.

I found recent fixes in the FaceBook repo to address this[1][2].  The commit
message for the first is:

    The KeepAlive HTTP implementation is bugged in its retry logic: it
    supports reading from a file pointer, but doesn't support rewinding of
    the seek cursor when it performs a retry.  So it can happen that an
    upload fails for whatever reason and will then 'hang' on the retry event.

    The sequence of events that gets triggered is:

    - Upload file A, goes OK.  Keep-Alive caches the connection.
    - Upload file B, fails due to (for example) a failing Keep-Alive, but
      the LFS file pointer has been consumed for the upload and the fd has
      been closed.
    - Retry for file B starts, sets the Content-Length properly to the
      expected file size, but since the file pointer has been consumed no
      data will be uploaded, causing the server to wait for the uploaded
      data until either the client or the server reaches a timeout, making
      it seem as if our mercurial process hangs.

    This is just a stop-gap measure to prevent this behavior from blocking
    Mercurial (LFS has retry logic).  A proper solution needs to be built on
    top of this stop-gap measure: for uploads from file pointers, we should
    support fseek() on the interface.  Since we expect to always consume the
    whole file anyway, this should be safe.  This way we can seek back to
    the beginning on a retry.

I ported those two patches, and it works.  But I see that `url._sendfile()`
does a rewind on `httpsendfile` objects[3], so maybe it's better to keep this
all in one place and avoid a second seek.  We may still want the first
FaceBook patch as extra protection for this problem in general.  The other
two uses of `httpsendfile` are in the wire protocol to upload bundles, and to
upload largefiles.  Neither of these appear to use a worker, and I'm not sure
why workers seem to trigger this, or if this could have happened without a
worker.

Since `httpsendfile` already has a `close()` method, that is dropped.  That
class also explicitly says there's no `__len__` attribute, so that is removed
too.  The override for `read()` is necessary to avoid the progressbar usage
per file.

[1] https://github.com/facebookexperimental/eden/commit/c350d6536d90c044c837abdd3675185644481469
[2] https://github.com/facebookexperimental/eden/commit/77f0d3fd0415e81b63e317e457af9c55c46103ee
[3] https://www.mercurial-scm.org/repo/hg/file/5.2.2/mercurial/url.py#l176

Differential Revision: https://phab.mercurial-scm.org/D7962
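
As an illustration of the fix described above, here is a minimal sketch (not
Mercurial's actual implementation; `send_with_rewind`, `send_request`, and
the retry policy are hypothetical) of rewinding a seekable request body
before each attempt:

    def send_with_rewind(send_request, body, max_retries=2):
        # Rewind the body before every attempt so a retry resends the whole
        # payload.  Without the seek(0), a retried request still advertises
        # the full Content-Length but has no bytes left to send, and the
        # server stalls waiting for data that never arrives.
        for attempt in range(max_retries + 1):
            if hasattr(body, 'seek'):
                body.seek(0)
            try:
                return send_request(body)
            except OSError:
                if attempt == max_retries:
                    raise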

#!/usr/bin/env python

from __future__ import absolute_import, print_function

__doc__ = """Tiny HTTP Proxy.

This module implements the GET, HEAD, POST, PUT and DELETE methods
on top of BaseHTTPServer, and behaves as an HTTP proxy.  The CONNECT
method is implemented as well, but only experimentally; it has not
been tested yet.

Any help will be greatly appreciated.           SUZUKI Hisao
"""

__version__ = "0.2.1"

import optparse
import os
import select
import socket
import sys

from mercurial import (
    pycompat,
    util,
)

httpserver = util.httpserver
socketserver = util.socketserver
urlreq = util.urlreq

# The test runner sets HGIPV6=1 when sockets should use IPv6.
if os.environ.get('HGIPV6', '0') == '1':
    family = socket.AF_INET6
else:
    family = socket.AF_INET


class ProxyHandler(httpserver.basehttprequesthandler):
    __base = httpserver.basehttprequesthandler
    __base_handle = __base.handle

    server_version = "TinyHTTPProxy/" + __version__
    rbufsize = 0  # make self.rfile unbuffered

    def handle(self):
        (ip, port) = self.client_address
        allowed = getattr(self, 'allowed_clients', None)
        if allowed is not None and ip not in allowed:
            # Reject clients outside the allow list, but still read and
            # parse the request line so a proper 403 response can be sent.
            self.raw_requestline = self.rfile.readline()
            if self.parse_request():
                self.send_error(403)
        else:
            self.__base_handle()

    def log_request(self, code='-', size='-'):
        xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
        self.log_message(
            '"%s" %s %s%s',
            self.requestline,
            str(code),
            str(size),
            ''.join([' %s:%s' % h for h in sorted(xheaders)]),
        )
        # Flush for Windows, so output isn't lost on TerminateProcess()
        sys.stdout.flush()
        sys.stderr.flush()

    def _connect_to(self, netloc, soc):
        # Split "host:port" out of the request target, defaulting to port 80.
        # Returns 1 on success, 0 after reporting the failure to the client.
        i = netloc.find(':')
        if i >= 0:
            host_port = netloc[:i], int(netloc[i + 1 :])
        else:
            host_port = netloc, 80
        print("\tconnect to %s:%d" % host_port)
        try:
            soc.connect(host_port)
        except socket.error as arg:
            try:
                msg = arg[1]
            except (IndexError, TypeError):
                msg = arg
            self.send_error(404, msg)
            return 0
        return 1

    def do_CONNECT(self):
        # Open a raw TCP connection to the requested host and relay bytes in
        # both directions until one side goes quiet (see _read_write).
        soc = socket.socket(family, socket.SOCK_STREAM)
        try:
            if self._connect_to(self.path, soc):
                self.log_request(200)
                self.wfile.write(
                    pycompat.bytestr(self.protocol_version)
                    + b" 200 Connection established\r\n"
                )
                self.wfile.write(
                    b"Proxy-agent: %s\r\n"
                    % pycompat.bytestr(self.version_string())
                )
                self.wfile.write(b"\r\n")
                self._read_write(soc, 300)
        finally:
            print("\t" "bye")
            soc.close()
            self.connection.close()

    def do_GET(self):
        # A proxied request carries an absolute URL in the request line;
        # split out the host to dial and the path to forward.
        (scm, netloc, path, params, query, fragment) = urlreq.urlparse(
            self.path, 'http'
        )
        if scm != 'http' or fragment or not netloc:
            self.send_error(400, "bad url %s" % self.path)
            return
        soc = socket.socket(family, socket.SOCK_STREAM)
        try:
            if self._connect_to(netloc, soc):
                self.log_request()
                url = urlreq.urlunparse(('', '', path, params, query, ''))
                soc.send(
                    b"%s %s %s\r\n"
                    % (
                        pycompat.bytestr(self.command),
                        pycompat.bytestr(url),
                        pycompat.bytestr(self.request_version),
                    )
                )
                # Force a fresh connection per request, and drop the
                # hop-by-hop Proxy-Connection header before forwarding.
                self.headers['Connection'] = 'close'
                del self.headers['Proxy-Connection']
                for key, val in self.headers.items():
                    soc.send(
                        b"%s: %s\r\n"
                        % (pycompat.bytestr(key), pycompat.bytestr(val))
                    )
                soc.send(b"\r\n")
                self._read_write(soc)
        finally:
            print("\t" "bye")
            soc.close()
            self.connection.close()

    def _read_write(self, soc, max_idling=20):
        # Relay data between the client connection and the remote socket
        # until an exceptional condition or prolonged idleness occurs.
        iw = [self.connection, soc]
        ow = []
        count = 0
        while True:
            count += 1
            # Wait up to 3 seconds for either socket to become readable.
            (ins, _, exs) = select.select(iw, ow, iw, 3)
            if exs:
                break
            if ins:
                for i in ins:
                    # Whatever arrives on one socket is written to the other.
                    if i is soc:
                        out = self.connection
                    else:
                        out = soc
                    try:
                        data = i.recv(8192)
                    except socket.error:
                        break
                    if data:
                        out.send(data)
                        count = 0
            else:
                print("\tidle", count)
            if count == max_idling:
                break

    # HEAD, POST, PUT and DELETE are proxied just like GET: the request line
    # and headers are forwarded, and _read_write() relays any request body.
    do_HEAD = do_GET
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE = do_GET


class ThreadingHTTPServer(socketserver.ThreadingMixIn, httpserver.httpserver):
    def __init__(self, *args, **kwargs):
        httpserver.httpserver.__init__(self, *args, **kwargs)
        # Record this server's pid so callers (e.g. a test harness) can find
        # and kill the proxy later.
        with open("proxy.pid", "w") as a:
            a.write(str(os.getpid()) + "\n")


def runserver(port=8000, bind=""):
    server_address = (bind, port)
    ProxyHandler.protocol_version = "HTTP/1.0"
    httpd = ThreadingHTTPServer(server_address, ProxyHandler)
    sa = httpd.socket.getsockname()
    print("Serving HTTP on", sa[0], "port", sa[1], "...")
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("\nKeyboard interrupt received, exiting.")
        httpd.server_close()
        sys.exit(0)


if __name__ == '__main__':
    argv = sys.argv
    if argv[1:] and argv[1] in ('-h', '--help'):
        print(argv[0], "[port [allowed_client_name ...]]")
    else:
        if argv[2:]:
            # Arguments after the port name the clients allowed to connect.
            allowed = []
            for name in argv[2:]:
                client = socket.gethostbyname(name)
                allowed.append(client)
                print("Accept: %s (%s)" % (client, name))
            ProxyHandler.allowed_clients = allowed
            del argv[2:]
        else:
            print("Any clients will be served...")

        parser = optparse.OptionParser()
        parser.add_option(
            '-b',
            '--bind',
            metavar='ADDRESS',
            help='Specify alternate bind address [default: all interfaces]',
            default='',
        )
        (options, args) = parser.parse_args()
        port = 8000
        if len(args) == 1:
            port = int(args[0])
        runserver(port, options.bind)
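
# In a test script, the proxy would typically be started in the background
# and its pid file registered for cleanup; a hypothetical sketch:
#
#     tinyproxy.py $HGPORT1 localhost >proxy.log 2>&1 </dev/null &
#     cat proxy.pid >> $DAEMON_PIDS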