changegroup: compute seen files as changesets are added (issue4750)
Before this patch, addchangegroup() would walk the changelog and compute
the set of seen files between applying changesets and applying
manifests. When cloning large repositories such as mozilla-central,
this consumed a non-trivial amount of time. On my MBP, this walk takes
~10s. On a modest EC2 instance, it was measured to take ~125s! On the
latter machine, this delay was long enough for the Mercurial server to
disconnect the client, believing it had timed out, thus causing the
clone to abort.
This patch enables the changelog to compute the set of changed files as
new revisions are added (a toy sketch of the idea follows the list
below). By doing so, we:
* avoid a potentially heavy computation between changelog and manifest
  processing by spreading the computation across all changelog additions
* avoid extra reads from the changelog by operating on the data as it is
  added
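To make the shape of the change concrete, here is a self-contained toy
model (invented names only, not Mercurial's changelog API) contrasting
the old post-hoc walk with the per-revision callback:

  # Toy model: a log that can either be re-walked after the fact or
  # report each entry's files as it is added.
  class ToyLog(object):
      def __init__(self):
          self.entries = []

      def add(self, files, callback=None):
          self.entries.append(files)
          if callback is not None:
              callback(files)    # hand the files over as we go

  revs = [['a', 'b'], ['b'], ['c']]

  # Old approach: add everything, then walk the log again to collect files.
  log = ToyLog()
  for files in revs:
      log.add(files)
  seen_old = set(f for files in log.entries for f in files)

  # New approach: accumulate the same set via a per-revision callback, so
  # no separate walk is needed between changelog and manifest processing.
  seen_new = set()
  log = ToyLog()
  for files in revs:
      log.add(files, callback=seen_new.update)

  assert seen_old == seen_new == set(['a', 'b', 'c'])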
The downside is that the add-revision callback results in extra I/O.
Before, we would perform a flush (and a subsequent read to construct
the full revision) only when new delta chains were created. For
changelogs, this is typically every 2-4 revisions. Using the callback
guarantees a flush after every added revision *and* an open + read of
the changelog to obtain the full revision in order to read the added
files. So, this increases the frequency of these operations by roughly
a factor of the average chain length. In the future, the revlog should
be smart enough to read revisions that haven't been flushed yet, thus
eliminating this extra I/O.
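As a generic illustration of where the extra I/O comes from (plain file
I/O standing in for revlog writes; nothing here is Mercurial code):
reading back a just-added, still-buffered write requires flushing it
first, and the per-revision callback forces that flush on every
revision:

  import os, tempfile

  path = os.path.join(tempfile.mkdtemp(), 'demo.i')
  writer = open(path, 'ab')
  writer.write('new revision data')    # typically sits in the writer's buffer

  reader = open(path, 'rb')
  print len(reader.read())    # usually 0: unflushed data is invisible

  writer.flush()    # what the per-revision callback forces every time
  reader.seek(0)
  print len(reader.read())    # 17: an independent reader can now see it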
On my MBP, the total CPU time for an `hg unbundle` of a local
mozilla-central gzip bundle containing 251,934 changesets and 211,065
files showed no statistically significant change with this patch,
holding steady around 360s. So the increased revlog flushing did not
have a measurable impact on CPU time.
With this patch, there is no longer a visible pause between applying
changeset and manifest data. Before, Mercurial felt lethargic making
this transition; now the transition is nearly instantaneous, giving the
impression that Mercurial is faster. Of course, eliminating this pause
also removes the potential for a network disconnect due to channel
inactivity during the changelog walk, and that is the impetus behind
this change.
#!/usr/bin/env python
__doc__ = """Tiny HTTP Proxy.
This module implements GET, HEAD, POST, PUT and DELETE methods
on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
method is also implemented experimentally, but has not been
tested yet.
Any help will be greatly appreciated. SUZUKI Hisao
"""
__version__ = "0.2.1"
import BaseHTTPServer, select, socket, SocketServer, urlparse, os
class ProxyHandler (BaseHTTPServer.BaseHTTPRequestHandler):
    __base = BaseHTTPServer.BaseHTTPRequestHandler
    __base_handle = __base.handle

    server_version = "TinyHTTPProxy/" + __version__
    rbufsize = 0    # self.rfile should be unbuffered

    def handle(self):
        # Enforce the optional client allow-list before dispatching the
        # request to the base handler.
        (ip, port) = self.client_address
        allowed = getattr(self, 'allowed_clients', None)
        if allowed is not None and ip not in allowed:
            self.raw_requestline = self.rfile.readline()
            if self.parse_request():
                self.send_error(403)
        else:
            self.__base_handle()

    def log_request(self, code='-', size='-'):
        # Log the request line plus any x-* headers.
        xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
        self.log_message('"%s" %s %s%s',
                         self.requestline, str(code), str(size),
                         ''.join([' %s:%s' % h for h in sorted(xheaders)]))

    def _connect_to(self, netloc, soc):
        # Connect soc to the host[:port] named in netloc (port 80 by
        # default). Returns 1 on success, 0 after sending a 404 to the client.
        i = netloc.find(':')
        if i >= 0:
            host_port = netloc[:i], int(netloc[i + 1:])
        else:
            host_port = netloc, 80
        print "\t" "connect to %s:%d" % host_port
        try:
            soc.connect(host_port)
        except socket.error as arg:
            try:
                msg = arg[1]
            except (IndexError, TypeError):
                msg = arg
            self.send_error(404, msg)
            return 0
        return 1

    def do_CONNECT(self):
        # Open a TCP tunnel to the requested host and relay bytes both ways.
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(self.path, soc):
                self.log_request(200)
                self.wfile.write(self.protocol_version +
                                 " 200 Connection established\r\n")
                self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
                self.wfile.write("\r\n")
                self._read_write(soc, 300)
        finally:
            print "\t" "bye"
            soc.close()
            self.connection.close()

    def do_GET(self):
        # Forward the request to the origin server and relay the response.
        (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
            self.path, 'http')
        if scm != 'http' or fragment or not netloc:
            self.send_error(400, "bad url %s" % self.path)
            return
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(netloc, soc):
                self.log_request()
                soc.send("%s %s %s\r\n" % (
                    self.command,
                    urlparse.urlunparse(('', '', path, params, query, '')),
                    self.request_version))
                self.headers['Connection'] = 'close'
                del self.headers['Proxy-Connection']
                for key_val in self.headers.items():
                    soc.send("%s: %s\r\n" % key_val)
                soc.send("\r\n")
                self._read_write(soc)
        finally:
            print "\t" "bye"
            soc.close()
            self.connection.close()

    def _read_write(self, soc, max_idling=20):
        # Shuttle data between the client connection and soc until select
        # reports an exceptional condition, a socket error occurs, or the
        # link has been idle for max_idling select timeouts (3s each).
        iw = [self.connection, soc]
        ow = []
        count = 0
        while True:
            count += 1
            (ins, _, exs) = select.select(iw, ow, iw, 3)
            if exs:
                break
            if ins:
                for i in ins:
                    if i is soc:
                        out = self.connection
                    else:
                        out = soc
                    try:
                        data = i.recv(8192)
                    except socket.error:
                        break
                    if data:
                        out.send(data)
                        count = 0
            else:
                print "\t" "idle", count
            if count == max_idling:
                break

    do_HEAD = do_GET
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE = do_GET

class ThreadingHTTPServer (SocketServer.ThreadingMixIn,
                           BaseHTTPServer.HTTPServer):
    def __init__(self, *args, **kwargs):
        BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
        # Record the server's pid so wrappers and tests can find and kill it.
        a = open("proxy.pid", "w")
        a.write(str(os.getpid()) + "\n")
        a.close()

if __name__ == '__main__':
    from sys import argv
    if argv[1:] and argv[1] in ('-h', '--help'):
        print argv[0], "[port [allowed_client_name ...]]"
    else:
        if argv[2:]:
            # Restrict service to the named clients; the remaining argv is
            # left for BaseHTTPServer.test(), which reads the port from
            # argv[1].
            allowed = []
            for name in argv[2:]:
                client = socket.gethostbyname(name)
                allowed.append(client)
                print "Accept: %s (%s)" % (client, name)
            ProxyHandler.allowed_clients = allowed
            del argv[2:]
        else:
            print "Any clients will be served..."
        BaseHTTPServer.test(ProxyHandler, ThreadingHTTPServer)
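# Example usage (illustrative only; the script name, port and URL below are
# arbitrary and not mandated by this module). Start the proxy with a port:
#
#   $ python tinyproxy.py 8000
#
# and, from another Python 2 process, route a request through it:
#
#   import urllib2
#   opener = urllib2.build_opener(
#       urllib2.ProxyHandler({'http': 'http://127.0.0.1:8000'}))
#   print opener.open('http://example.com/').read()[:80]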