changeset:   116:e484cd5ec282
parent:      115:39b438eeb25a
child:       117:2ac722ad1a9d
author:      mpm@selenic.com
date:        Fri, 20 May 2005 17:35:20 -0800
files:       mercurial/revlog.py
description:
Only use lazy indexing for big indices and avoid the overhead of the lazy index in the small index case.
--- a/mercurial/revlog.py	Fri May 20 17:34:04 2005 -0800
+++ b/mercurial/revlog.py	Fri May 20 17:35:20 2005 -0800
@@ -124,14 +124,34 @@
         self.datafile = datafile
         self.opener = opener
         self.cache = None
-        # read the whole index for now, handle on-demand later
+
         try:
             i = self.opener(self.indexfile).read()
         except IOError:
             i = ""
-        parser = lazyparser(i)
-        self.index = lazyindex(parser)
-        self.nodemap = lazymap(parser)
+
+        if len(i) > 10000:
+            # big index, let's parse it on demand
+            parser = lazyparser(i)
+            self.index = lazyindex(parser)
+            self.nodemap = lazymap(parser)
+        else:
+            s = struct.calcsize(indexformat)
+            l = len(i) / s
+            self.index = [None] * l
+            m = [None] * l
+
+            n = 0
+            for f in xrange(0, len(i), s):
+                # offset, size, base, linkrev, p1, p2, nodeid
+                e = struct.unpack(indexformat, i[f:f + s])
+                m[n] = (e[6], n)
+                self.index[n] = e
+                n += 1
+
+            self.nodemap = dict(m)
+            self.nodemap[nullid] = -1
+            
 
     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
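
With this change, small indices are unpacked eagerly into a plain list and dict instead of going through the lazyparser machinery. Below is a minimal, standalone sketch of that eager path, modernized for Python 3 (range and // in place of xrange and /, bytes in place of str) and assuming a v0-style index format of ">4l20s20s20s" for the seven fields named in the "offset, size, base, linkrev, p1, p2, nodeid" comment; it is an illustration of the technique, not the revlog implementation itself.

    import struct

    nullid = b"\x00" * 20
    # Assumed v0-style entry layout: offset, size, base, linkrev, p1, p2, nodeid
    indexformat = ">4l20s20s20s"

    def parse_small_index(data):
        """Eagerly unpack every index entry and build the node -> rev map."""
        s = struct.calcsize(indexformat)
        count = len(data) // s
        index = [None] * count
        nodemap = {nullid: -1}          # the null revision maps to rev -1
        for n in range(count):
            e = struct.unpack(indexformat, data[n * s:(n + 1) * s])
            index[n] = e
            nodemap[e[6]] = n           # e[6] is the entry's own node id
        return index, nodemap

    # Round-trip a single fabricated entry to show the shape of the data.
    entry = struct.pack(indexformat, 0, 42, 0, 0, nullid, nullid, b"\x01" * 20)
    idx, nmap = parse_small_index(entry)
    assert nmap[b"\x01" * 20] == 0 and idx[0][1] == 42

The 10000-byte cutoff in the patch is a heuristic: below it, unpacking every entry up front costs less than the bookkeeping overhead of the lazy index objects.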