merge crew and main

author       Benoit Boissinot <benoit.boissinot@ens-lyon.org>
date         Mon, 11 Feb 2013 01:21:24 +0100
changeset    18653 170142161672
parent       18652 a5e94bee77ed (current diff)
parent       18651 e556659340f0
child        18655 882681bc3166
files        mercurial/dirstate.py, mercurial/merge.py
--- a/mercurial/dirstate.py	Mon Feb 11 01:17:50 2013 +0100
+++ b/mercurial/dirstate.py	Mon Feb 11 01:21:24 2013 +0100
@@ -9,7 +9,7 @@
 from node import nullid
 from i18n import _
 import scmutil, util, ignore, osutil, parsers, encoding
-import os, stat, errno
+import os, stat, errno, gc
 
 propertycache = util.propertycache
 filecache = scmutil.filecache
@@ -285,7 +285,23 @@
         if not st:
             return
 
-        p = parsers.parse_dirstate(self._map, self._copymap, st)
+        # Python's garbage collector triggers a GC each time a certain number
+        # of container objects (the number being defined by
+        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
+        # for each file in the dirstate. The C version then immediately marks
+        # them as not to be tracked by the collector. However, this has no
+        # effect on when GCs are triggered, only on what objects the GC looks
+        # into. This means that O(number of files) GCs are unavoidable.
+        # Depending on when in the process's lifetime the dirstate is parsed,
+        # this can get very expensive. As a workaround, disable GC while
+        # parsing the dirstate.
+        gcenabled = gc.isenabled()
+        gc.disable()
+        try:
+            p = parsers.parse_dirstate(self._map, self._copymap, st)
+        finally:
+            if gcenabled:
+                gc.enable()
         if not self._dirtypl:
             self._pl = p
 
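
The dirstate.py hunk above wraps parsers.parse_dirstate() in a
gc.disable()/gc.enable() pair: as the added comment explains, the parser
allocates one tuple per tracked file, so a large dirstate would otherwise pay
for O(number of files) cyclic-collection passes. Disabling the collector only
skips cycle detection; CPython's reference counting still frees objects, and
the finally block restores the collector's previous state even if parsing
raises. A minimal standalone sketch of the same pattern (the wrapper and the
lambda below are illustrative only, not Mercurial API):

    import gc

    def parse_without_gc(parse, *args):
        # Hypothetical helper: run an allocation-heavy parse with the cyclic
        # garbage collector off, restoring its previous state even on error.
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return parse(*args)
        finally:
            if gcenabled:
                gc.enable()

    # Usage: building one tuple per input line would otherwise trigger
    # many collection passes on a large input.
    records = parse_without_gc(
        lambda lines: [tuple(l.split()) for l in lines], ["a 1", "b 2"])
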
--- a/mercurial/merge.py	Mon Feb 11 01:17:50 2013 +0100
+++ b/mercurial/merge.py	Mon Feb 11 01:21:24 2013 +0100
@@ -196,6 +196,7 @@
     overwrite = force and not branchmerge
     actions, copy, movewithdir = [], {}, {}
 
+    followcopies = False
     if overwrite:
         pa = wctx
     elif pa == p2: # backwards
@@ -203,6 +204,13 @@
     elif not branchmerge and not wctx.dirty(missing=True):
         pass
     elif pa and repo.ui.configbool("merge", "followcopies", True):
+        followcopies = True
+
+    # manifests fetched in order are going to be faster, so prime the caches
+    [x.manifest() for x in
+     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
+
+    if followcopies:
         ret = copies.mergecopies(repo, wctx, p2, pa)
         copy, movewithdir, diverge, renamedelete = ret
         for of, fl in diverge.iteritems():
@@ -515,12 +523,12 @@
             _checkcollision(mctx, None)
         else:
             _checkcollision(mctx, (tctx, ancestor))
-    if tctx.rev() is None:
-        actions += _forgetremoved(tctx, mctx, branchmerge)
     actions += manifestmerge(repo, tctx, mctx,
                              ancestor,
                              branchmerge, force,
                              partial)
+    if tctx.rev() is None:
+        actions += _forgetremoved(tctx, mctx, branchmerge)
     return actions
 
 def recordupdates(repo, actions, branchmerge):
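
The first merge.py hunk splits the followcopies decision from the copy
tracing itself so that, before copies.mergecopies() runs, the manifests of
the working directory parents, p2 and pa can be read in ascending revision
order; the added comment only states that manifests fetched in order are
faster, presumably because the underlying storage is then read front to back.
A rough sketch of that cache-priming idiom, assuming context objects with
rev() and manifest() methods (the helper name is illustrative, not Mercurial
API):

    def primemanifests(contexts):
        # Touch each context's manifest once, lowest revision first, so the
        # underlying storage is traversed sequentially and later manifest()
        # calls during the merge hit warm caches.
        for ctx in sorted(contexts, key=lambda c: c.rev()):
            ctx.manifest()

    # e.g. primemanifests(wctx.parents() + [p2, pa]) mirrors the hunk above.
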